positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def save(self, fname: str):
    """
    Saves this Config (without the frozen state) to a file called fname.

    :param fname: Name of file to store this Config in.
    """
    # Deep-copy first so the live object keeps its frozen state.
    obj = copy.deepcopy(self)
    # Name-mangled private helper; strips the frozen markers before dumping.
    obj.__del_frozen()
    # NOTE(review): assumes the Config instance itself is serializable by
    # yaml.dump and that a matching loader exists -- confirm round-tripping.
    with open(fname, 'w') as out:
        yaml.dump(obj, out, default_flow_style=False)
Saves this Config (without the frozen state) to a file called fname. :param fname: Name of file to store this Config in.
def get_ip_address_from_request(request):
    """Return the best guess at the client's real IP address.

    Checks, in order: ``X-Forwarded-For`` (single value, then proxy
    chain), ``X-Real-IP`` (public addresses only), then ``REMOTE_ADDR``
    (any valid address), and finally falls back to the loopback address.

    :param request: Django-style request object exposing a ``META`` mapping.
    :return: str -- an IP address, '127.0.0.1' when nothing valid is found.
    """
    PRIVATE_IPS_PREFIX = ('10.', '172.', '192.', '127.')
    ip_address = ''
    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR', '')
    if x_forwarded_for and ',' not in x_forwarded_for:
        # Single forwarded address: strip before testing so padded values
        # are not wrongly rejected by the prefix check (bug fix: the
        # original stripped only after the check).
        x_forwarded_for = x_forwarded_for.strip()
        if (not x_forwarded_for.startswith(PRIVATE_IPS_PREFIX)
                and is_valid_ip(x_forwarded_for)):
            ip_address = x_forwarded_for
    else:
        # Proxy chain: take the first public, valid address.
        for ip in (part.strip() for part in x_forwarded_for.split(',')):
            if not ip.startswith(PRIVATE_IPS_PREFIX) and is_valid_ip(ip):
                ip_address = ip
                break
    if not ip_address:
        x_real_ip = request.META.get('HTTP_X_REAL_IP', '').strip()
        if (x_real_ip
                and not x_real_ip.startswith(PRIVATE_IPS_PREFIX)
                and is_valid_ip(x_real_ip)):
            ip_address = x_real_ip
    if not ip_address:
        # Bug fix: the original had two complementary conditions (private
        # and non-private) that together accepted any valid REMOTE_ADDR;
        # a single validity check is equivalent and removes the dead
        # duplicated branch.
        remote_addr = request.META.get('REMOTE_ADDR', '').strip()
        if remote_addr and is_valid_ip(remote_addr):
            ip_address = remote_addr
    return ip_address or '127.0.0.1'
Makes the best attempt to get the client's real IP or return the loopback
def get_all(self):
    """Return all equipments in database.

    :return: Dictionary with the following structure:

    ::

        {'equipaments': {'name' :< name_equipament >},
         {... demais equipamentos ...} }

    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    # Single fixed endpoint; the API takes no request body for this call.
    code, xml = self.submit(None, 'GET', 'equipment/all')
    return self.response(code, xml)
Return all equipments in database :return: Dictionary with the following structure: :: {'equipaments': {'name' :< name_equipament >}, {... demais equipamentos ...} } :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
def get(self, obj_id):
    """Fetch a single item by identifier.

    :param obj_id: int
    :return: dict|str
    """
    item_url = '{url}/{id}'.format(url=self.endpoint_url, id=obj_id)
    response = self._client.session.get(item_url)
    return self.process_response(response)
Get a single item :param obj_id: int :return: dict|str
def _run(self, bundle, container_id=None, empty_process=False,
         log_path=None, pid_file=None, sync_socket=None,
         command="run", log_format="kubernetes"):
    '''_run is the base function for run and create, the only difference
    between the two being that run does not have an option for
    sync_socket. Equivalent command line example:

        singularity oci create [create options...] <container_ID>

    Parameters
    ==========
    bundle: the full path to the bundle folder
    container_id: an optional container_id. If not provided, use same
                  container_id used to generate OciImage instance
    empty_process: run container without executing container process (for
                   example, for a pod container waiting for signals). This
                   is a specific use case for tools like Kubernetes
    log_path: the path to store the log.
    pid_file: specify the pid file path to use
    sync_socket: the path to the unix socket for state synchronization.
    command: the command (run or create) to use (default is run)
    log_format: defaults to kubernetes. Can also be "basic" or "json"
    '''
    container_id = self.get_container_id(container_id)

    # singularity oci create|run
    cmd = self._init_command(command)

    # The bundle must exist before we hand it to Singularity.
    if not os.path.exists(bundle):
        bot.exit('Bundle not found at %s' % bundle)

    cmd = cmd + ['--bundle', bundle]

    # Additional logging options.
    cmd = cmd + ['--log-format', log_format]
    # Bug fix: compare against None with ``is not None`` rather than
    # ``!=`` (PEP 8; avoids surprises with custom __eq__ implementations).
    if log_path is not None:
        cmd = cmd + ['--log-path', log_path]
    if pid_file is not None:
        cmd = cmd + ['--pid-file', pid_file]
    if sync_socket is not None:
        cmd = cmd + ['--sync-socket', sync_socket]
    if empty_process:
        cmd.append('--empty-process')

    # Finally, add the container_id.
    cmd.append(container_id)

    # Create/run the container; the raw command result was never used,
    # so it is no longer bound to a name (bug fix: unused local).
    self._send_command(cmd, sudo=True)

    # Get the status to report to the user!
    # TODO: Singularity seems to create even with error, can we check and
    # delete for the user if this happens?
    return self.state(container_id, sudo=True, sync_socket=sync_socket)
_run is the base function for run and create, the only difference between the two being that run does not have an option for sync_socket. Equivalent command line example: singularity oci create [create options...] <container_ID> Parameters ========== bundle: the full path to the bundle folder container_id: an optional container_id. If not provided, use same container_id used to generate OciImage instance empty_process: run container without executing container process (for example, for a pod container waiting for signals). This is a specific use case for tools like Kubernetes log_path: the path to store the log. pid_file: specify the pid file path to use sync_socket: the path to the unix socket for state synchronization. command: the command (run or create) to use (default is run) log_format: defaults to kubernetes. Can also be "basic" or "json"
def get_events(self, **kwargs):
    """Retrieve events from server."""
    force_refresh = kwargs.pop('force', False)
    reply = api.request_sync_events(self.blink, self.network_id,
                                    force=force_refresh)
    try:
        events = reply['event']
    except (TypeError, KeyError):
        # Malformed or non-dict reply: report and signal failure.
        _LOGGER.error("Could not extract events: %s", reply, exc_info=True)
        return False
    return events
Retrieve events from server.
def _setup_chassis(self):
    """
    Sets up the router with the corresponding chassis
    (create slots and insert default adapters).
    """
    # This chassis family exposes two slots.
    self._create_slots(2)
    # Slot 0 always holds the chassis-integrated adapter; the mapping
    # from chassis model to adapter class lives in ``integrated_adapters``.
    self._slots[0] = self.integrated_adapters[self._chassis]()
Sets up the router with the corresponding chassis (create slots and insert default adapters).
def add_to_capabilities(self, capabilities):
    """
    Adds proxy information as capability in specified capabilities.

    :Args:
     - capabilities: The capabilities to which proxy will be added.
    """
    # proxyType is always present; every other field is included only
    # when it has a truthy value.
    proxy_caps = {'proxyType': self.proxyType['string']}
    optional_fields = (
        'autodetect', 'ftpProxy', 'httpProxy', 'proxyAutoconfigUrl',
        'sslProxy', 'noProxy', 'socksProxy', 'socksUsername',
        'socksPassword',
    )
    for field in optional_fields:
        value = getattr(self, field)
        if value:
            proxy_caps[field] = value
    capabilities['proxy'] = proxy_caps
Adds proxy information as capability in specified capabilities. :Args: - capabilities: The capabilities to which proxy will be added.
def build_tokens_line(self):
    """Build a logical line from tokens.

    Joins the current physical token stream into ``self.logical_line``,
    skipping tokens in SKIP_TOKENS, collecting comments (for noqa
    detection), and muting string contents. Returns a list of
    ``(offset_in_logical_line, (row, col))`` pairs used to map logical
    positions back to physical source positions.
    """
    logical = []          # text pieces of the logical line
    comments = []         # comment text, checked for a trailing noqa
    length = 0            # running length of the logical line so far
    prev_row = prev_col = mapping = None
    for token_type, text, start, end, line in self.tokens:
        if token_type in SKIP_TOKENS:
            continue
        if not mapping:
            mapping = [(0, start)]
        if token_type == tokenize.COMMENT:
            comments.append(text)
            continue
        if token_type == tokenize.STRING:
            # Replace string contents so checks don't trip on them.
            text = mute_string(text)
        if prev_row:
            (start_row, start_col) = start
            if prev_row != start_row:    # different row
                prev_text = self.lines[prev_row - 1][prev_col - 1]
                # Insert a space between tokens that were separated by a
                # line break, unless they sit inside brackets.
                if prev_text == ',' or (prev_text not in '{[(' and
                                        text not in '}])'):
                    text = ' ' + text
            elif prev_col != start_col:  # different column
                # Preserve the original intra-line whitespace verbatim.
                text = line[prev_col:start_col] + text
        logical.append(text)
        length += len(text)
        mapping.append((length, end))
        (prev_row, prev_col) = end
    self.logical_line = ''.join(logical)
    self.noqa = comments and noqa(''.join(comments))
    return mapping
Build a logical line from tokens.
def set(self, num):
    """ Sets current value to num """
    # validate() returns None for rejected input, so both the index
    # update and the base-class set only happen for allowed values.
    # NOTE(review): suite boundaries reconstructed from flattened source;
    # confirm IntegerEntry.set is meant to be inside the conditional.
    if self.validate(num) is not None:
        self.index = self.allowed.index(num)
        IntegerEntry.set(self, num)
Sets current value to num
def write(nml, nml_path, force=False, sort=False):
    """Save a namelist to disk using either a file object or its file path.

    File object usage:

    >>> with open(nml_path, 'w') as nml_file:
    >>>     f90nml.write(nml, nml_file)

    File path usage:

    >>> f90nml.write(nml, 'data.nml')

    This function is equivalent to the ``write`` method of the
    ``Namelist`` object ``nml``:

    >>> nml.write('data.nml')

    By default, ``write`` will not overwrite an existing file. To override
    this, use the ``force`` flag. To alphabetically sort the ``Namelist``
    keys, use the ``sort`` flag.
    """
    # Plain dicts are promoted to Namelist; everything else (including
    # Namelist instances) is passed through untouched.
    if isinstance(nml, Namelist) or not isinstance(nml, dict):
        nml_out = nml
    else:
        nml_out = Namelist(nml)
    nml_out.write(nml_path, force=force, sort=sort)
Save a namelist to disk using either a file object or its file path. File object usage: >>> with open(nml_path, 'w') as nml_file: >>> f90nml.write(nml, nml_file) File path usage: >>> f90nml.write(nml, 'data.nml') This function is equivalent to the ``write`` function of the ``Namelist`` object ``nml``. >>> nml.write('data.nml') By default, ``write`` will not overwrite an existing file. To override this, use the ``force`` flag. >>> nml.write('data.nml', force=True) To alphabetically sort the ``Namelist`` keys, use the ``sort`` flag. >>> nml.write('data.nml', sort=True)
def build_view_from_tag(self, tag):
    """ Build a view of group of Symbols based on their tag.

    Parameters
    ----------
    tag : str
        Use '%' to enable SQL's "LIKE" functionality.

    Note
    ----
    This function is written without SQLAlchemy, so it only tested
    on Postgres.
    """
    # NOTE(review): ``tag`` and symbol names are interpolated directly
    # into SQL text. Identifiers cannot be bound as parameters, so the
    # caller MUST guarantee ``tag`` comes from a trusted source
    # (SQL-injection risk otherwise).
    syms = self.search_tag(tag)
    names = [sym.name for sym in syms]
    # One SELECT per symbol table, unioned into a single view.
    subs = ["SELECT indx, '{}' AS symbol, final FROM {}".format(s, s)
            for s in names]
    qry = " UNION ALL ".join(subs)
    qry = "CREATE VIEW {} AS {};".format(tag, qry)
    # Drop-and-recreate so repeated calls refresh the view definition.
    self.ses.execute("DROP VIEW IF EXISTS {};".format(tag))
    self.ses.commit()
    self.ses.execute(qry)
    self.ses.commit()
Build a view of group of Symbols based on their tag. Parameters ---------- tag : str Use '%' to enable SQL's "LIKE" functionality. Note ---- This function is written without SQLAlchemy, so it only tested on Postgres.
def checkIsReachable(rh):
    """Check if a virtual machine is reachable.

    Input:
       Request Handle with the following properties:
          function    - 'POWERVM'
          subfunction - 'ISREACHABLE'
          userid      - userid of the virtual machine

    Output:
       Request Handle updated with the results.
          overallRC - 0: determined the status, non-zero: some weird
                      failure while trying to execute a command on the
                      guest via IUCV
          rc - RC returned from execCmdThruIUCV
          rs - 0: not reachable, 1: reachable
    """
    rh.printSysLog("Enter powerVM.checkIsReachable, userid: " + rh.userid)

    # A trivial command: success means the guest answered over IUCV.
    results = execCmdThruIUCV(rh, rh.userid, "echo 'ping'")

    # A failure from execCmdThruIUCV is an acceptable way of determining
    # that the system is unreachable; the error message is not propagated.
    reachable = 1 if results['overallRC'] == 0 else 0
    status = ": reachable" if reachable else ": unreachable"
    rh.printLn("N", rh.userid + status)

    rh.updateResults({"rs": reachable})
    rh.printSysLog("Exit powerVM.checkIsReachable, rc: 0")
    return 0
Check if a virtual machine is reachable. Input: Request Handle with the following properties: function - 'POWERVM' subfunction - 'ISREACHABLE' userid - userid of the virtual machine Output: Request Handle updated with the results. overallRC - 0: determined the status, non-zero: some weird failure while trying to execute a command on the guest via IUCV rc - RC returned from execCmdThruIUCV rs - 0: not reachable, 1: reachable
def print_plot_line(function, popt, xs, ys, name, tol=0.05, extra=''):
    """
    Print the gnuplot command lines to plot the x, y data with the fitted
    function using the popt parameters.

    :param function: the fitted model function; unrecognized functions are
        reported with a message and no fit curve is written
    :param popt: fitted parameter values (popt[0] is the converged value)
    :param xs: x data points
    :param ys: y data points
    :param name: title / output-file stem for the gnuplot script
    :param tol: tolerance band drawn around the converged value
    :param extra: extra text appended to the plot title
    """
    idp = id_generator()
    # Dump the raw data points for gnuplot to read back. Bug fix: use a
    # context manager so the handle is closed even if a write fails
    # (the original used open()/close() with no try/finally).
    with open('convdat.' + str(idp), mode='w') as f:
        for x, y in zip(xs, ys):
            f.write(str(x) + ' ' + str(y) + '\n')

    tol = abs(tol)
    line = "plot 'convdat.%s' pointsize 4 lt 0, " % idp
    # Converged value plus upper/lower tolerance guide lines.
    line += '%s lt 3, %s lt 4, %s lt 4, ' % (popt[0], popt[0] - tol,
                                             popt[0] + tol)
    if function is exponential:
        line += "%s + %s * %s ** -x" % (popt[0], popt[1],
                                        min(max(1.00001, popt[2]), 1.2))
    elif function is reciprocal:
        line += "%s + %s / x**%s" % (popt[0], popt[1],
                                     min(max(0.5, popt[2]), 6))
    elif function is single_reciprocal:
        line += "%s + %s / (x - %s)" % (popt[0], popt[1], popt[2])
    elif function is simple_reciprocal:
        line += "%s + %s / x" % (popt[0], popt[1])
    elif function is simple_2reciprocal:
        line += "%s + %s / x**2" % (popt[0], popt[1])
    elif function is simple_4reciprocal:
        line += "%s + %s / x**4" % (popt[0], popt[1])
    elif function is simple_5reciprocal:
        line += "%s + %s / x**0.5" % (popt[0], popt[1])
    else:
        print(function, ' no plot ')

    with open('plot-fits', mode='a') as f:
        f.write('set title "' + name + ' - ' + extra + '"\n')
        f.write("set output '" + name + '-' + idp + ".gif'" + '\n')
        f.write("set yrange [" + str(popt[0] - 5 * tol) + ':'
                + str(popt[0] + 5 * tol) + ']\n')
        f.write(line + '\n')
        f.write('pause -1 \n')
print the gnuplot command line to plot the x, y data with the fitted function using the popt parameters
def _normalize_array(array, domain=(0, 1)): """Given an arbitrary rank-3 NumPy array, produce one representing an image. This ensures the resulting array has a dtype of uint8 and a domain of 0-255. Args: array: NumPy array representing the image domain: expected range of values in array, defaults to (0, 1), if explicitly set to None will use the array's own range of values and normalize them. Returns: normalized PIL.Image """ # first copy the input so we're never mutating the user's data array = np.array(array) # squeeze helps both with batch=1 and B/W and PIL's mode inference array = np.squeeze(array) assert len(array.shape) <= 3 assert np.issubdtype(array.dtype, np.number) assert not np.isnan(array).any() low, high = np.min(array), np.max(array) if domain is None: message = "No domain specified, normalizing from measured (~%.2f, ~%.2f)" log.debug(message, low, high) domain = (low, high) # clip values if domain was specified and array contains values outside of it if low < domain[0] or high > domain[1]: message = "Clipping domain from (~{:.2f}, ~{:.2f}) to (~{:.2f}, ~{:.2f})." log.info(message.format(low, high, domain[0], domain[1])) array = array.clip(*domain) min_value, max_value = np.iinfo(np.uint8).min, np.iinfo(np.uint8).max # 0, 255 # convert signed to unsigned if needed if np.issubdtype(array.dtype, np.inexact): offset = domain[0] if offset != 0: array -= offset log.debug("Converting inexact array by subtracting -%.2f.", offset) scalar = max_value / (domain[1] - domain[0]) if scalar != 1: array *= scalar log.debug("Converting inexact array by scaling by %.2f.", scalar) return array.clip(min_value, max_value).astype(np.uint8)
Given an arbitrary rank-3 NumPy array, produce one representing an image. This ensures the resulting array has a dtype of uint8 and a domain of 0-255. Args: array: NumPy array representing the image domain: expected range of values in array, defaults to (0, 1), if explicitly set to None will use the array's own range of values and normalize them. Returns: normalized PIL.Image
def main(args=None):
    """The main function: parse CLI arguments and dispatch the requested
    Fritz!Box Smarthome command, logging in/out around the call."""
    parser = argparse.ArgumentParser(
        description='Fritz!Box Smarthome CLI tool.')
    parser.add_argument('-v', action='store_true', dest='verbose',
                        help='be more verbose')
    parser.add_argument('-f', '--fritzbox', type=str, dest='host',
                        help='Fritz!Box IP address', default='fritz.box')
    parser.add_argument('-u', '--user', type=str, dest='user',
                        help='Username')
    # Bug fix: the help text for --password wrongly said 'Username'.
    parser.add_argument('-p', '--password', type=str, dest='password',
                        help='Password')
    parser.add_argument('-a', '--ain', type=str, dest='ain',
                        help='Actor Identification', default=None)
    parser.add_argument('-V', '--version', action='version',
                        version='{version}'.format(version=__version__),
                        help='Print version')

    _sub = parser.add_subparsers(title='Commands')

    # list all devices
    subparser = _sub.add_parser('list', help='List all available devices')
    subparser.set_defaults(func=list_all)

    # device
    subparser = _sub.add_parser('device', help='Device/Actor commands')
    _sub_switch = subparser.add_subparsers()

    # device name
    subparser = _sub_switch.add_parser('name', help='get the device name')
    subparser.add_argument('ain', type=str, metavar="AIN",
                           help='Actor Identification')
    subparser.set_defaults(func=device_name)

    # device presence
    subparser = _sub_switch.add_parser('present',
                                       help='get the device presence')
    subparser.add_argument('ain', type=str, metavar="AIN",
                           help='Actor Identification')
    subparser.set_defaults(func=device_presence)

    # device stats
    subparser = _sub_switch.add_parser('stats',
                                       help='get the device statistics')
    subparser.add_argument('ain', type=str, metavar="AIN",
                           help='Actor Identification')
    subparser.set_defaults(func=device_statistics)

    # switch
    subparser = _sub.add_parser('switch', help='Switch commands')
    _sub_switch = subparser.add_subparsers()

    # switch get
    subparser = _sub_switch.add_parser('get', help='get state')
    subparser.add_argument('ain', type=str, metavar="AIN",
                           help='Actor Identification')
    subparser.set_defaults(func=switch_get)

    # switch on
    subparser = _sub_switch.add_parser('on', help='set on state')
    subparser.add_argument('ain', type=str, metavar="AIN",
                           help='Actor Identification')
    subparser.set_defaults(func=switch_on)

    # switch off
    subparser = _sub_switch.add_parser('off', help='set off state')
    subparser.add_argument('ain', type=str, metavar="AIN",
                           help='Actor Identification')
    subparser.set_defaults(func=switch_off)

    # switch toggle
    # Bug fix: the toggle command's help text wrongly said 'set off state'.
    subparser = _sub_switch.add_parser('toggle', help='toggle state')
    subparser.add_argument('ain', type=str, metavar="AIN",
                           help='Actor Identification')
    subparser.set_defaults(func=switch_toggle)

    args = parser.parse_args(args)

    logging.basicConfig()
    if args.verbose:
        logging.getLogger('pyfritzhome').setLevel(logging.DEBUG)

    fritzbox = None
    try:
        fritzbox = Fritzhome(host=args.host, user=args.user,
                             password=args.password)
        fritzbox.login()
        args.func(fritzbox, args)
    finally:
        # Always log out, even when the dispatched command raises.
        if fritzbox is not None:
            fritzbox.logout()
The main function.
def download_url(self, timeout=60, name=None):
    """
    Trigger a browse download.

    :param timeout: int - Time in seconds to expire the download
    :param name: str - for LOCAL only, to rename the file being downloaded
    :return: str - a (possibly signed) URL for the object
    :raises NotImplementedError: if the storage provider has no signed-url
        support implemented
    """
    if "local" in self.driver.name.lower():
        return url_for(SERVER_ENDPOINT,
                       object_name=self.name,
                       dl=1,
                       name=name,
                       _external=True)
    else:
        driver_name = self.driver.name.lower()
        # NOTE(review): "%s" is a platform-specific strftime directive
        # (epoch seconds); not portable to Windows -- confirm deployment OS.
        expires = (datetime.datetime.now()
                   + datetime.timedelta(seconds=timeout)).strftime("%s")
        if 's3' in driver_name or 'google' in driver_name:
            # Canonical string-to-sign for S3/GCS query-string auth.
            s2s = "GET\n\n\n{expires}\n/{object_name}"\
                .format(expires=expires, object_name=self.path)
            h = hmac.new(self.driver.secret.encode('utf-8'),
                         s2s.encode('utf-8'),
                         hashlib.sha1)
            # Bug fix: base64.encodestring was deprecated and removed in
            # Python 3.9; b64encode is the supported equivalent (and a
            # 20-byte SHA-1 digest encodes without embedded newlines).
            s = base64.b64encode(h.digest()).strip()
            _keyIdName = "AWSAccessKeyId" if "s3" in driver_name \
                else "GoogleAccessId"
            params = {
                _keyIdName: self.driver.key,
                "Expires": expires,
                "Signature": s
            }
            urlkv = urlencode(params)
            return "%s?%s" % (self.secure_url, urlkv)
        elif 'cloudfiles' in driver_name:
            return self.driver.ex_get_object_temp_url(self._obj,
                                                      method="GET",
                                                      timeout=expires)
        else:
            # Bug fix: ``raise NotImplemented(...)`` raises a TypeError at
            # runtime because NotImplemented is a constant, not an
            # exception class.
            raise NotImplementedError("This provider '%s' doesn't support or "
                                      "doesn't have a signed url "
                                      "implemented yet" % self.provider_name)
Trigger a browse download :param timeout: int - Time in seconds to expire the download :param name: str - for LOCAL only, to rename the file being downloaded :return: str
def tcc(text: str) -> "Iterator[str]":
    """
    TCC generator: yields Thai Character Clusters one at a time.

    :param str text: text to be tokenized to character clusters
    :return: generator of subwords (character clusters); yields nothing
        for empty or non-``str`` input
    """
    # Bug fix: the original annotation said ``-> str`` but this is a
    # generator function (its body contains ``yield``).
    if not text or not isinstance(text, str):
        return
    pos = 0
    while pos < len(text):
        match = PAT_TCC.match(text[pos:])
        # Fall back to a single character when no cluster pattern matches.
        step = match.span()[1] if match else 1
        yield text[pos : pos + step]
        pos += step
TCC generator, generates Thai Character Clusters :param str text: text to be tokenized to character clusters :return: subword (character cluster)
def save(self, mark):
    """Save a position in this collection.

    :param mark: The position to save
    :type mark: Mark
    :raises: DBError, NoTrackingCollection
    """
    self._check_exists()
    obj = mark.as_dict()
    try:
        # Make a 'filter' to find/update existing record, which uses
        # the field name and operation (but not the position).
        filt = {k: obj[k] for k in (mark.FLD_FLD, mark.FLD_OP)}
        _log.debug("save: upsert-spec={} upsert-obj={}".format(filt, obj))
        # NOTE(review): Collection.update() was deprecated and removed in
        # pymongo 4; replace_one(filt, obj, upsert=True) is the modern
        # equivalent -- confirm the pinned pymongo version before changing.
        self._track.update(filt, obj, upsert=True)
    except pymongo.errors.PyMongoError as err:
        # Wrap driver-level failures in the package's DBError.
        raise DBError("{}".format(err))
Save a position in this collection. :param mark: The position to save :type mark: Mark :raises: DBError, NoTrackingCollection
def unauthenticate(self):
    """
    Clears out any credentials, tokens, and service catalog info.
    """
    # All plain string credentials reset to the empty string.
    for attr in ("username", "password", "tenant_id", "tenant_name",
                 "token", "region", "api_key"):
        setattr(self, attr, "")
    self.expires = None
    self._creds_file = None
    # Service catalog containers get fresh, empty DotDicts.
    self.services = utils.DotDict()
    self.regions = utils.DotDict()
    self.authenticated = False
Clears out any credentials, tokens, and service catalog info.
def create_halton_samples(order, dim=1, burnin=-1, primes=()):
    """
    Create a Halton low-discrepancy sequence.

    For ``dim == 1`` the sequence falls back to Van Der Corput sequence.

    Args:
        order (int): The order of the Halton sequence. Defines the number
            of samples.
        dim (int): The number of dimensions in the Halton sequence.
        burnin (int): Skip the first ``burnin`` samples. If negative, the
            maximum of ``primes`` is used.
        primes (tuple): The (non-)prime base to calculate values along
            each axis. If empty, growing prime values starting from 2
            will be used.

    Returns (numpy.ndarray):
        Halton sequence with ``shape == (dim, order)``.
    """
    bases = list(primes)
    if not bases:
        # Grow the prime pool until there is at least one base per axis.
        limit = 10 * dim
        while len(bases) < dim:
            bases = create_primes(limit)
            limit *= 2
    bases = bases[:dim]
    assert len(bases) == dim, "not enough primes"

    if burnin < 0:
        burnin = max(bases)

    offsets = [index + burnin for index in range(order)]
    samples = numpy.empty((dim, order))
    for axis in range(dim):
        samples[axis] = create_van_der_corput_samples(
            offsets, number_base=bases[axis])
    return samples
Create Halton sequence. For ``dim == 1`` the sequence falls back to Van Der Corput sequence. Args: order (int): The order of the Halton sequence. Defines the number of samples. dim (int): The number of dimensions in the Halton sequence. burnin (int): Skip the first ``burnin`` samples. If negative, the maximum of ``primes`` is used. primes (tuple): The (non-)prime base to calculate values along each axis. If empty, growing prime values starting from 2 will be used. Returns (numpy.ndarray): Halton sequence with ``shape == (dim, order)``.
def write_hdf5_array(array, h5g, path=None, attrs=None, append=False,
                     overwrite=False, compression='gzip', **kwargs):
    """Write the ``array`` to an `h5py.Dataset`

    Parameters
    ----------
    array : `gwpy.types.Array`
        the data object to write

    h5g : `str`, `h5py.Group`
        a file path to write to, or an `h5py.Group` in which to create
        a new dataset

    path : `str`, optional
        the path inside the group at which to create the new dataset,
        defaults to ``array.name``

    attrs : `dict`, optional
        extra metadata to write into `h5py.Dataset.attrs`, on top of
        the default metadata

    append : `bool`, default: `False`
        if `True`, write new dataset to existing file, otherwise an
        exception will be raised if the output file exists (only used if
        ``f`` is `str`)

    overwrite : `bool`, default: `False`
        if `True`, overwrite an existing dataset in an existing file,
        otherwise an exception will be raised if a dataset exists with
        the given name (only used if ``f`` is `str`)

    compression : `str`, `int`, optional
        compression option to pass to :meth:`h5py.Group.create_dataset`

    **kwargs
        other keyword arguments for :meth:`h5py.Group.create_dataset`

    Returns
    -------
    datasets : `h5py.Dataset`
        the newly created dataset
    """
    # NOTE(review): ``append`` is accepted (and documented) but never used
    # in this body -- presumably consumed by a calling layer; confirm.
    if path is None:
        path = array.name
    if path is None:
        raise ValueError("Cannot determine HDF5 path for %s, "
                         "please set ``name`` attribute, or pass ``path=`` "
                         "keyword when writing" % type(array).__name__)

    # create dataset
    dset = io_hdf5.create_dataset(h5g, path, overwrite=overwrite,
                                  data=array.value,
                                  compression=compression, **kwargs)

    # write default metadata
    write_array_metadata(dset, array)

    # allow caller to specify their own metadata dict
    if attrs:
        for key in attrs:
            dset.attrs[key] = attrs[key]

    return dset
Write the ``array`` to an `h5py.Dataset` Parameters ---------- array : `gwpy.types.Array` the data object to write h5g : `str`, `h5py.Group` a file path to write to, or an `h5py.Group` in which to create a new dataset path : `str`, optional the path inside the group at which to create the new dataset, defaults to ``array.name`` attrs : `dict`, optional extra metadata to write into `h5py.Dataset.attrs`, on top of the default metadata append : `bool`, default: `False` if `True`, write new dataset to existing file, otherwise an exception will be raised if the output file exists (only used if ``f`` is `str`) overwrite : `bool`, default: `False` if `True`, overwrite an existing dataset in an existing file, otherwise an exception will be raised if a dataset exists with the given name (only used if ``f`` is `str`) compression : `str`, `int`, optional compression option to pass to :meth:`h5py.Group.create_dataset` **kwargs other keyword arguments for :meth:`h5py.Group.create_dataset` Returns ------- datasets : `h5py.Dataset` the newly created dataset
def to_dict(obj):
    """
    Convert ``obj`` to a dictionary.

    If the value isn't a dict already then it needs to either implement
    ``to_dict`` (instances of Serializable) or have member data matching
    each required arg of its ``__init__``.

    :param obj: object to convert
    :return: dict representation of ``obj``
    :raises ValueError: if no conversion strategy succeeds
    """
    if isinstance(obj, dict):
        return obj
    if hasattr(obj, "to_dict"):
        return obj.to_dict()
    try:
        return simple_object_to_dict(obj)
    except Exception as exc:
        # Bug fix: the original bare ``except:`` also swallowed
        # SystemExit/KeyboardInterrupt; catch Exception and chain the
        # original cause for debuggability.
        raise ValueError(
            "Cannot convert %s : %s to dictionary" % (obj, type(obj))
        ) from exc
If value wasn't isn't a primitive scalar or collection then it needs to either implement to_dict (instances of Serializable) or has member data matching each required arg of __init__.
def rnn(name, input, state, kernel, bias, new_state, number_of_gates = 2):
    '''Build a vanilla RNN cell:

    - Ht = f(Xt*Wi + Ht_1*Ri + Wbi + Rbi)
    '''
    nn = Build(name)
    # Concatenate input with previous hidden state, apply the affine
    # transform, then squash with tanh into the new state tensor.
    combined = nn.concat(input, state)
    nn.tanh(nn.mad(kernel=kernel, bias=bias, x=combined), out=new_state)
    return nn.layers
- Ht = f(Xt*Wi + Ht_1*Ri + Wbi + Rbi)
def download_pic(self, filename: str, url: str, mtime: datetime,
                 filename_suffix: Optional[str] = None,
                 _attempt: int = 1) -> bool:
    """Downloads and saves picture with given url under given directory
    with given timestamp. Returns true, if file was actually downloaded,
    i.e. updated."""
    # Derive the file extension from the URL; URLs of the form
    # ".../name.ext?query" carry the extension just before the query
    # string, otherwise assume a plain 3-character extension.
    urlmatch = re.search('\\.[a-z0-9]*\\?', url)
    file_extension = url[-3:] if urlmatch is None else urlmatch.group(0)[1:-1]
    if filename_suffix is not None:
        filename += '_' + filename_suffix
    filename += '.' + file_extension
    # A post is considered "commited" if the json file exists and is not malformed.
    if self.commit_mode:
        # In commit mode the file only counts as existing when the post
        # was fully committed previously.
        if self._committed and os.path.isfile(filename):
            self.context.log(filename + ' exists', end=' ', flush=True)
            return False
    else:
        if os.path.isfile(filename):
            self.context.log(filename + ' exists', end=' ', flush=True)
            return False
    self.context.get_and_write_raw(url, filename)
    # Stamp the file with the post's original modification time.
    os.utime(filename, (datetime.now().timestamp(), mtime.timestamp()))
    return True
Downloads and saves picture with given url under given directory with given timestamp. Returns true, if file was actually downloaded, i.e. updated.
def visit_Call(self, node):
    '''
    Resulting node alias to the return_alias of called function,
    if the function is already known by Pythran (i.e. it's an Intrinsic)
    or if Pythran already computed it's ``return_alias`` behavior.

    >>> from pythran import passmanager
    >>> pm = passmanager.PassManager('demo')
    >>> fun = """
    ... def f(a): return a
    ... def foo(b): c = f(b)"""
    >>> module = ast.parse(fun)

    The ``f`` function create aliasing between the returned value
    and its first argument.

    >>> result = pm.gather(Aliases, module)
    >>> Aliases.dump(result, filter=ast.Call)
    f(b) => ['b']

    This also works with intrinsics, e.g ``dict.setdefault`` which
    may create alias between its third argument and the return value.

    >>> fun = 'def foo(a, d): __builtin__.dict.setdefault(d, 0, a)'
    >>> module = ast.parse(fun)
    >>> result = pm.gather(Aliases, module)
    >>> Aliases.dump(result, filter=ast.Call)
    __builtin__.dict.setdefault(d, 0, a) => ['<unbound-value>', 'a']

    Note that complex cases can arise, when one of the formal parameter
    is already known to alias to various values:

    >>> fun = """
    ... def f(a, b): return a and b
    ... def foo(A, B, C, D): return f(A or B, C or D)"""
    >>> module = ast.parse(fun)
    >>> result = pm.gather(Aliases, module)
    >>> Aliases.dump(result, filter=ast.Call)
    f((A or B), (C or D)) => ['A', 'B', 'C', 'D']
    '''
    self.generic_visit(node)
    f = node.func
    # special handler for bind functions
    if isinstance(f, ast.Attribute) and f.attr == "partial":
        # partial-like binders: the call result aliases the call node itself.
        return self.add(node, {node})
    else:
        return_alias = self.call_return_alias(node)
        # expand collected aliases
        all_aliases = set()
        for value in return_alias:
            # no translation
            if isinstance(value, (ContainerOf, ast.FunctionDef,
                                  Intrinsic)):
                # These alias kinds are kept verbatim, not looked up.
                all_aliases.add(value)
            elif value in self.result:
                all_aliases.update(self.result[value])
            else:
                try:
                    # Translate through the value's access path when the
                    # node itself has no recorded alias set.
                    ap = Aliases.access_path(value)
                    all_aliases.update(self.aliases.get(ap, ()))
                except NotImplementedError:
                    # should we do something better here?
                    all_aliases.add(value)
        return self.add(node, all_aliases)
Resulting node alias to the return_alias of called function, if the function is already known by Pythran (i.e. it's an Intrinsic) or if Pythran already computed it's ``return_alias`` behavior. >>> from pythran import passmanager >>> pm = passmanager.PassManager('demo') >>> fun = """ ... def f(a): return a ... def foo(b): c = f(b)""" >>> module = ast.parse(fun) The ``f`` function create aliasing between the returned value and its first argument. >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.Call) f(b) => ['b'] This also works with intrinsics, e.g ``dict.setdefault`` which may create alias between its third argument and the return value. >>> fun = 'def foo(a, d): __builtin__.dict.setdefault(d, 0, a)' >>> module = ast.parse(fun) >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.Call) __builtin__.dict.setdefault(d, 0, a) => ['<unbound-value>', 'a'] Note that complex cases can arise, when one of the formal parameter is already known to alias to various values: >>> fun = """ ... def f(a, b): return a and b ... def foo(A, B, C, D): return f(A or B, C or D)""" >>> module = ast.parse(fun) >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.Call) f((A or B), (C or D)) => ['A', 'B', 'C', 'D']
def _handle_temporary_connection(self, old_sink, new_sink, of_target=True): """Connect connection to new_sink If new_sink is set, the connection origin or target will be set to new_sink. The connection to old_sink is being removed. :param gaphas.aspect.ConnectionSink old_sink: Old sink (if existing) :param gaphas.aspect.ConnectionSink new_sink: New sink (if existing) :param bool of_target: Whether the origin or target will be reconnected :return: """ def sink_set_and_differs(sink_a, sink_b): if not sink_a: return False if not sink_b: return True if sink_a.port != sink_b.port: return True return False if sink_set_and_differs(old_sink, new_sink): sink_port_v = old_sink.port.port_v self._disconnect_temporarily(sink_port_v, target=of_target) if sink_set_and_differs(new_sink, old_sink): sink_port_v = new_sink.port.port_v self._connect_temporarily(sink_port_v, target=of_target)
Connect connection to new_sink If new_sink is set, the connection origin or target will be set to new_sink. The connection to old_sink is being removed. :param gaphas.aspect.ConnectionSink old_sink: Old sink (if existing) :param gaphas.aspect.ConnectionSink new_sink: New sink (if existing) :param bool of_target: Whether the origin or target will be reconnected :return:
def access_view(name, **kwargs):
    """Shows ACL for the specified service."""
    ctx = Context(**kwargs)
    action_kwargs = {
        'unicorn': ctx.repo.create_secure_service('unicorn'),
        'service': name,
    }
    ctx.execute_action('access:view', **action_kwargs)
Shows ACL for the specified service.
def cudnnSetPooling2dDescriptor(poolingDesc, mode, windowHeight, windowWidth,
                                verticalPadding, horizontalPadding,
                                verticalStride, horizontalStride):
    """ Initialize a 2D pooling descriptor.

    This function initializes a previously created pooling descriptor object.

    Parameters
    ----------
    poolingDesc : cudnnPoolingDescriptor
        Handle to a previously created pooling descriptor.
    mode : cudnnPoolingMode
        Enumerant to specify the pooling mode.
    windowHeight : int
        Height of the pooling window.
    windowWidth : int
        Width of the pooling window.
    verticalPadding: int
        Size of vertical padding.
    horizontalPadding: int
        Size of horizontal padding.
    verticalStride : int
        Pooling vertical stride.
    horizontalStride : int
        Pooling horizontal stride.
    """
    # Delegate to the raw cuDNN binding; cudnnCheckStatus raises on any
    # non-success status code.
    status = _libcudnn.cudnnSetPooling2dDescriptor(poolingDesc, mode, windowHeight,
                                                   windowWidth, verticalPadding,
                                                   horizontalPadding, verticalStride,
                                                   horizontalStride)
    cudnnCheckStatus(status)
Initialize a 2D pooling descriptor. This function initializes a previously created pooling descriptor object. Parameters ---------- poolingDesc : cudnnPoolingDescriptor Handle to a previously created pooling descriptor. mode : cudnnPoolingMode Enumerant to specify the pooling mode. windowHeight : int Height of the pooling window. windowWidth : int Width of the pooling window. verticalPadding: int Size of vertical padding. horizontalPadding: int Size of horizontal padding. verticalStride : int Pooling vertical stride. horizontalStride : int Pooling horizontal stride.
def is_uncertainty_edition_allowed(self, analysis_brain): """Checks if the edition of the uncertainty field is allowed :param analysis_brain: Brain that represents an analysis :return: True if the user can edit the result field, otherwise False """ # Only allow to edit the uncertainty if result edition is allowed if not self.is_result_edition_allowed(analysis_brain): return False # Get the ananylsis object obj = api.get_object(analysis_brain) # Manual setting of uncertainty is not allowed if not obj.getAllowManualUncertainty(): return False # Result is a detection limit -> uncertainty setting makes no sense! if obj.getDetectionLimitOperand() in [LDL, UDL]: return False return True
Checks if the edition of the uncertainty field is allowed :param analysis_brain: Brain that represents an analysis :return: True if the user can edit the result field, otherwise False
def load_swagger_spec(self, filepath=None):
    """Load the origin spec from a local JSON file.

    If `filepath` is not provided (or is True), the class `file_spec`
    format string is expanded with ``self.server`` to build the path.

    :param filepath: Path to the JSON spec file, or None/True to derive it
        from ``self.file_spec`` and ``self.server``.
    :return: The parsed JSON document.
    """
    if filepath is True or filepath is None:
        filepath = self.file_spec.format(server=self.server)
    # Context manager closes the handle deterministically; the original
    # json.load(open(...)) relied on garbage collection to do so.
    with open(filepath) as spec_file:
        return json.load(spec_file)
Loads the origin_spec from a local JSON file. If `filepath` is not provided, then the class `file_spec` format will be used to create the file-path value.
def dict_conf(filename):
    """Parse a configuration file and return its contents as a nested dict.

    ``*.conf`` / ``*.ini`` files are parsed with ConfigParser into
    ``{section: {option: value}}``; any other extension is delegated to
    munge, raising if munge is unavailable.

    :param filename: path to the configuration file
    :return: dict of parsed configuration data
    """
    import codecs

    _, ext = os.path.splitext(filename)
    # splitext keeps the leading dot, so strip it before comparing; the
    # original compared against "conf"/"ini" and therefore never matched.
    ext = ext.lower().lstrip(".")
    if ext in ("conf", "ini"):
        # python config via config parser
        config = ConfigParser()
        config.optionxform = str  # preserve option-name case
        config.read(filename)
        rv = {}
        for section in config.sections():
            rv[section] = {}
            for key, value in config.items(section):
                # Drop surrounding quotes, then interpret backslash escapes.
                # (str has no .decode() on Python 3; the original used the
                # Python-2-only "string_escape" codec.)
                cleaned = value.strip('"').strip("'")
                rv[section][key] = codecs.decode(cleaned, "unicode_escape")
        return rv
    # other type of config, use munge
    if munge_config:
        src = munge_config.parse_url(filename)
        return src.cls().load(open(filename)).get("vodka")
    raise Exception("'%s' type of config encountered, install munge" % ext)
Return dict object for *.conf file
def fetch_samples(proj, selector_attribute=None, selector_include=None, selector_exclude=None):
    """ Collect samples of particular protocol(s).

    Protocols can't be both positively selected for and negatively
    selected against. That is, it makes no sense and is not allowed to
    specify both selector_include and selector_exclude protocols. On the
    other hand, if neither is provided, all of the Project's Samples are
    returned. If selector_include is specified, Samples without a protocol
    will be excluded, but if selector_exclude is specified, protocol-less
    Samples will be included.

    :param Project proj: the Project with Samples to fetch
    :param str selector_attribute: name of the sample attribute to select on
    :param Iterable[str] | str selector_include: protocol(s) of interest;
        if specified, a Sample must match one of these values to be kept
    :param Iterable[str] | str selector_exclude: protocol(s) to exclude;
        a Sample matching one of these values is dropped
    :return list[Sample]: Collection of this Project's samples with
        protocol that either matches one of those in selector_include,
        or either lacks a protocol or does not match one of those in
        selector_exclude
    :raise AttributeError: if no sample has the requested selector_attribute
    :raise TypeError: if both selector_include and selector_exclude
        protocols are specified; TypeError since it's basically providing
        two arguments when only one is accepted, so remain consistent
        with vanilla Python2
    """
    if selector_attribute is None or (not selector_include and not selector_exclude):
        # Simple; keep all samples.  In this case, this function simply
        # offers a list rather than an iterator.
        return list(proj.samples)

    # At least one of the samples has to have the specified attribute
    if proj.samples and not any([hasattr(i, selector_attribute) for i in proj.samples]):
        raise AttributeError("The Project samples do not have the attribute '{attr}'"
                             .format(attr=selector_attribute))

    # Intersection between selector_include and selector_exclude is nonsense user error.
    if selector_include and selector_exclude:
        raise TypeError("Specify only selector_include or selector_exclude parameter, "
                        "not both.")

    # Ensure that we're working with sets.
    def make_set(items):
        # A bare string is promoted to a one-element list so membership
        # tests match whole values, not characters.
        if isinstance(items, str):
            items = [items]
        return items

    # Use the attr check here rather than exception block in case the
    # hypothetical AttributeError would occur; we want such
    # an exception to arise, not to catch it as if the Sample lacks "protocol"
    if not selector_include:
        # Loose; keep all samples not in the selector_exclude.
        def keep(s):
            return not hasattr(s, selector_attribute) or \
                getattr(s, selector_attribute) not in make_set(selector_exclude)
    else:
        # Strict; keep only samples in the selector_include.
        def keep(s):
            return hasattr(s, selector_attribute) and \
                getattr(s, selector_attribute) in make_set(selector_include)

    return list(filter(keep, proj.samples))
Collect samples of particular protocol(s). Protocols can't be both positively selected for and negatively selected against. That is, it makes no sense and is not allowed to specify both selector_include and selector_exclude protocols. On the other hand, if neither is provided, all of the Project's Samples are returned. If selector_include is specified, Samples without a protocol will be excluded, but if selector_exclude is specified, protocol-less Samples will be included. :param Project proj: the Project with Samples to fetch :param str selector_attribute: name of the sample attribute to select on :param Iterable[str] | str selector_include: protocol(s) of interest; if specified, a Sample must match one of these values to be kept :param Iterable[str] | str selector_exclude: protocol(s) to exclude; a Sample matching one of these values is dropped :return list[Sample]: Collection of this Project's samples with protocol that either matches one of those in selector_include, or either lacks a protocol or does not match one of those in selector_exclude :raise TypeError: if both selector_include and selector_exclude protocols are specified; TypeError since it's basically providing two arguments when only one is accepted, so remain consistent with vanilla Python2
def __end_of_list(self, ast_token):
    """Handle end of a list.

    Decrements the nesting level; when the outermost list closes, any
    pending list entry is flushed before the closing token is emitted.

    :param ast_token: token marking the end of the list
    """
    self.list_level -= 1
    if self.list_level == 0:
        # Flush the buffered entry collected while inside the list.
        if self.list_entry is not None:
            self.final_ast_tokens.append(self.list_entry)
            self.list_entry = None
        self.final_ast_tokens.append(ast_token)
Handle end of a list.
def maxCtxSubtable(maxCtx, tag, lookupType, st): """Calculate usMaxContext based on a single lookup table (and an existing max value). """ # single positioning, single / multiple substitution if (tag == 'GPOS' and lookupType == 1) or ( tag == 'GSUB' and lookupType in (1, 2, 3)): maxCtx = max(maxCtx, 1) # pair positioning elif tag == 'GPOS' and lookupType == 2: maxCtx = max(maxCtx, 2) # ligatures elif tag == 'GSUB' and lookupType == 4: for ligatures in st.ligatures.values(): for ligature in ligatures: maxCtx = max(maxCtx, ligature.CompCount) # context elif (tag == 'GPOS' and lookupType == 7) or ( tag == 'GSUB' and lookupType == 5): maxCtx = maxCtxContextualSubtable( maxCtx, st, 'Pos' if tag == 'GPOS' else 'Sub') # chained context elif (tag == 'GPOS' and lookupType == 8) or ( tag == 'GSUB' and lookupType == 6): maxCtx = maxCtxContextualSubtable( maxCtx, st, 'Pos' if tag == 'GPOS' else 'Sub', 'Chain') # extensions elif (tag == 'GPOS' and lookupType == 9) or ( tag == 'GSUB' and lookupType == 7): maxCtx = maxCtxSubtable( maxCtx, tag, st.ExtensionLookupType, st.ExtSubTable) # reverse-chained context elif tag == 'GSUB' and lookupType == 8: maxCtx = maxCtxContextualRule(maxCtx, st, 'Reverse') return maxCtx
Calculate usMaxContext based on a single lookup table (and an existing max value).
def locate_arcgis():
    '''Find the path to the ArcGIS Desktop installation.

    Keys to check:

    HKLM/SOFTWARE/ESRI/ArcGIS 'RealVersion' - will give the version, then
    we can use that to go to HKLM/SOFTWARE/ESRI/DesktopXX.X 'InstallDir'.
    Where XX.X is the version.

    We may need to check HKLM/SOFTWARE/Wow6432Node/ESRI instead.

    :return: the ArcGIS Desktop install directory
    :raises ImportError: if the registry keys cannot be found
    '''
    try:
        key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
                              'SOFTWARE\\Wow6432Node\\ESRI\\ArcGIS', 0)
        try:
            version = _winreg.QueryValueEx(key, "RealVersion")[0][:4]
        finally:
            # Close the handle; the original leaked both registry keys.
            _winreg.CloseKey(key)

        key_string = "SOFTWARE\\Wow6432Node\\ESRI\\Desktop{0}".format(version)
        desktop_key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
                                      key_string, 0)
        try:
            return _winreg.QueryValueEx(desktop_key, "InstallDir")[0]
        finally:
            _winreg.CloseKey(desktop_key)
    except WindowsError:
        raise ImportError("Could not locate the ArcGIS directory on this machine")
Find the path to the ArcGIS Desktop installation. Keys to check: HLKM/SOFTWARE/ESRI/ArcGIS 'RealVersion' - will give the version, then we can use that to go to HKLM/SOFTWARE/ESRI/DesktopXX.X 'InstallDir'. Where XX.X is the version We may need to check HKLM/SOFTWARE/Wow6432Node/ESRI instead
def validate_polygon(obj):
    """
    Coerce an input into a valid shapely Polygon.

    Parameters
    -------------
    obj : shapely.geometry.Polygon, str (wkb), or (n, 2) float
      Object which might be a polygon

    Returns
    ------------
    polygon : shapely.geometry.Polygon
      Valid polygon object

    Raises
    -------------
    ValueError
      If a valid finite- area polygon isn't available
    """
    # Order matters: a Polygon instance passes through untouched, then
    # (n, 2) coordinate arrays, then WKB strings.
    if isinstance(obj, Polygon):
        result = obj
    elif util.is_shape(obj, (-1, 2)):
        result = Polygon(obj)
    elif util.is_string(obj):
        result = load_wkb(obj)
    else:
        raise ValueError('Input not a polygon!')

    degenerate = (not result.is_valid) or result.area < tol.zero
    if degenerate:
        raise ValueError('Polygon is zero- area or invalid!')
    return result
Make sure an input can be returned as a valid polygon. Parameters ------------- obj : shapely.geometry.Polygon, str (wkb), or (n, 2) float Object which might be a polygon Returns ------------ polygon : shapely.geometry.Polygon Valid polygon object Raises ------------- ValueError If a valid finite- area polygon isn't available
def stop_process(self):
    """ Stop the process (by killing it). """
    if self.process is None:
        return
    self._user_stop = True
    self.process.kill()
    self.setReadOnly(True)
    self._running = False
Stop the process (by killing it).
def _format_url(handler, host=None, core_name=None, extra=None):
    '''
    PRIVATE METHOD
    Formats the URL based on parameters, and if cores are used or not

    handler : str
        The request handler to hit.
    host : str (None)
        The solr host to query. __opts__['host'] is default
    core_name : str (None)
        The name of the solr core if using cores. Leave this blank if you
        are not using cores or if you want to check all cores.
    extra : list<str> ([])
        A list of name value pairs in string format. e.g. ['name=value']

    Return: str
        Fully formatted URL (http://<host>:<port>/solr/<handler>?wt=json&<extra>)
    '''
    extra = [] if extra is None else extra
    if _get_none_or_value(host) is None or host == 'None':
        host = __salt__['config.option']('solr.host')
    port = __salt__['config.option']('solr.port')
    baseurl = __salt__['config.option']('solr.baseurl')

    # Build the path once instead of four near-identical format branches.
    if _get_none_or_value(core_name) is None:
        path = "{0}/{1}".format(baseurl, handler)
    else:
        path = "{0}/{1}/{2}".format(baseurl, core_name, handler)
    url = "http://{0}:{1}{2}?wt=json".format(host, port, path)
    if extra:
        url = "{0}&{1}".format(url, "&".join(extra))
    return url
PRIVATE METHOD Formats the URL based on parameters, and if cores are used or not handler : str The request handler to hit. host : str (None) The solr host to query. __opts__['host'] is default core_name : str (None) The name of the solr core if using cores. Leave this blank if you are not using cores or if you want to check all cores. extra : list<str> ([]) A list of name value pairs in string format. e.g. ['name=value'] Return: str Fully formatted URL (http://<host>:<port>/solr/<handler>?wt=json&<extra>)
def cmd_move(db=None):
    """Rename a database within a server.

    When used with --force, an existing database with the same name as DEST
    is replaced, the original is renamed out of place in the form
    DEST_old_YYYYMMDD (unless --no-backup is specified).
    """
    # Lazily open a connection when the caller did not supply one.
    conn = connect() if db is None else db
    pg_move_extended(conn, args.src, args.dest)
Rename a database within a server. When used with --force, an existing database with the same name as DEST is replaced, the original is renamed out of place in the form DEST_old_YYYYMMDD (unless --no-backup is specified).
def _handle_response(self, request: Request, response: Response):
    '''Process a response.'''
    self._item_session.update_record_value(status_code=response.reply.code)
    is_listing = isinstance(response, ListingResponse)

    # Keep the document unless it is a listing that was asked to be removed.
    discard = is_listing and self._processor.fetch_params.remove_listing
    if discard:
        self._file_writer_session.discard_document(response)
        action = self._result_rule.handle_no_document(self._item_session)
    else:
        filename = self._file_writer_session.save_document(response)
        action = self._result_rule.handle_document(self._item_session, filename)

    if is_listing:
        self._add_listing_links(response)

    return action
Process a response.
def get_table_rate_shipping_by_id(cls, table_rate_shipping_id, **kwargs):
    """Find TableRateShipping

    Return single instance of TableRateShipping by its ID.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.get_table_rate_shipping_by_id(table_rate_shipping_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str table_rate_shipping_id: ID of tableRateShipping to return (required)
    :return: TableRateShipping
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    request = cls._get_table_rate_shipping_by_id_with_http_info
    if kwargs.get('async'):
        # Asynchronous: the caller receives the request thread.
        return request(table_rate_shipping_id, **kwargs)
    # Synchronous: resolve immediately and hand back the data.
    data = request(table_rate_shipping_id, **kwargs)
    return data
Find TableRateShipping Return single instance of TableRateShipping by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_table_rate_shipping_by_id(table_rate_shipping_id, async=True) >>> result = thread.get() :param async bool :param str table_rate_shipping_id: ID of tableRateShipping to return (required) :return: TableRateShipping If the method is called asynchronously, returns the request thread.
def VcardFieldsEqual(field1, field2):
    """Compare two vCard fields given as lists of components.

    Handle parameters? Are any used aside from 'TYPE'?
    Note: values are force-cast to string so sub-objects like Name and
    Address compare by representation.
    """
    def value_set(components):
        return {str(component.value) for component in components}

    return value_set(field1) == value_set(field2)
Handle comparing vCard fields where inputs are lists of components. Handle parameters? Are any used aside from 'TYPE'? Note: force cast to string to compare sub-objects like Name and Address
def _variable_parts(self, line, codeline):
    """Return variable parts of the codeline, given the static parts."""
    if codeline:
        # codeline has pattern and then has the outputs in different versions
        return self._find_variable(codeline.pattern, line)
    # No codeline: strip counters and datetime noise, leaving only the
    # variable text of the line.
    remainder = self._strip_datetime(self._strip_counters(line))
    return [remainder.strip()]
Return variable parts of the codeline, given the static parts.
def bootstrap_histogram_1D(
        values, intervals, uncertainties=None,
        normalisation=False, number_bootstraps=None, boundaries=None):
    '''
    Bootstrap samples a set of vectors

    :param numpy.ndarray values:
        The data values
    :param numpy.ndarray intervals:
        The bin edges
    :param numpy.ndarray uncertainties:
        The standard deviations of each observation (may be None)
    :param bool normalisation:
        If True then returns the histogram as a density function
    :param int number_bootstraps:
        Number of bootstraps
    :param tuple boundaries:
        (Lower, Upper) bounds on the data
    :param returns:
        1-D histogram of data
    '''
    # Treat missing or all-(near-)zero uncertainties as "no uncertainty".
    # The original evaluated np.fabs(uncertainties < PRECISION) (fabs of a
    # boolean array) and raised TypeError when uncertainties was None.
    no_uncertainty = (uncertainties is None or
                      np.all(np.fabs(uncertainties) < PRECISION))
    if not number_bootstraps or no_uncertainty:
        # No bootstraps requested - return the plain histogram
        output = hmtk_histogram_1D(values, intervals)
        if normalisation:
            output = output / float(np.sum(output))
        return output

    temp_hist = np.zeros([len(intervals) - 1, number_bootstraps], dtype=float)
    for iloc in range(0, number_bootstraps):
        # Resample each observation from a truncated Gaussian and re-bin.
        sample = sample_truncated_gaussian_vector(values, uncertainties,
                                                  boundaries)
        temp_hist[:, iloc] = hmtk_histogram_1D(sample, intervals)
    output = np.sum(temp_hist, axis=1)
    if normalisation:
        # Density: normalise by total count
        return output / float(np.sum(output))
    # Otherwise average over the bootstrap replicates
    return output / float(number_bootstraps)
Bootstrap samples a set of vectors :param numpy.ndarray values: The data values :param numpy.ndarray intervals: The bin edges :param numpy.ndarray uncertainties: The standard deviations of each observation :param bool normalisation: If True then returns the histogram as a density function :param int number_bootstraps: Number of bootstraps :param tuple boundaries: (Lower, Upper) bounds on the data :param returns: 1-D histogram of data
def _add_hook(self, socket, callback):
    """Generic hook. The passed socket has to be "receive only"."""
    self._hooks_cb[socket] = callback
    self._hooks.append(socket)
    if self.poller:
        self.poller.register(socket, POLLIN)
Generic hook. The passed socket has to be "receive only".
def writeInfo(self, location=None, masters=None):
    """ Write font info into the current instance.
        Note: the masters attribute is accepted for API compatibility
        but is ignored at the moment.
    """
    if self.currentInstance is None:
        return
    element = ET.Element("info")
    if location is not None:
        element.append(self._makeLocationElement(location))
    self.currentInstance.append(element)
Write font into the current instance. Note: the masters attribute is ignored at the moment.
def clean(self):
    '''
    TinyMCE adds a placeholder <br> if no data is inserted. In this case,
    remove it.
    '''
    cleaned_data = super(ManagerForm, self).clean()
    placeholder = '<br data-mce-bogus="1">'
    # Blank out any rich-text field that only contains the TinyMCE stub.
    for field in ("compensation", "duties"):
        if cleaned_data.get(field) == placeholder:
            cleaned_data[field] = ""
    return cleaned_data
TinyMCE adds a placeholder <br> if no data is inserted. In this case, remove it.
def email_users(users, subject, text_body, html_body=None, sender=None, configuration=None, **kwargs):
    # type: (List['User'], str, str, Optional[str], Optional[str], Optional[Configuration], Any) -> None
    """Email a list of users

    Args:
        users (List[User]): List of users
        subject (str): Email subject
        text_body (str): Plain text email body
        html_body (str): HTML email body
        sender (Optional[str]): Email sender. Defaults to SMTP username.
        configuration (Optional[Configuration]): HDX configuration.
            Defaults to configuration of first user in list.
        **kwargs: See below
        mail_options (List): Mail options (see smtplib documentation)
        rcpt_options (List): Recipient options (see smtplib documentation)

    Returns:
        None
    """
    if not users:
        raise ValueError('No users supplied')
    recipients = [user.data['email'] for user in users]
    if configuration is None:
        # Fall back to the configuration attached to the first user.
        configuration = users[0].configuration
    configuration.emailer().send(recipients, subject, text_body,
                                 html_body=html_body, sender=sender, **kwargs)
Email a list of users Args: users (List[User]): List of users subject (str): Email subject text_body (str): Plain text email body html_body (str): HTML email body sender (Optional[str]): Email sender. Defaults to SMTP username. configuration (Optional[Configuration]): HDX configuration. Defaults to configuration of first user in list. **kwargs: See below mail_options (List): Mail options (see smtplib documentation) rcpt_options (List): Recipient options (see smtplib documentation) Returns: None
def get_account_by_b58_address(self, b58_address: str, password: str) -> Account:
    """
    Decrypt and return the Account stored under a base58 address.

    :param b58_address: a base58 encode address.
    :param password: a password which is used to decrypt the encrypted private key.
    :return: the decrypted Account instance.
    """
    acct_data = self.get_account_data_by_b58_address(b58_address)
    scrypt_n = self.wallet_in_mem.scrypt.n
    decoded_salt = base64.b64decode(acct_data.salt)
    private_key = Account.get_gcm_decoded_private_key(
        acct_data.key, password, b58_address, decoded_salt, scrypt_n, self.scheme)
    return Account(private_key, self.scheme)
:param b58_address: a base58 encode address. :param password: a password which is used to decrypt the encrypted private key. :return:
def start(self):
    """Activate a patch, returning any created mock."""
    mocked = self.__enter__()
    # Record this patcher so it can be stopped via the class registry.
    self._active_patches.append(self)
    return mocked
Activate a patch, returning any created mock.
def romanize(text: str) -> str:
    """
    Rendering Thai words in the Latin alphabet or "romanization",
    using the Royal Thai General System of Transcription (RTGS),
    which is the official system published by the Royal Institute of Thailand.
    ถอดเสียงภาษาไทยเป็นอักษรละติน
    :param str text: Thai text to be romanized
    :return: A string of Thai words rendered in the Latin alphabet.
    """
    # Tokenize, romanize each token, and stitch the results back together.
    return "".join(_romanize(token) for token in word_tokenize(text))
Rendering Thai words in the Latin alphabet or "romanization", using the Royal Thai General System of Transcription (RTGS), which is the official system published by the Royal Institute of Thailand. ถอดเสียงภาษาไทยเป็นอักษรละติน :param str text: Thai text to be romanized :return: A string of Thai words rendered in the Latin alphabet.
def parse_line(text):
    """Parse a single line into its indent level and first parsed element.

    :param text: the raw line
    :type text: str
    :return: tuple of (indent, first parsed result)
    """
    indent, remainder = calculate_indent(text)
    parsed = line_parser.parseString(remainder, parseAll=True).asList()
    return indent, parsed[0]
:param text: :type text: str :return:
def update_list_function(self, list_name, list_func):
    """
    Modifies/overwrites an existing list function in the
    locally cached DesignDocument lists dictionary.

    :param str list_name: Name used to identify the list function.
    :param str list_func: Javascript list function.
    :raises CloudantArgumentError: If no list function exists under
        ``list_name``.
    """
    if self.get_list_function(list_name) is None:
        raise CloudantArgumentError(113, list_name)

    # Idiomatic item assignment instead of calling __setitem__ directly.
    self.lists[list_name] = codify(list_func)
Modifies/overwrites an existing list function in the locally cached DesignDocument indexes dictionary. :param str list_name: Name used to identify the list function. :param str list_func: Javascript list function.
def enable_aliases_autocomplete(_, **kwargs):
    """ Enable aliases autocomplete by injecting aliases into Azure CLI tab completion list.

    Results are communicated back to the caller by mutating the
    ``external_completions`` list passed in through ``kwargs``.
    """
    external_completions = kwargs.get('external_completions', [])
    prefix = kwargs.get('cword_prefix', [])
    cur_commands = kwargs.get('comp_words', [])
    alias_table = get_alias_table()
    # Transform aliases if they are in current commands,
    # so parser can get the correct subparser when chaining aliases
    _transform_cur_commands(cur_commands, alias_table=alias_table)

    for alias, alias_command in filter_aliases(alias_table):
        if alias.startswith(prefix) and alias.strip() != prefix and _is_autocomplete_valid(cur_commands, alias_command):
            # Only autocomplete the first word because alias is space-delimited
            external_completions.append(alias)

    # Append spaces if necessary (https://github.com/kislyuk/argcomplete/blob/master/argcomplete/__init__.py#L552-L559)
    prequote = kwargs.get('cword_prequote', '')
    # Characters after which the shell should not insert a trailing space.
    continuation_chars = "=/:"
    if len(external_completions) == 1 and external_completions[0][-1] not in continuation_chars and not prequote:
        external_completions[0] += ' '
Enable aliases autocomplete by injecting aliases into Azure CLI tab completion list.
def itemData(self, treeItem, column, role=Qt.DisplayRole):
    """ Returns the data stored under the given role for the item.
    """
    # Text shown in the tree view cells.
    if role == Qt.DisplayRole:
        if column == self.COL_NODE_NAME:
            return treeItem.nodeName
        elif column == self.COL_NODE_PATH:
            return treeItem.nodePath
        elif column == self.COL_SHAPE:
            if treeItem.isSliceable:
                return " x ".join(str(elem) for elem in treeItem.arrayShape)
            else:
                return ""
        elif column == self.COL_IS_OPEN:
            # Only show for RTIs that actually open resources.
            # TODO: this must be clearer. Use CanFetchChildren? Set is Open to None by default?
            if treeItem.hasChildren():
                return str(treeItem.isOpen)
            else:
                return ""
        elif column == self.COL_ELEM_TYPE:
            return treeItem.elementTypeName
        elif column == self.COL_FILE_NAME:
            return treeItem.fileName if hasattr(treeItem, 'fileName') else ''
        elif column == self.COL_UNIT:
            return treeItem.unit
        elif column == self.COL_MISSING_DATA:
            return to_string(treeItem.missingDataValue, noneFormat='')  # empty str for Nones
        elif column == self.COL_RTI_TYPE:
            return type_name(treeItem)
        elif column == self.COL_EXCEPTION:
            return str(treeItem.exception) if treeItem.exception else ''
        else:
            raise ValueError("Invalid column: {}".format(column))

    # Hover tooltips; an exception on the item takes precedence.
    elif role == Qt.ToolTipRole:
        if treeItem.exception:
            return str(treeItem.exception)
        if column == self.COL_NODE_NAME:
            return treeItem.nodePath  # Also path when hovering over the name
        elif column == self.COL_NODE_PATH:
            return treeItem.nodePath
        elif column == self.COL_SHAPE:
            if treeItem.isSliceable:
                return " x ".join(str(elem) for elem in treeItem.arrayShape)
            else:
                return ""
        elif column == self.COL_UNIT:
            return treeItem.unit
        elif column == self.COL_MISSING_DATA:
            return to_string(treeItem.missingDataValue, noneFormat='')  # empty str for Nones
        elif column == self.COL_RTI_TYPE:
            return type_name(treeItem)
        elif column == self.COL_ELEM_TYPE:
            return treeItem.elementTypeName
        elif column == self.COL_FILE_NAME:
            return treeItem.fileName if hasattr(treeItem, 'fileName') else ''
        else:
            return None
    else:
        # Delegate all other roles to the base class implementation.
        return super(RepoTreeModel, self).itemData(treeItem, column, role=role)
Returns the data stored under the given role for the item.
def predict(self, trial_history):
    """predict the value of target position

    Parameters
    ----------
    trial_history: list
        The history performance matrix of each trial.

    Returns
    -------
    float
        expected final result performance of this hyperparameter config
    """
    self.trial_history = trial_history
    self.point_num = len(trial_history)
    self.fit_theta()
    self.filter_curve()
    if self.effective_model_num < LEAST_FITTED_FUNCTION:
        # different curve's predictions are too scattered, requires more information
        return None
    self.mcmc_sampling()
    # Average the combined-model prediction over all MCMC weight samples.
    total = sum(self.f_comb(self.target_pos, self.weight_samples[idx])
                for idx in range(NUM_OF_INSTANCE))
    return total / NUM_OF_INSTANCE
predict the value of target position Parameters ---------- trial_history: list The history performance matrix of each trial. Returns ------- float expected final result performance of this hyperparameter config
def properties(self):
    """
    Property for accessing property (doh!) manager of the current job.

    :return: instance of :class:`yagocd.resources.property.PropertyManager`
    :rtype: yagocd.resources.property.PropertyManager
    """
    manager = PropertyManager(
        session=self._session,
        pipeline_name=self.pipeline_name,
        pipeline_counter=self.pipeline_counter,
        stage_name=self.stage_name,
        stage_counter=self.stage_counter,
        job_name=self.data.name,
    )
    return manager
Property for accessing property (doh!) manager of the current job. :return: instance of :class:`yagocd.resources.property.PropertyManager` :rtype: yagocd.resources.property.PropertyManager
def _patch_property(self, name, value):
    """Update one field of this object's local properties.

    Only the given field is touched; nothing is reloaded from the server.
    The change is local only - syncing happens via :meth:`patch`.

    :type name: str
    :param name: The field name to update.

    :type value: object
    :param value: The value being updated.
    """
    self._properties[name] = value
    # Track the field so :meth:`patch` knows what to send.
    self._changes.add(name)
Update field of this object's properties. This method will only update the field provided and will not touch the other fields. It **will not** reload the properties from the server. The behavior is local only and syncing occurs via :meth:`patch`. :type name: str :param name: The field name to update. :type value: object :param value: The value being updated.
def end_comma(self, value):
    """Validate and set the comma termination flag."""
    if isinstance(value, bool):
        self._end_comma = value
    else:
        raise TypeError('end_comma attribute must be a logical type.')
Validate and set the comma termination flag.
def format_listeners(elb_settings=None, env='dev', region='us-east-1'):
    """Format ELB Listeners into standard list.

    Args:
        elb_settings (dict): ELB settings including ELB Listeners to add,
            e.g.::

                # old
                {
                    "certificate": null,
                    "i_port": 8080,
                    "lb_port": 80,
                    "subnet_purpose": "internal",
                    "target": "HTTP:8080/health"
                }

                # new
                {
                    "ports": [
                        {
                            "instance": "HTTP:8080",
                            "loadbalancer": "HTTP:80"
                        },
                        {
                            "certificate": "cert_name",
                            "instance": "HTTP:8443",
                            "loadbalancer": "HTTPS:443"
                        }
                    ],
                    "subnet_purpose": "internal",
                    "target": "HTTP:8080/health"
                }

        env (str): Environment to find the Account Number for.
        region (str): AWS region used when formatting certificate names.

    Returns:
        list: ELB Listeners formatted into dicts for Spinnaker::

            [
                {
                    'externalPort': 80,
                    'externalProtocol': 'HTTP',
                    'internalPort': 8080,
                    'internalProtocol': 'HTTP',
                    'sslCertificateId': None,
                    'listenerPolicies': [],
                    'backendPolicies': []
                },
                ...
            ]
    """
    LOG.debug('ELB settings:\n%s', elb_settings)

    # Account id is needed to fully qualify certificate names.
    credential = get_env_credential(env=env)
    account = credential['accountId']

    listeners = []

    if 'ports' in elb_settings:
        # New-style settings: one entry per "PROTO:port" pair.
        for listener in elb_settings['ports']:
            cert_name = format_cert_name(
                env=env, region=region, account=account,
                certificate=listener.get('certificate', None))

            lb_proto, lb_port = listener['loadbalancer'].split(':')
            i_proto, i_port = listener['instance'].split(':')
            # 'policies' is the legacy key for listener policies.
            listener_policies = listener.get('policies', [])
            listener_policies += listener.get('listener_policies', [])
            backend_policies = listener.get('backend_policies', [])

            elb_data = {
                'externalPort': int(lb_port),
                'externalProtocol': lb_proto.upper(),
                'internalPort': int(i_port),
                'internalProtocol': i_proto.upper(),
                'sslCertificateId': cert_name,
                'listenerPolicies': listener_policies,
                'backendPolicies': backend_policies,
            }

            listeners.append(elb_data)
    else:
        # Old-style flat settings: exactly one listener.
        listener_policies = elb_settings.get('policies', [])
        listener_policies += elb_settings.get('listener_policies', [])
        backend_policies = elb_settings.get('backend_policies', [])

        listeners = [{
            'externalPort': int(elb_settings['lb_port']),
            'externalProtocol': elb_settings['lb_proto'],
            'internalPort': int(elb_settings['i_port']),
            'internalProtocol': elb_settings['i_proto'],
            'sslCertificateId': elb_settings['certificate'],
            'listenerPolicies': listener_policies,
            'backendPolicies': backend_policies,
        }]

    for listener in listeners:
        LOG.info('ELB Listener:\n'
                 'loadbalancer %(externalProtocol)s:%(externalPort)d\n'
                 'instance %(internalProtocol)s:%(internalPort)d\n'
                 'certificate: %(sslCertificateId)s\n'
                 'listener_policies: %(listenerPolicies)s\n'
                 'backend_policies: %(backendPolicies)s', listener)
    return listeners
Format ELB Listeners into standard list. Args: elb_settings (dict): ELB settings including ELB Listeners to add, e.g.:: # old { "certificate": null, "i_port": 8080, "lb_port": 80, "subnet_purpose": "internal", "target": "HTTP:8080/health" } # new { "ports": [ { "instance": "HTTP:8080", "loadbalancer": "HTTP:80" }, { "certificate": "cert_name", "instance": "HTTP:8443", "loadbalancer": "HTTPS:443" } ], "subnet_purpose": "internal", "target": "HTTP:8080/health" } env (str): Environment to find the Account Number for. Returns: list: ELB Listeners formatted into dicts for Spinnaker:: [ { 'externalPort': 80, 'externalProtocol': 'HTTP', 'internalPort': 8080, 'internalProtocol': 'HTTP', 'sslCertificateId': None, 'listenerPolicies': [], 'backendPolicies': [] }, ... ]
def predict_density(self, Fmu, Fvar, Y): r""" Given a Normal distribution for the latent function, and a datum Y, compute the log predictive density of Y. i.e. if q(f) = N(Fmu, Fvar) and this object represents p(y|f) then this method computes the predictive density \log \int p(y=Y|f)q(f) df Here, we implement a default Gauss-Hermite quadrature routine, but some likelihoods (Gaussian, Poisson) will implement specific cases. """ return ndiagquad(self.logp, self.num_gauss_hermite_points, Fmu, Fvar, logspace=True, Y=Y)
r""" Given a Normal distribution for the latent function, and a datum Y, compute the log predictive density of Y. i.e. if q(f) = N(Fmu, Fvar) and this object represents p(y|f) then this method computes the predictive density \log \int p(y=Y|f)q(f) df Here, we implement a default Gauss-Hermite quadrature routine, but some likelihoods (Gaussian, Poisson) will implement specific cases.
def watts2pascal(watts, cfm, fan_tot_eff): """convert and return inputs for E+ in pascal and m3/s""" bhp = watts2bhp(watts) return bhp2pascal(bhp, cfm, fan_tot_eff)
convert and return inputs for E+ in pascal and m3/s
def server_doc(self_or_cls, obj, doc=None): """ Get a bokeh Document with the plot attached. May supply an existing doc, otherwise bokeh.io.curdoc() is used to attach the plot to the global document instance. """ if not isinstance(obj, (Plot, BokehServerWidgets)): if not isinstance(self_or_cls, BokehRenderer) or self_or_cls.mode != 'server': renderer = self_or_cls.instance(mode='server') else: renderer = self_or_cls plot, _ = renderer._validate(obj, 'auto') else: plot = obj root = plot.state if isinstance(plot, BokehServerWidgets): plot = plot.plot if doc is None: doc = plot.document else: plot.document = doc plot.traverse(lambda x: attach_periodic(x), [GenericElementPlot]) doc.add_root(root) return doc
Get a bokeh Document with the plot attached. May supply an existing doc, otherwise bokeh.io.curdoc() is used to attach the plot to the global document instance.
def merge(dst, src, separator="/", afilter=None, flags=MERGE_ADDITIVE, _path=""):
    """Merge source into destination. Like dict.update() but performs
    deep merging.

    flags is an OR'ed combination of MERGE_ADDITIVE, MERGE_REPLACE, or
    MERGE_TYPESAFE.

    * MERGE_ADDITIVE : List objects are combined onto one long
      list (NOT a set). This is the default flag.

    * MERGE_REPLACE : Instead of combining list objects, when
      2 list objects are at an equal depth of merge, replace
      the destination with the source.

    * MERGE_TYPESAFE : When 2 keys at equal levels are of different
      types, raise a TypeError exception. By default, the source
      replaces the destination in this situation.
    """
    if afilter:
        # Having merge do its own afiltering is dumb, let search do the
        # heavy lifting for us.
        src = search(src, '**', afilter=afilter)
        return merge(dst, src)

    def _check_typesafe(obj1, obj2, key, path):
        # Only meaningful when the key exists on both sides.
        if key not in obj1:
            return
        elif ((flags & MERGE_TYPESAFE == MERGE_TYPESAFE) and
              (type(obj1[key]) != type(obj2[key]))):
            raise TypeError("Cannot merge objects of type {0} and {1} at {2}"
                            "".format(type(obj1[key]), type(obj2[key]), path))
        elif ((flags & MERGE_TYPESAFE != MERGE_TYPESAFE) and
              (type(obj1[key]) != type(obj2[key]))):
            # Non-typesafe default: drop the destination value so the source
            # replaces it below.
            obj1.pop(key)

    if isinstance(src, MutableMapping):
        for key in src:
            _check_typesafe(dst, src, key, separator.join([_path, str(key)]))

            if key not in dst:
                dst[key] = src[key]
            elif not isinstance(src[key], (MutableMapping, MutableSequence)):
                # Leaf value: source wins.
                dst[key] = src[key]
            else:
                # Both sides are containers: recurse.
                merge(dst[key], src[key], afilter=afilter, flags=flags,
                      _path=separator.join([_path, str(key)]),
                      separator=separator)
    elif isinstance(src, MutableSequence):
        for (i, v) in enumerate(src):
            _check_typesafe(dst, src, i, separator.join([_path, str(i)]))
            dsti = i
            if flags & MERGE_ADDITIVE:
                # Additive merge appends instead of overlaying by index.
                dsti = len(dst)
            if dsti >= len(dst):
                # Pad the destination so dst[dsti] exists.
                dst += [None] * (dsti - (len(dst) - 1))
            # 'is None' (not '== None'): avoids invoking custom __eq__ on
            # elements such as numpy arrays.
            if dst[dsti] is None:
                dst[dsti] = src[i]
            elif not isinstance(src[i], (MutableMapping, MutableSequence)):
                dst[dsti] = src[i]
            else:
                # Only reachable without MERGE_ADDITIVE, where dsti == i.
                merge(dst[i], src[i], afilter=afilter, flags=flags,
                      _path=separator.join([_path, str(i)]),
                      separator=separator)
Merge source into destination. Like dict.update() but performs deep merging. flags is an OR'ed combination of MERGE_ADDITIVE, MERGE_REPLACE, or MERGE_TYPESAFE. * MERGE_ADDITIVE : List objects are combined onto one long list (NOT a set). This is the default flag. * MERGE_REPLACE : Instead of combining list objects, when 2 list objects are at an equal depth of merge, replace the destination with the source. * MERGE_TYPESAFE : When 2 keys at equal levels are of different types, raise a TypeError exception. By default, the source replaces the destination in this situation.
def _store_documentation(self, path, html, overwrite, quiet): """ Stores all documents on the file system. Target location is **path**. File name is the lowercase name of the document + .rst. """ echo("Storing groundwork application documents\n") echo("Application: %s" % self.app.name) echo("Number of documents: %s\n" % len(self.app.documents.get())) if not os.path.isabs(path): path = os.path.abspath(path) if not os.path.isdir(path): echo("Path %s is not a directory!" % path) sys.exit(1) if not os.path.exists(path): echo("Path %s does not exist" % path) sys.exit(1) for dirpath, dirnames, files in os.walk(path): if files: echo("Path %s is not empty!\n" % path) if not overwrite: sys.exit(1) documents = [] for key, document in self.app.documents.get().items(): file_extension = ".html" if html else ".rst" # lowers the name, removes all whitespaces and adds the file extension file_name_parts = key.lower().split() file_name = "".join(file_name_parts) file_name += file_extension documents.append((file_name, document)) echo("Going to write to following files:") for document in documents: echo(" %s" % document[0]) echo("\nTarget directory: %s" % path) answer = None while answer not in ["N", "Y"] and not quiet: answer = prompt("Shall we go on? [Y]es, [N]o: ").upper() if answer == "N": sys.exit(0) for document in documents: try: with open(os.path.join(path, document[0]), "w") as doc_file: doc_rendered = Environment().from_string(document[1].content).render(app=self.app, plugin=document[1].plugin) if html: output = publish_parts(doc_rendered, writer_name="html")['whole'] else: output = doc_rendered doc_file.write(output) except Exception as e: echo("%s error occurred: %s" % (document[0], e)) else: echo("%s stored." % document[0])
Stores all documents on the file system. Target location is **path**. File name is the lowercase name of the document + .rst.
def _eval_args(args): """Internal helper for get_args.""" res = [] for arg in args: if not isinstance(arg, tuple): res.append(arg) elif is_callable_type(arg[0]): callable_args = _eval_args(arg[1:]) if len(arg) == 2: res.append(Callable[[], callable_args[0]]) elif arg[1] is Ellipsis: res.append(Callable[..., callable_args[1]]) else: res.append(Callable[list(callable_args[:-1]), callable_args[-1]]) else: res.append(type(arg[0]).__getitem__(arg[0], _eval_args(arg[1:]))) return tuple(res)
Internal helper for get_args.
def GetHostMemPhysFreeMB(self): '''Undocumented.''' counter = c_uint() ret = vmGuestLib.VMGuestLib_GetHostMemPhysFreeMB(self.handle.value, byref(counter)) if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret) return counter.value
Undocumented.
def unquote(str): """Remove quotes from a string.""" if len(str) > 1: if str.startswith('"') and str.endswith('"'): return str[1:-1].replace('\\\\', '\\').replace('\\"', '"') if str.startswith('<') and str.endswith('>'): return str[1:-1] return str
Remove quotes from a string.
def make_transient(std, DMmax, Amin=6., Amax=20., rmax=20., rmin=0., DMmin=0.): """ Produce a mock transient pulse source for the purposes of characterizing the detection success of the current pipeline. Assumes - Code to inject the transients does so by inserting at an array index - Noise level at the center of the data array is characteristic of the noise level throughout Input std - noise level in visibilities(?) at mid-point of segment DMmax - maximum DM at which mock transient can be inserted [pc/cm^3] Amin/Amax is amplitude in units of the std (calculated below) rmax/rmin is radius range in arcmin DMmin is min DM Returns loff - direction cosine offset of mock transient from phase center [radians] moff - direction cosine offset of mock transient from phase center [radians] A - amplitude of transient [std units] DM - dispersion measure of mock transient [pc/cm^3] """ rad_arcmin = math.pi/(180*60) phimin = 0.0 phimax = 2*math.pi # Amplitude of transient, done in units of the std # std is calculated assuming that noise level in the middle of the data, # at index d['readints']/2, is characteristic of that throughout the data A = random.uniform(Amin, Amax) * std # Position of transient, in direction cosines r = random.uniform(rmin, rmax) phi = random.uniform(phimin, phimax) loff = r*math.cos(phi) * rad_arcmin moff = r*math.sin(phi) * rad_arcmin # Dispersion measure DM = random.uniform(DMmin, DMmax) return loff, moff, A, DM
Produce a mock transient pulse source for the purposes of characterizing the detection success of the current pipeline. Assumes - Code to inject the transients does so by inserting at an array index - Noise level at the center of the data array is characteristic of the noise level throughout Input std - noise level in visibilities(?) at mid-point of segment DMmax - maximum DM at which mock transient can be inserted [pc/cm^3] Amin/Amax is amplitude in units of the std (calculated below) rmax/rmin is radius range in arcmin DMmin is min DM Returns loff - direction cosine offset of mock transient from phase center [radians] moff - direction cosine offset of mock transient from phase center [radians] A - amplitude of transient [std units] DM - dispersion measure of mock transient [pc/cm^3]
def network(n): """Validate a |Network|. Checks the TPM and connectivity matrix. """ tpm(n.tpm) connectivity_matrix(n.cm) if n.cm.shape[0] != n.size: raise ValueError("Connectivity matrix must be NxN, where N is the " "number of nodes in the network.") return True
Validate a |Network|. Checks the TPM and connectivity matrix.
def write_base (self, url_data): """Write url_data.base_ref.""" self.writeln(u"<tr><td>"+self.part("base")+u"</td><td>"+ cgi.escape(url_data.base_ref)+u"</td></tr>")
Write url_data.base_ref.
def calculate_anim(infiles, org_lengths):
    """Returns ANIm result dataframes for files in input directory.

    - infiles - paths to each input file
    - org_lengths - dictionary of input sequence lengths, keyed by sequence

    Finds ANI by the ANIm method, as described in Richter et al (2009)
    Proc Natl Acad Sci USA 106: 19126-19131 doi:10.1073/pnas.0906412106.

    All FASTA format files (selected by suffix) in the input directory
    are compared against each other, pairwise, using NUCmer (which must
    be in the path). NUCmer output is stored in the output directory.

    The NUCmer .delta file output is parsed to obtain an alignment length
    and similarity error count for every unique region alignment between
    the two organisms, as represented by the sequences in the FASTA files.

    These are processed to give matrices of aligned sequence lengths,
    average nucleotide identity (ANI) percentages, coverage (aligned
    percentage of whole genome), and similarity error count for each
    pairwise comparison.

    NOTE: relies on the module-level ``args`` namespace (command-line
    options) and module-level ``logger``.
    """
    logger.info("Running ANIm")
    logger.info("Generating NUCmer command-lines")
    deltadir = os.path.join(args.outdirname, ALIGNDIR["ANIm"])
    logger.info("Writing nucmer output to %s", deltadir)
    # Schedule NUCmer runs
    if not args.skip_nucmer:
        joblist = anim.generate_nucmer_jobs(
            infiles,
            args.outdirname,
            nucmer_exe=args.nucmer_exe,
            filter_exe=args.filter_exe,
            maxmatch=args.maxmatch,
            jobprefix=args.jobprefix,
        )
        if args.scheduler == "multiprocessing":
            logger.info("Running jobs with multiprocessing")
            if args.workers is None:
                logger.info("(using maximum number of available " + "worker threads)")
            else:
                logger.info("(using %d worker threads, if available)", args.workers)
            # cumval accumulates job return codes; nonzero signals failures.
            cumval = run_mp.run_dependency_graph(
                joblist, workers=args.workers, logger=logger
            )
            logger.info("Cumulative return value: %d", cumval)
            if 0 < cumval:
                logger.warning(
                    "At least one NUCmer comparison failed. " + "ANIm may fail."
                )
            else:
                logger.info("All multiprocessing jobs complete.")
        else:
            logger.info("Running jobs with SGE")
            logger.info("Jobarray group size set to %d", args.sgegroupsize)
            run_sge.run_dependency_graph(
                joblist,
                logger=logger,
                jgprefix=args.jobprefix,
                sgegroupsize=args.sgegroupsize,
                sgeargs=args.sgeargs,
            )
    else:
        logger.warning("Skipping NUCmer run (as instructed)!")

    # Process resulting .delta files
    logger.info("Processing NUCmer .delta files.")
    results = anim.process_deltadir(deltadir, org_lengths, logger=logger)
    if results.zero_error:  # zero percentage identity error
        # Distinguish genuine NUCmer failures from overly-distant comparisons.
        if not args.skip_nucmer and args.scheduler == "multiprocessing":
            if 0 < cumval:
                logger.error(
                    "This has possibly been a NUCmer run failure, "
                    + "please investigate"
                )
                logger.error(last_exception())
                sys.exit(1)
            else:
                logger.error(
                    "This is possibly due to a NUCmer comparison "
                    + "being too distant for use. Please consider "
                    + "using the --maxmatch option."
                )
                logger.error(
                    "This is alternatively due to NUCmer run "
                    + "failure, analysis will continue, but please "
                    + "investigate."
                )
    if not args.nocompress:
        logger.info("Compressing/deleting %s", deltadir)
        compress_delete_outdir(deltadir)

    # Return processed data from .delta files
    return results
Returns ANIm result dataframes for files in input directory. - infiles - paths to each input file - org_lengths - dictionary of input sequence lengths, keyed by sequence Finds ANI by the ANIm method, as described in Richter et al (2009) Proc Natl Acad Sci USA 106: 19126-19131 doi:10.1073/pnas.0906412106. All FASTA format files (selected by suffix) in the input directory are compared against each other, pairwise, using NUCmer (which must be in the path). NUCmer output is stored in the output directory. The NUCmer .delta file output is parsed to obtain an alignment length and similarity error count for every unique region alignment between the two organisms, as represented by the sequences in the FASTA files. These are processed to give matrices of aligned sequence lengths, average nucleotide identity (ANI) percentages, coverage (aligned percentage of whole genome), and similarity error count for each pairwise comparison.
def resolve_path( self, path, root_id='0', objects=False ): '''Return id (or metadata) of an object, specified by chain (iterable or fs-style path string) of "name" attributes of it's ancestors, or raises DoesNotExists error. Requires a lot of calls to resolve each name in path, so use with care. root_id parameter allows to specify path relative to some folder_id (default: 0).''' if path: if isinstance(path, types.StringTypes): path = filter(None, path.split(os.sep)) if path: try: for i, name in enumerate(path): root_id = dict(it.imap( op.itemgetter('name', 'id'), (yield self.listdir(root_id)) ))[name] except (KeyError, ProtocolError) as err: if isinstance(err, ProtocolError) and err.code != 404: raise raise DoesNotExists(root_id, path[i:]) defer.returnValue(root_id if not objects else (yield self.info(root_id)))
Return id (or metadata) of an object, specified by chain (iterable or fs-style path string) of "name" attributes of it's ancestors, or raises DoesNotExists error. Requires a lot of calls to resolve each name in path, so use with care. root_id parameter allows to specify path relative to some folder_id (default: 0).
def get_revision_history(brain_or_object): """Get the revision history for the given brain or context. :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :returns: Workflow history :rtype: obj """ obj = get_object(brain_or_object) chv = ContentHistoryView(obj, safe_getattr(obj, "REQUEST", None)) return chv.fullHistory()
Get the revision history for the given brain or context. :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :returns: Workflow history :rtype: obj
def plotwrapper(f): """ This decorator allows for PyMC arguments of various types to be passed to the plotting functions. It identifies the type of object and locates its trace(s), then passes the data to the wrapped plotting function. """ def wrapper(pymc_obj, *args, **kwargs): start = 0 if 'start' in kwargs: start = kwargs.pop('start') # Figure out what type of object it is try: # First try Model type for variable in pymc_obj._variables_to_tally: # Plot object if variable._plot is not False: data = pymc_obj.trace(variable.__name__)[start:] if size(data[-1]) >= 10 and variable._plot != True: continue elif variable.dtype is dtype('object'): continue name = variable.__name__ if args: name = '%s_%s' % (args[0], variable.__name__) f(data, name, *args, **kwargs) return except AttributeError: pass try: # Then try Trace type data = pymc_obj()[:] name = pymc_obj.name f(data, name, *args, **kwargs) return except (AttributeError, TypeError): pass try: # Then try Node type if pymc_obj._plot is not False: data = pymc_obj.trace()[start:] # This is deprecated. DH name = pymc_obj.__name__ f(data, name, *args, **kwargs) return except AttributeError: pass if isinstance(pymc_obj, dict): # Then try dictionary for i in pymc_obj: data = pymc_obj[i][start:] if args: i = '%s_%s' % (args[0], i) elif 'name' in kwargs: i = '%s_%s' % (kwargs.pop('name'), i) f(data, i, *args, **kwargs) return # If others fail, assume that raw data is passed f(pymc_obj, *args, **kwargs) wrapper.__doc__ = f.__doc__ wrapper.__name__ = f.__name__ return wrapper
This decorator allows for PyMC arguments of various types to be passed to the plotting functions. It identifies the type of object and locates its trace(s), then passes the data to the wrapped plotting function.
def CallMethod(self, method, controller, request, response_class, done): '''Call the RPC method. The naming doesn't confirm PEP8, since it's a method called by protobuf ''' try: self.validate_request(request) if not self.sock: self.get_connection(self.host, self.port) self.send_rpc_message(method, request) byte_stream = self.recv_rpc_message() return self.parse_response(byte_stream, response_class) except RequestError: # Raise a request error, but don't close the socket raise except Exception: # All other errors close the socket self.close_socket() raise
Call the RPC method. The naming doesn't conform to PEP 8, since it's a method called by protobuf
def _clean_index(self): "Clean index values after loading." for idx_name, idx_def in self.index_defs.items(): if idx_def['type'] == 'lazy': self.build_index(idx_name) for index_name, values in self.indexes.items(): for value in values: if not isinstance(values[value], set): values[value] = set(values[value])
Clean index values after loading.
def get_shapes_intersecting_geometry( feed: "Feed", geometry, geo_shapes=None, *, geometrized: bool = False ) -> DataFrame: """ Return the slice of ``feed.shapes`` that contains all shapes that intersect the given Shapely geometry, e.g. a Polygon or LineString. Parameters ---------- feed : Feed geometry : Shapley geometry, e.g. a Polygon Specified in WGS84 coordinates geo_shapes : GeoPandas GeoDataFrame The output of :func:`geometrize_shapes` geometrize : boolean If ``True``, then return the shapes DataFrame as a GeoDataFrame of the form output by :func:`geometrize_shapes` Returns ------- DataFrame or GeoDataFrame Notes ----- - Requires GeoPandas - Specifying ``geo_shapes`` will skip the first step of the algorithm, namely, geometrizing ``feed.shapes`` - Assume the following feed attributes are not ``None``: * ``feed.shapes``, if ``geo_shapes`` is not given """ if geo_shapes is not None: f = geo_shapes.copy() else: f = geometrize_shapes(feed.shapes) cols = f.columns f["hit"] = f["geometry"].intersects(geometry) f = f[f["hit"]][cols] if geometrized: return f else: return ungeometrize_shapes(f)
Return the slice of ``feed.shapes`` that contains all shapes that intersect the given Shapely geometry, e.g. a Polygon or LineString. Parameters ---------- feed : Feed geometry : Shapley geometry, e.g. a Polygon Specified in WGS84 coordinates geo_shapes : GeoPandas GeoDataFrame The output of :func:`geometrize_shapes` geometrize : boolean If ``True``, then return the shapes DataFrame as a GeoDataFrame of the form output by :func:`geometrize_shapes` Returns ------- DataFrame or GeoDataFrame Notes ----- - Requires GeoPandas - Specifying ``geo_shapes`` will skip the first step of the algorithm, namely, geometrizing ``feed.shapes`` - Assume the following feed attributes are not ``None``: * ``feed.shapes``, if ``geo_shapes`` is not given
def get_port_channel_detail_output_lacp_partner_system_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_port_channel_detail = ET.Element("get_port_channel_detail") config = get_port_channel_detail output = ET.SubElement(get_port_channel_detail, "output") lacp = ET.SubElement(output, "lacp") partner_system_id = ET.SubElement(lacp, "partner-system-id") partner_system_id.text = kwargs.pop('partner_system_id') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def hide_routemap_holder_route_map_content_set_automatic_tag_tag_empty(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy") route_map = ET.SubElement(hide_routemap_holder, "route-map") name_key = ET.SubElement(route_map, "name") name_key.text = kwargs.pop('name') action_rm_key = ET.SubElement(route_map, "action-rm") action_rm_key.text = kwargs.pop('action_rm') instance_key = ET.SubElement(route_map, "instance") instance_key.text = kwargs.pop('instance') content = ET.SubElement(route_map, "content") set = ET.SubElement(content, "set") automatic_tag = ET.SubElement(set, "automatic-tag") tag_empty = ET.SubElement(automatic_tag, "tag-empty") callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def __rv_french(self, word, vowels): """ Return the region RV that is used by the French stemmer. If the word begins with two vowels, RV is the region after the third letter. Otherwise, it is the region after the first vowel not at the beginning of the word, or the end of the word if these positions cannot be found. (Exceptionally, u'par', u'col' or u'tap' at the beginning of a word is also taken to define RV as the region to their right.) :param word: The French word whose region RV is determined. :type word: str or unicode :param vowels: The French vowels that are used to determine the region RV. :type vowels: unicode :return: the region RV for the respective French word. :rtype: unicode :note: This helper method is invoked by the stem method of the subclass FrenchStemmer. It is not to be invoked directly! """ rv = "" if len(word) >= 2: if (word.startswith(("par", "col", "tap")) or (word[0] in vowels and word[1] in vowels)): rv = word[3:] else: for i in range(1, len(word)): if word[i] in vowels: rv = word[i+1:] break return rv
Return the region RV that is used by the French stemmer. If the word begins with two vowels, RV is the region after the third letter. Otherwise, it is the region after the first vowel not at the beginning of the word, or the end of the word if these positions cannot be found. (Exceptionally, u'par', u'col' or u'tap' at the beginning of a word is also taken to define RV as the region to their right.) :param word: The French word whose region RV is determined. :type word: str or unicode :param vowels: The French vowels that are used to determine the region RV. :type vowels: unicode :return: the region RV for the respective French word. :rtype: unicode :note: This helper method is invoked by the stem method of the subclass FrenchStemmer. It is not to be invoked directly!
def load(path, include_core=True, path_in_arc=''):
    """ Loads a IOSystem or Extension previously saved with pymrio

    This function can be used to load a IOSystem or Extension specified in a
    metadata file (as defined in DEFAULT_FILE_NAMES['filepara']:
    metadata.json)

    DataFrames (tables) are loaded from text or binary pickle files.
    For the latter, the extension .pkl or .pickle is assumed, in all other
    case the tables are assumed to be in .txt format.

    Parameters
    ----------
    path : pathlib.Path or string
        Path or path with para file name for the data to load.
        This must either point to the directory containing the uncompressed
        data or the location of a compressed zip file with the data. In the
        later case the parameter 'path_in_arc' need to be specific to
        further indicate the location of the data in the compressed file.

    include_core : boolean, optional
        If False the load method does not include A, L and Z matrix. This
        significantly reduces the required memory if the purpose is only
        to analyse the results calculated beforehand.

    path_in_arc: string, optional
        Path to the data in the zip file (where the fileparameters file is
        located). path_in_arc must be given without leading dot and slash;
        thus to point to the data in the root of the compressed file pass '',
        for data in e.g. the folder 'emissions' pass 'emissions/'.  Only used
        if parameter 'path' points to an compressed zip file.

    Returns
    -------
    IOSystem or Extension class depending on systemtype in the json file

    Raises
    ------
    ReadError
        If the path does not exist or the systemtype is unknown.
    """
    path = Path(path)

    if not path.exists():
        raise ReadError('Given path does not exist')

    file_para = get_file_para(path=path, path_in_arc=path_in_arc)

    if file_para.content['systemtype'] == GENERIC_NAMES['iosys']:
        if zipfile.is_zipfile(str(path)):
            ret_system = IOSystem(meta=MRIOMetaData(
                location=path,
                path_in_arc=os.path.join(file_para.folder,
                                         DEFAULT_FILE_NAMES['metadata'])))
            ret_system.meta._add_fileio(
                "Loaded IO system from {} - {}".format(path, path_in_arc))
        else:
            ret_system = IOSystem(meta=MRIOMetaData(
                location=path / DEFAULT_FILE_NAMES['metadata']))
            ret_system.meta._add_fileio(
                "Loaded IO system from {}".format(path))
    elif file_para.content['systemtype'] == GENERIC_NAMES['ext']:
        ret_system = Extension(file_para.content['name'])
    else:
        # BUG FIX: corrected message typo ('no defined') and removed an
        # unreachable 'return None' that followed this raise.
        raise ReadError('Type of system not defined in the file parameters')

    for key in file_para.content['files']:
        # BUG FIX: with include_core=False the core matrices A, L and Z must
        # be skipped (documented contract); the previous condition
        # ('key not in') skipped everything *except* the core.
        if not include_core and key in ('A', 'L', 'Z'):
            continue

        file_meta = file_para.content['files'][key]
        file_name = file_meta['name']

        # Single-column index/header specs collapse to scalar 0 for pandas.
        _index_col = list(range(int(file_meta['nr_index_col'])))
        _header = list(range(int(file_meta['nr_header'])))
        _index_col = 0 if _index_col == [0] else _index_col
        _header = 0 if _header == [0] else _header

        if zipfile.is_zipfile(str(path)):
            full_file_name = os.path.join(file_para.folder, file_name)
            logging.info('Load data from {}'.format(full_file_name))
            with zipfile.ZipFile(file=str(path)) as zf:
                if os.path.splitext(str(full_file_name))[1] in ('.pkl',
                                                                '.pickle'):
                    setattr(ret_system, key,
                            pd.read_pickle(zf.open(full_file_name)))
                else:
                    setattr(ret_system, key,
                            pd.read_table(zf.open(full_file_name),
                                          index_col=_index_col,
                                          header=_header))
        else:
            full_file_name = path / file_name
            logging.info('Load data from {}'.format(full_file_name))
            if os.path.splitext(str(full_file_name))[1] in ('.pkl',
                                                            '.pickle'):
                setattr(ret_system, key, pd.read_pickle(full_file_name))
            else:
                setattr(ret_system, key,
                        pd.read_table(full_file_name,
                                      index_col=_index_col,
                                      header=_header))

    return ret_system
Loads a IOSystem or Extension previously saved with pymrio This function can be used to load a IOSystem or Extension specified in a metadata file (as defined in DEFAULT_FILE_NAMES['filepara']: metadata.json) DataFrames (tables) are loaded from text or binary pickle files. For the latter, the extension .pkl or .pickle is assumed, in all other case the tables are assumed to be in .txt format. Parameters ---------- path : pathlib.Path or string Path or path with para file name for the data to load. This must either point to the directory containing the uncompressed data or the location of a compressed zip file with the data. In the later case the parameter 'path_in_arc' need to be specific to further indicate the location of the data in the compressed file. include_core : boolean, optional If False the load method does not include A, L and Z matrix. This significantly reduces the required memory if the purpose is only to analyse the results calculated beforehand. path_in_arc: string, optional Path to the data in the zip file (where the fileparameters file is located). path_in_arc must be given without leading dot and slash; thus to point to the data in the root of the compressed file pass '', for data in e.g. the folder 'emissions' pass 'emissions/'. Only used if parameter 'path' points to an compressed zip file. Returns ------- IOSystem or Extension class depending on systemtype in the json file None in case of errors
def _list_ids(path_to_data): """List raw data IDs grouped by symbol ID from a pickle file ``path_to_data``.""" loaded = pickle.load(open(path_to_data, "rb")) raw_datasets = loaded['handwriting_datasets'] raw_ids = {} for raw_dataset in raw_datasets: raw_data_id = raw_dataset['handwriting'].raw_data_id if raw_dataset['formula_id'] not in raw_ids: raw_ids[raw_dataset['formula_id']] = [raw_data_id] else: raw_ids[raw_dataset['formula_id']].append(raw_data_id) for symbol_id in sorted(raw_ids): print("%i: %s" % (symbol_id, sorted(raw_ids[symbol_id])))
List raw data IDs grouped by symbol ID from a pickle file ``path_to_data``.
def add(self,attrlist,attrvalues): ''' add an attribute :parameter dimlist: list of dimensions :parameter dimvalues: list of values for dimlist ''' for i,d in enumerate(attrlist): self[d] = attrvalues[i]
add an attribute :parameter attrlist: list of attribute names :parameter attrvalues: list of values for attrlist
def visual_callback_2d(background, fig=None): """ Returns a callback than can be passed as the argument `iter_callback` of `morphological_geodesic_active_contour` and `morphological_chan_vese` for visualizing the evolution of the levelsets. Only works for 2D images. Parameters ---------- background : (M, N) array Image to be plotted as the background of the visual evolution. fig : matplotlib.figure.Figure Figure where results will be drawn. If not given, a new figure will be created. Returns ------- callback : Python function A function that receives a levelset and updates the current plot accordingly. This can be passed as the `iter_callback` argument of `morphological_geodesic_active_contour` and `morphological_chan_vese`. """ # Prepare the visual environment. if fig is None: fig = plt.figure() fig.clf() ax1 = fig.add_subplot(1, 2, 1) ax1.imshow(background, cmap=plt.cm.gray) ax2 = fig.add_subplot(1, 2, 2) ax_u = ax2.imshow(np.zeros_like(background), vmin=0, vmax=1) plt.pause(0.001) def callback(levelset): if ax1.collections: del ax1.collections[0] ax1.contour(levelset, [0.5], colors='r') ax_u.set_data(levelset) fig.canvas.draw() plt.pause(0.001) return callback
Returns a callback than can be passed as the argument `iter_callback` of `morphological_geodesic_active_contour` and `morphological_chan_vese` for visualizing the evolution of the levelsets. Only works for 2D images. Parameters ---------- background : (M, N) array Image to be plotted as the background of the visual evolution. fig : matplotlib.figure.Figure Figure where results will be drawn. If not given, a new figure will be created. Returns ------- callback : Python function A function that receives a levelset and updates the current plot accordingly. This can be passed as the `iter_callback` argument of `morphological_geodesic_active_contour` and `morphological_chan_vese`.
def cached_property(getter):
    """
    Decorator turning a zero-argument method into a lazily computed,
    memoized property.

    The computed value is stored on the instance under a private attribute
    name, so it only works for classes whose instances have a '__dict__',
    and it is only correct for properties whose value never changes.
    """
    attr_name = "_cached_property_" + getter.__name__

    @wraps(getter)
    def wrapper(self):
        # EAFP: fetch the cached value; on the first access the attribute
        # does not exist yet, so compute and store it.
        try:
            return getattr(self, attr_name)
        except AttributeError:
            value = getter(self)
            setattr(self, attr_name, value)
            return value

    return property(wrapper)
Decorator that converts a method into memoized property. The decorator works as expected only for classes with attribute '__dict__' and immutable properties.
def getDownloadUrls(self):
    """Return the list of download URLs built from the search index."""
    # Each index entry's first field is the raw identifier that
    # formatDownloadUrl turns into a full URL.
    return [self.formatDownloadUrl(entry[0])
            for entry in self.searchIndex(False)]
Return a list of the urls to download from
def error(self, message):
    """Override the superclass error handler to hide legacy commands.

    The error text is rewritten so that only the commands in
    _VISIBLE_COMMANDS appear in the "choose from ..." hint; hidden legacy
    commands would otherwise leak into stderr.

    Refer the following argparse python documentation for detailed method
    information:
      http://docs.python.org/2/library/argparse.html#exiting-methods

    Args:
      message: original error message that will be printed to stderr
    """
    # The quoted variant mirrors argparse's own "choose from 'a', 'b'"
    # rendering while restricting the listing to documented commands.
    visible = ', '.join(_VISIBLE_COMMANDS)
    visible_quoted = ', '.join(repr(cmd) for cmd in _VISIBLE_COMMANDS)
    pattern = (r'(argument {%s}: invalid choice: .*) \(choose from (.*)\)$'
               % visible)
    replacement = r'\1 (choose from %s)' % visible_quoted
    super(_EndpointsParser, self).error(re.sub(pattern, replacement, message))
Override superclass to support customized error message. Error message needs to be rewritten in order to display visible commands only, when invalid command is called by user. Otherwise, hidden commands will be displayed in stderr, which is not expected. Refer the following argparse python documentation for detailed method information: http://docs.python.org/2/library/argparse.html#exiting-methods Args: message: original error message that will be printed to stderr
def cnvlGauss2D(idxPrc, aryBoxCar, aryMdlParamsChnk, tplPngSize, varNumVol, queOut): """Spatially convolve boxcar functions with 2D Gaussian. Parameters ---------- idxPrc : 2d numpy array, shape [n_samples, n_measurements] Description of input 1. aryBoxCar : float, positive Description of input 2. aryMdlParamsChnk : 2d numpy array, shape [n_samples, n_measurements] Description of input 1. tplPngSize : float, positive Description of input 2. varNumVol : 2d numpy array, shape [n_samples, n_measurements] Description of input 1. queOut : float, positive Description of input 2. Returns ------- data : 2d numpy array, shape [n_samples, n_measurements] Closed data. Reference --------- [1] """ # Number of combinations of model parameters in the current chunk: varChnkSze = np.size(aryMdlParamsChnk, axis=0) # Determine number of motion directions varNumMtnDrtn = aryBoxCar.shape[2] # Output array with pRF model time courses: aryOut = np.zeros([varChnkSze, varNumMtnDrtn, varNumVol]) # Loop through different motion directions: for idxMtn in range(0, varNumMtnDrtn): # Loop through combinations of model parameters: for idxMdl in range(0, varChnkSze): # Spatial parameters of current model: varTmpX = aryMdlParamsChnk[idxMdl, 1] varTmpY = aryMdlParamsChnk[idxMdl, 2] varTmpSd = aryMdlParamsChnk[idxMdl, 3] # Create pRF model (2D): aryGauss = crtGauss2D(tplPngSize[0], tplPngSize[1], varTmpX, varTmpY, varTmpSd) # Multiply pixel-time courses with Gaussian pRF models: aryPrfTcTmp = np.multiply(aryBoxCar[:, :, idxMtn, :], aryGauss[:, :, None]) # Calculate sum across x- and y-dimensions - the 'area under the # Gaussian surface'. This is essentially an unscaled version of the # pRF time course model (i.e. not yet scaled for size of the pRF). 
aryPrfTcTmp = np.sum(aryPrfTcTmp, axis=(0, 1)) # Put model time courses into function's output with 2d Gaussian # arrray: aryOut[idxMdl, idxMtn, :] = aryPrfTcTmp # Put column with the indicies of model-parameter-combinations into the # output array (in order to be able to put the pRF model time courses into # the correct order after the parallelised function): lstOut = [idxPrc, aryOut] # Put output to queue: queOut.put(lstOut)
Spatially convolve boxcar functions with 2D Gaussian. Parameters ---------- idxPrc : 2d numpy array, shape [n_samples, n_measurements] Description of input 1. aryBoxCar : float, positive Description of input 2. aryMdlParamsChnk : 2d numpy array, shape [n_samples, n_measurements] Description of input 1. tplPngSize : float, positive Description of input 2. varNumVol : 2d numpy array, shape [n_samples, n_measurements] Description of input 1. queOut : float, positive Description of input 2. Returns ------- data : 2d numpy array, shape [n_samples, n_measurements] Closed data. Reference --------- [1]
def export(self, class_name, method_name, export_data=False, export_dir='.', export_filename='data.json', export_append_checksum=False, **kwargs): """ Port a trained estimator to the syntax of a chosen programming language. Parameters ---------- :param class_name : string The name of the class in the returned result. :param method_name : string The name of the method in the returned result. :param export_data : bool, default: False Whether the model data should be saved or not. :param export_dir : string, default: '.' (current directory) The directory where the model data should be saved. :param export_filename : string, default: 'data.json' The filename of the exported model data. :param export_append_checksum : bool, default: False Whether to append the checksum to the filename or not. Returns ------- :return : string The transpiled algorithm with the defined placeholders. """ # Arguments: self.class_name = class_name self.method_name = method_name # Estimator: est = self.estimator # Basic parameters: self.metric = est.metric self.n_classes = len(est.classes_) self.n_templates = len(est._fit_X) # pylint: disable=W0212 self.n_features = len(est._fit_X[0]) # pylint: disable=W0212 self.n_neighbors = est.n_neighbors self.algorithm = est.algorithm self.power_param = est.p if self.algorithm != 'brute': from sklearn.neighbors.kd_tree import KDTree # pylint: disable-msg=E0611 from sklearn.neighbors.ball_tree import BallTree # pylint: disable-msg=E0611 tree = est._tree # pylint: disable=W0212 if isinstance(tree, (KDTree, BallTree)): self.tree = tree if self.target_method == 'predict': # Exported: if export_data and os.path.isdir(export_dir): self.export_data(export_dir, export_filename, export_append_checksum) return self.predict('exported') # Separated: return self.predict('separated')
Port a trained estimator to the syntax of a chosen programming language. Parameters ---------- :param class_name : string The name of the class in the returned result. :param method_name : string The name of the method in the returned result. :param export_data : bool, default: False Whether the model data should be saved or not. :param export_dir : string, default: '.' (current directory) The directory where the model data should be saved. :param export_filename : string, default: 'data.json' The filename of the exported model data. :param export_append_checksum : bool, default: False Whether to append the checksum to the filename or not. Returns ------- :return : string The transpiled algorithm with the defined placeholders.
def max(self, array, role = None):
    """
    Return the maximum value of ``array`` for the entity members.

    ``array`` must have the dimension of the number of persons in the
    simulation.

    If ``role`` is provided, only the entity members with the given role
    are taken into account.

    Example:

    >>> salaries = household.members('salary', '2018-01') # e.g. [2000, 1500, 0, 0, 0]
    >>> household.max(salaries)
    >>> array([2000])
    """
    # -inf is the neutral element of np.maximum. ``np.inf`` replaces the
    # ``np.infty`` alias, which was removed in NumPy 2.0.
    return self.reduce(array, reducer = np.maximum, neutral_element = - np.inf, role = role)
Return the maximum value of ``array`` for the entity members. ``array`` must have the dimension of the number of persons in the simulation If ``role`` is provided, only the entity member with the given role are taken into account. Example: >>> salaries = household.members('salary', '2018-01') # e.g. [2000, 1500, 0, 0, 0] >>> household.max(salaries) >>> array([2000])
def bipartite_vertex_cover(bigraph):
    """Minimum vertex cover of a bipartite graph, via Koenig's theorem.

    :param bigraph: adjacency list; index = vertex in U,
                    value = list of neighbors in V
    :assumption: U = V = {0, 1, 2, ..., n - 1} for n = len(bigraph)
    :returns: (boolean table for U, boolean table for V); the selected
              vertices form a minimum vertex cover, i.e. every edge is
              adjacent to at least one selected vertex and the number of
              selected vertices is minimum
    :complexity: `O(|V|*|E|)`
    """
    n = len(bigraph)
    matchV = max_bipartite_matching(bigraph)
    # Invert the matching: matchU[u] is the vertex of V matched to u.
    matchU = [None] * n
    for v in range(n):
        u = matchV[v]
        if u is not None:
            matchU[u] = v
    # Grow a maximal alternating forest rooted at every free vertex of U.
    visitU = [False] * n
    visitV = [False] * n
    for u in range(n):
        if matchU[u] is None:
            _alternate(u, bigraph, visitU, visitV, matchV)
    # Koenig: the cover is (U not reached by the forest) + (V reached).
    return ([not reached for reached in visitU], visitV)
Bipartite minimum vertex cover by Koenig's theorem :param bigraph: adjacency list, index = vertex in U, value = neighbor list in V :assumption: U = V = {0, 1, 2, ..., n - 1} for n = len(bigraph) :returns: boolean table for U, boolean table for V :comment: selected vertices form a minimum vertex cover, i.e. every edge is adjacent to at least one selected vertex and number of selected vertices is minimum :complexity: `O(|V|*|E|)`
def mine_urls(urls, params=None, callback=None, **kwargs):
    """Concurrently retrieve URLs.

    :param urls: A set of URLs to concurrently retrieve.
    :type urls: iterable
    :param params: (optional) The URL parameters to send with each request.
    :type params: dict
    :param callback: (optional) A callback function to be called on each
        :py:class:`aiohttp.client.ClientResponse`.
    :param \*\*kwargs: (optional) Arguments that ``get_miner`` takes.
    """
    miner = Miner(**kwargs)
    try:
        # Let Ctrl-C close the miner gracefully instead of killing the
        # event loop mid-request.
        miner.loop.add_signal_handler(signal.SIGINT, miner.close)
        # Block until every URL has been processed.
        miner.loop.run_until_complete(miner.mine_urls(urls, params, callback))
    except RuntimeError:
        # NOTE(review): presumably raised when `miner.close` stops the
        # loop while `run_until_complete` is still pending, so the error
        # is swallowed to make interruption look like a normal exit --
        # TODO confirm; this broad catch also hides unrelated
        # RuntimeErrors.
        pass
Concurrently retrieve URLs. :param urls: A set of URLs to concurrently retrieve. :type urls: iterable :param params: (optional) The URL parameters to send with each request. :type params: dict :param callback: (optional) A callback function to be called on each :py:class:`aiohttp.client.ClientResponse`. :param \*\*kwargs: (optional) Arguments that ``get_miner`` takes.
def merge_values(values1, values2):
    '''
    Merge two numpy arrays by forming every pairwise combination of their
    rows (cartesian product of rows, concatenated horizontally).
    '''
    left = values_to_array(values1)
    right = values_to_array(values2)
    # An empty side contributes nothing: return the other side unchanged.
    if left.size == 0:
        return right
    if right.size == 0:
        return left
    combined = [np.hstack((lrow, rrow))
                for lrow in left
                for rrow in right]
    return np.atleast_2d(combined)
Merges two numpy arrays by calculating all possible combinations of rows
def remove(self, item):
    """
    Atomically discard ``item`` from the underlying set.

    Returns True when the item was present (and has been removed),
    False when it was absent.
    """
    with self.lock:
        # EAFP: attempt the removal and let the KeyError signal absence.
        try:
            self.set.remove(item)
            return True
        except KeyError:
            return False
Remove an item from the set, returning if it was present
def get_string(self, sort_keys=False, pretty=False):
    """
    Returns a string representation of the INCAR.  The reason why this
    method is different from the __str__ method is to provide options
    for pretty printing.

    Args:
        sort_keys (bool): Set to True to sort the INCAR parameters
            alphabetically. Defaults to False.
        pretty (bool): Set to True for pretty aligned output. Defaults
            to False.
    """
    keys = self.keys()
    if sort_keys:
        keys = sorted(keys)
    lines = []
    for k in keys:
        # MAGMOM lists need special formatting: runs of identical values
        # are collapsed into VASP's "N*value" shorthand.
        if k == "MAGMOM" and isinstance(self[k], list):
            value = []

            if (isinstance(self[k][0], list) or isinstance(self[k][0], Magmom)) and \
                    (self.get("LSORBIT") or self.get("LNONCOLLINEAR")):
                # Per-site vector moments (non-collinear): flatten each
                # site's components into a space-separated string.
                value.append(" ".join(str(i) for j in self[k] for i in j))
            elif self.get("LSORBIT") or self.get("LNONCOLLINEAR"):
                # Scalar moments under a non-collinear setting: each run
                # of m identical values becomes "3*N*m".
                for m, g in itertools.groupby(self[k]):
                    value.append("3*{}*{}".format(len(tuple(g)), m))
            else:
                # float() to ensure backwards compatibility between
                # float magmoms and Magmom objects
                for m, g in itertools.groupby(self[k], lambda x: float(x)):
                    value.append("{}*{}".format(len(tuple(g)), m))
            lines.append([k, " ".join(value)])
        elif isinstance(self[k], list):
            # Generic list parameter: space-separated values.
            lines.append([k, " ".join([str(i) for i in self[k]])])
        else:
            lines.append([k, self[k]])

    if pretty:
        # Aligned "KEY = VALUE" columns via tabulate.
        return str(tabulate([[l[0], "=", l[1]] for l in lines],
                            tablefmt="plain"))
    else:
        return str_delimited(lines, None, " = ") + "\n"
Returns a string representation of the INCAR. The reason why this method is different from the __str__ method is to provide options for pretty printing. Args: sort_keys (bool): Set to True to sort the INCAR parameters alphabetically. Defaults to False. pretty (bool): Set to True for pretty aligned output. Defaults to False.
def gen_relationships(obj) -> Generator[Tuple[str, RelationshipProperty, Type], None, None]:
    """
    Yields tuples of ``(attrname, RelationshipProperty, related_class)``
    for all relationships of an ORM object.
    The object 'obj' can be EITHER an instance OR a class.

    Args:
        obj: a SQLAlchemy ORM object -- either a mapped class or an
            instance of one.

    Yields:
        tuple: ``(attrname, rel_prop, related_class)`` where ``attrname``
        is the name of the relationship attribute, ``rel_prop`` is its
        :class:`RelationshipProperty`, and ``related_class`` is the mapped
        class on the far side of the relationship.
    """
    insp = inspect(obj)  # type: InstanceState
    # insp.mapper.relationships is of type
    # sqlalchemy.utils._collections.ImmutableProperties, which is basically
    # a sort of AttrDict.
    for attrname, rel_prop in insp.mapper.relationships.items():  # type: Tuple[str, RelationshipProperty]  # noqa
        # noinspection PyUnresolvedReferences
        related_class = rel_prop.mapper.class_
        # log.critical("gen_relationships: attrname={!r}, "
        #              "rel_prop={!r}, related_class={!r}, rel_prop.info={!r}",
        #              attrname, rel_prop, related_class, rel_prop.info)
        yield attrname, rel_prop, related_class
Yields tuples of ``(attrname, RelationshipProperty, related_class)`` for all relationships of an ORM object. The object 'obj' can be EITHER an instance OR a class.
def get_resource(self, session, query, api_type, obj_id):
    """
    Fetch a single resource and render it as a JSON API response.

    :param session: SQLAlchemy session
    :param query: Dict of query args
    :param api_type: Type of the resource
    :param obj_id: ID of the resource
    """
    resource = self._fetch_resource(session, api_type, obj_id,
                                    Permissions.VIEW)

    # Query-string driven rendering options.
    include = self._parse_include(query.get('include', '').split(','))
    fields = self._parse_fields(query)

    # The renderer bundles side-loaded resources under 'included'; pull
    # them out so they can live at the top level of the response.
    rendered = self._render_full_resource(resource, include, fields)
    included = list(rendered.pop('included').values())

    response = JSONAPIResponse()
    response.data['included'] = included
    response.data['data'] = rendered
    return response
Fetch a resource. :param session: SQLAlchemy session :param query: Dict of query args :param api_type: Type of the resource :param obj_id: ID of the resource
def getattr(self, name, context=None, class_context=True):
    """Get an attribute from this class, using Python's attribute semantic.

    This method doesn't look in the :attr:`instance_attrs` dictionary
    since it is done by an :class:`Instance` proxy at inference time.
    It may return an :class:`Uninferable` object if
    the attribute has not been
    found, but a ``__getattr__`` or ``__getattribute__`` method is defined.
    If ``class_context`` is given, then it is considered that the
    attribute is accessed from a class context,
    e.g. ClassDef.attribute, otherwise it might have been accessed
    from an instance as well. If ``class_context`` is used in that
    case, then a lookup in the implicit metaclass and the explicit
    metaclass will be done.

    :param name: The attribute to look for.
    :type name: str

    :param class_context: Whether the attribute can be accessed statically.
    :type class_context: bool

    :returns: The attribute.
    :rtype: list(NodeNG)

    :raises AttributeInferenceError: If the attribute cannot be inferred.
    """
    # Lookup order: this class's own locals, then special attributes,
    # then ancestors (MRO), then (in a class context) the metaclass.
    values = self.locals.get(name, [])
    if name in self.special_attributes and class_context and not values:
        result = [self.special_attributes.lookup(name)]
        if name == "__bases__":
            # Need special treatment, since they are mutable
            # and we need to return all the values.
            result += values
        return result
    # don't modify the list in self.locals!
    values = list(values)
    for classnode in self.ancestors(recurs=True, context=context):
        values += classnode.locals.get(name, [])
    if class_context:
        # Static access also consults the (implicit/explicit) metaclass.
        values += self._metaclass_lookup_attribute(name, context)
    if not values:
        raise exceptions.AttributeInferenceError(
            target=self, attribute=name, context=context
        )
    # Look for AnnAssigns, which are not attributes in the purest sense.
    for value in values:
        if isinstance(value, node_classes.AssignName):
            stmt = value.statement()
            # A bare annotation (``x: int`` with no value) declares the
            # name but does not define the attribute, so treat it as
            # not found.
            if isinstance(stmt, node_classes.AnnAssign) and stmt.value is None:
                raise exceptions.AttributeInferenceError(
                    target=self, attribute=name, context=context
                )
    return values
Get an attribute from this class, using Python's attribute semantic. This method doesn't look in the :attr:`instance_attrs` dictionary since it is done by an :class:`Instance` proxy at inference time. It may return an :class:`Uninferable` object if the attribute has not been found, but a ``__getattr__`` or ``__getattribute__`` method is defined. If ``class_context`` is given, then it is considered that the attribute is accessed from a class context, e.g. ClassDef.attribute, otherwise it might have been accessed from an instance as well. If ``class_context`` is used in that case, then a lookup in the implicit metaclass and the explicit metaclass will be done. :param name: The attribute to look for. :type name: str :param class_context: Whether the attribute can be accessed statically. :type class_context: bool :returns: The attribute. :rtype: list(NodeNG) :raises AttributeInferenceError: If the attribute cannot be inferred.