code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _crmod_to_abmn(self, configs): A = configs[:, 0] % 1e4 B = np.floor(configs[:, 0] / 1e4).astype(int) M = configs[:, 1] % 1e4 N = np.floor(configs[:, 1] / 1e4).astype(int) ABMN = np.hstack((A[:, np.newaxis], B[:, np.newaxis], M[:, np.newaxis], N[:, np.newaxis])).astype(int) return ABMN
convert crmod-style configurations to a Nx4 array CRMod-style configurations merge A and B, and M and N, electrode numbers into one large integer each: .. math :: AB = A \cdot 10^4 + B MN = M \cdot 10^4 + N Parameters ---------- configs: numpy.ndarray Nx2 array holding the configurations to convert Examples -------- >>> import numpy as np >>> from reda.configs.configManager import ConfigManager >>> config = ConfigManager(nr_of_electrodes=5) >>> crmod_configs = np.array(( ... (10002, 40003), ... (10010, 30004), ... )) >>> abmn = config._crmod_to_abmn(crmod_configs) >>> print(abmn) [[ 2 1 3 4] [10 1 4 3]]
def remove_user(name, **client_args):
    """Remove a user.

    name
        Name of the user to remove

    CLI Example:

    .. code-block:: bash

        salt '*' influxdb.remove_user <name>
    """
    # Removing a user that does not exist is not an error, just a no-op.
    if not user_exists(name, **client_args):
        log.info('User \'%s\' does not exist', name)
        return False
    _client(**client_args).drop_user(name)
    return True
def __cache(self, file, content, document):
    """Cache the given file.

    :param file: File to cache.
    :type file: unicode
    :param content: File content.
    :type content: list
    :param document: File document.
    :type document: QTextDocument
    """
    # The cache maps the file path to its CacheData record.
    entry = {file: CacheData(content=content, document=document)}
    self.__files_cache.add_content(**entry)
def iter_init_append(self):
    """Create a Message.AppendIter for appending arguments to the Message.

    :return: a freshly-initialized AppendIter bound to this message.
    """
    # Renamed from `iter` to avoid shadowing the builtin.
    append_iter = self.AppendIter(None)
    dbus.dbus_message_iter_init_append(self._dbobj, append_iter._dbobj)
    return append_iter
def release_package(
    self, package_name: str, version: str, manifest_uri: str
) -> bytes:
    """Return the release id generated by releasing a package on the
    current registry.

    Requires ``web3.PM`` to have a registry set. Requires
    ``web3.eth.defaultAccount`` to be the registry owner.

    * Parameters:
        * ``package_name``: Must be a valid package name, matching the
          given manifest.
        * ``version``: Must be a valid package version, matching the
          given manifest.
        * ``manifest_uri``: Must be a valid content-addressed URI.
          Currently, only IPFS and Github content-addressed URIs are
          supported.
    """
    validate_is_supported_manifest_uri(manifest_uri)
    # Fetch and validate the manifest before touching the registry.
    raw_manifest = to_text(resolve_uri_contents(manifest_uri))
    validate_raw_manifest_format(raw_manifest)
    manifest = json.loads(raw_manifest)
    validate_manifest_against_schema(manifest)
    # The caller-supplied name/version must agree with the manifest itself.
    if package_name != manifest['package_name']:
        raise ManifestValidationError(
            f"Provided package name: {package_name} does not match the package name "
            f"found in the manifest: {manifest['package_name']}."
        )
    if version != manifest['version']:
        raise ManifestValidationError(
            f"Provided package version: {version} does not match the package version "
            f"found in the manifest: {manifest['version']}."
        )
    self._validate_set_registry()
    return self.registry._release(package_name, version, manifest_uri)
def _parse_parameter_options(self, options):
    """Select all unknown options.

    Select all unknown options (not query string, API, or request
    options).

    :param options: mapping of option name to value.
    :return: the subset of *options* whose keys are not in ALL_OPTIONS.
    """
    # invert=True keeps everything *not* matched by ALL_OPTIONS.
    return self._select_options(options, self.ALL_OPTIONS, invert=True)
def getSignalHeaders(self):
    """Return the headers of all signals as a list of dicts.

    :return: list with one header dict per signal, in channel order.
    """
    # A plain range() suffices here; building a NumPy arange and an
    # append loop for simple integer iteration was unnecessary.
    return [self.getSignalHeader(chn) for chn in range(self.signals_in_file)]
def get_sky_diffuse(surface_tilt, surface_azimuth,
                    solar_zenith, solar_azimuth,
                    dni, ghi, dhi, dni_extra=None, airmass=None,
                    model='isotropic',
                    model_perez='allsitescomposite1990'):
    r"""Determine the in-plane sky diffuse irradiance component using the
    specified sky diffuse irradiance model.

    Sky diffuse models include:
        * isotropic (default)
        * klucher
        * haydavies
        * reindl
        * king
        * perez

    Parameters
    ----------
    surface_tilt : numeric
        Panel tilt from horizontal.
    surface_azimuth : numeric
        Panel azimuth from north.
    solar_zenith : numeric
        Solar zenith angle.
    solar_azimuth : numeric
        Solar azimuth angle.
    dni : numeric
        Direct Normal Irradiance
    ghi : numeric
        Global horizontal irradiance
    dhi : numeric
        Diffuse horizontal irradiance
    dni_extra : None or numeric, default None
        Extraterrestrial direct normal irradiance
    airmass : None or numeric, default None
        Airmass
    model : String, default 'isotropic'
        Irradiance model.
    model_perez : String, default 'allsitescomposite1990'
        See perez.

    Returns
    -------
    poa_sky_diffuse : numeric
    """
    model = model.lower()
    # Each model consumes a different subset of the inputs, so dispatch
    # explicitly rather than generically.
    if model == 'isotropic':
        sky = isotropic(surface_tilt, dhi)
    elif model == 'klucher':
        sky = klucher(surface_tilt, surface_azimuth, dhi, ghi,
                      solar_zenith, solar_azimuth)
    elif model == 'haydavies':
        sky = haydavies(surface_tilt, surface_azimuth, dhi, dni, dni_extra,
                        solar_zenith, solar_azimuth)
    elif model == 'reindl':
        sky = reindl(surface_tilt, surface_azimuth, dhi, dni, ghi, dni_extra,
                     solar_zenith, solar_azimuth)
    elif model == 'king':
        sky = king(surface_tilt, dhi, ghi, solar_zenith)
    elif model == 'perez':
        sky = perez(surface_tilt, surface_azimuth, dhi, dni, dni_extra,
                    solar_zenith, solar_azimuth, airmass,
                    model=model_perez)
    else:
        raise ValueError('invalid model selection {}'.format(model))
    return sky
def get_vertices(self, indexed=None):
    """Get the vertices.

    Parameters
    ----------
    indexed : str | None
        If None, return an array (N, 3) of the positions of vertices in
        the mesh. By default, each unique vertex appears only once.
        If indexed is 'faces', then the array will instead contain three
        vertices per face in the mesh (and a single vertex may appear
        more than once in the array).

    Returns
    -------
    vertices : ndarray
        The vertices.
    """
    if indexed is None:
        # Lazily derive the unindexed vertices when only the
        # faces-indexed representation is available.
        missing_unindexed = (self._vertices is None and
                             self._vertices_indexed_by_faces is not None)
        if missing_unindexed:
            self._compute_unindexed_vertices()
        return self._vertices
    if indexed == 'faces':
        # Lazily expand the unique vertices to one-per-face-corner.
        missing_indexed = (self._vertices_indexed_by_faces is None and
                           self._vertices is not None)
        if missing_indexed:
            self._vertices_indexed_by_faces = \
                self._vertices[self.get_faces()]
        return self._vertices_indexed_by_faces
    raise Exception("Invalid indexing mode. Accepts: None, 'faces'")
def set_base_location(self, location):
    """Configure the project's base location.

    Caches the location's UTM zone, datum and convergence on the
    instance for later use.
    """
    self.base_location = location
    for attr in ('zone', 'datum', 'convergence'):
        setattr(self, '_utm_' + attr, getattr(location, attr))
def get_for_control_var_and_eval_expr(comm_type, kwargs):
    """Return control variable name(s) and the iterable resulting from
    evaluating the expression of the given for loop.

    For example:
    - given 'for $i in $(echo "foo bar")' it returns (['i'], ['foo', 'bar'])
    - given 'for $i, $j in $foo' it returns (['i', 'j'], [('foo', 'bar')])

    :param comm_type: the for-loop header to parse.
    :param kwargs: variables available when evaluating the expression.
    :return: tuple of (control variable names, iterable).
    """
    control_vars, iter_type, expression = parse_for(comm_type)
    # evaluate_expression returns a pair; index [1] is the computed value.
    eval_expression = evaluate_expression(expression, kwargs)[1]

    iterval = []
    if len(control_vars) == 2:
        # Two control variables require a dict so that key/value pairs
        # can be unpacked into them.
        if not isinstance(eval_expression, dict):
            raise exceptions.YamlSyntaxError('Can\'t expand {t} to two control variables.'.
                                             format(t=type(eval_expression)))
        else:
            iterval = list(eval_expression.items())
    elif isinstance(eval_expression, six.string_types):
        # 'word_in' iterates over whitespace-separated words; otherwise
        # the string itself is iterated character by character.
        if iter_type == 'word_in':
            iterval = eval_expression.split()
        else:
            iterval = eval_expression
    else:
        iterval = eval_expression
    return control_vars, iterval
def logregularize(self, epsilon=2**-1074):
    """Find bins in the denominator that are 0 and set them to 1, while
    setting the corresponding bin in the numerator to float epsilon.

    This has the effect of allowing the logarithm of the ratio array to
    be evaluated without error.

    :param epsilon: value stored in the numerator for zero-denominator
        bins (defaults to the smallest positive float).
    :return: self, to allow chaining.
    """
    # Evaluate the zero mask once, before mutating either array
    # (assumes numerator and denominator are distinct arrays, as in the
    # original two-pass implementation).
    zero_bins = self.denominator.array == 0
    self.numerator.array[zero_bins] = epsilon
    self.denominator.array[zero_bins] = 1
    return self
def _title_uptodate(self,fullfile,pid,_title): i=self.fb.get_object(pid) if i.has_key('name'): if _title == i['name']: return True return False
Check fb photo title against provided title, returns true if they match
def generate_password(mode, length):
    """Generate a random password and emit it according to *mode*.

    ECHO prints a styled password, COPY puts it on the clipboard
    (requires pyperclip), RAW prints the bare password.
    """
    rng = random.SystemRandom()
    size = length or RANDOM_PASSWORD_DEFAULT_LENGTH
    password = "".join(rng.choice(RANDOM_PASSWORD_ALPHABET)
                       for _ in range(size))
    if mode == Mode.ECHO:
        click.echo(style_password(password))
    elif mode == Mode.COPY:
        # pyperclip is optional; report rather than crash when absent.
        try:
            import pyperclip
        except ImportError:
            result = style_error('*** PYTHON PACKAGE "PYPERCLIP" NOT FOUND ***')
        else:
            pyperclip.copy(password)
            result = style_success("*** PASSWORD COPIED TO CLIPBOARD ***")
        click.echo(result)
    elif mode == Mode.RAW:
        click.echo(password)
def make_certificate_signing_request(pkey, digest='sha512', **name):
    """Make a certificate signing request.

    :param OpenSSL.crypto.PKey pkey: A private key.
    :param str digest: A valid digest to use. For example, `sha512`.
    :param name: Key word arguments containing subject name parts:
        C, ST, L, O, OU, CN.
    :return: A certificate signing request.
    :rtype: :class:`OpenSSL.crypto.X509Request`
    """
    csr = crypto.X509Req()
    subj = csr.get_subject()
    # Fill in the subject name parts, falling back to defaults.
    subj.C = name.get('C', 'US')
    subj.ST = name.get('ST', 'CA')
    subj.L = name.get('L', 'Home')
    subj.O = name.get('O', 'Home')
    subj.OU = name.get('OU', 'Unit')
    subj.CN = name.get('CN', 'Common')
    csr.set_pubkey(pkey)
    # NOTE(review): CSR versions other than 0 (i.e. v1) are unusual, and
    # recent pyOpenSSL releases reject non-zero values — confirm that
    # set_version(3) is intentional here.
    csr.set_version(3)
    csr.sign(pkey, digest)
    return csr
def read_stats(self):
    """Read the statistics view from IXN and save it in the statistics
    dictionary, keyed by the value of the name caption column.
    """
    captions, rows = self._get_pages()
    # Locate and remove the name column; the remaining captions describe
    # the per-row statistic values.
    name_index = captions.index(self.name_caption)
    captions.pop(name_index)
    self.captions = captions
    self.statistics = OrderedDict()
    for row in rows:
        key = row.pop(name_index)
        self.statistics[key] = row
def tag(self, path, name):
    """Change the name (label) associated with *path*.

    Normalizes the path to end with '/', updates the label in both the
    main config and the directory config, and persists both.

    :param path: folder path to re-label.
    :param name: new label.
    :return: the previous label.
    :raises custom_errors.FileNotInConfig: if the path is not configured.
    """
    # str.endswith replaces the manual last-character index check and
    # also avoids an IndexError on an empty path.
    if not path.endswith('/'):
        path += '/'
    config = self.get_config()
    folder = self.find_folder({'path': path}, config)
    if not folder:
        raise custom_errors.FileNotInConfig(path)
    old_name = folder['label']
    folder['label'] = name
    dir_config = self.adapter.get_dir_config(path)
    dir_config['label'] = name
    self.adapter.set_dir_config(dir_config)
    self.set_config(config)
    return old_name
def getParameters(self, contactItem):
    """Return a list containing a single parameter suitable for changing
    the VIP status of a person.

    @type contactItem: L{_PersonVIPStatus}

    @rtype: C{list} of L{liveform.Parameter}
    """
    # No contact item means the person has no VIP flag yet.
    isVIP = contactItem.person.vip if contactItem is not None else False
    return [liveform.Parameter(
        'vip', liveform.CHECKBOX_INPUT, bool, 'VIP', default=isVIP)]
def addMonitor(self, monitor):
    """Subscribe to SingleLayer2DExperiment events.

    @param monitor (SingleLayer2DExperimentMonitor)
    An object that implements a set of monitor methods

    @return (object) An opaque token that can be used to refer to this
    monitor (e.g. to unsubscribe it later).
    """
    token, self.nextMonitorToken = (self.nextMonitorToken,
                                    self.nextMonitorToken + 1)
    self.monitors[token] = monitor
    return token
def unicode_to_string(self):
    """Convert each tag to str and append it to self.ununicode."""
    self.ununicode.extend(str(tag) for tag in self.tags)
def _tile_image(self, data):
    """Decode raw tile bytes into an RGBA PIL Image.

    :param data: binary image content of a tile.
    :return: the decoded image converted to RGBA mode.
    """
    return Image.open(BytesIO(data)).convert('RGBA')
def load(self, filename, offset):
    """Load HFS+ volume header information from *filename*.

    Reads 1024 bytes starting at offset + VOLUME_HEADER_OFFSET and
    parses them into self.vol_header. On IOError the error is printed
    and the method returns normally (best-effort, as before).

    :param filename: path of the volume image to open.
    :param offset: byte offset of the volume within the file.
    """
    try:
        self.offset = offset
        # `with` guarantees the descriptor is closed even if parsing
        # raises; the original leaked the fd on a non-IOError failure.
        with open(filename, 'rb') as fd:
            self.fd = fd
            fd.seek(self.offset + VOLUME_HEADER_OFFSET)
            data = fd.read(1024)
            self.vol_header = VolumeHeader(data)
    except IOError as e:
        print(e)
def apply_change(self, path, *args):
    """Take a single change from a Delta and apply it to this model.

    A multi-element path is delegated to the named child; a one-element
    path invokes the corresponding ``set_<name>`` mutator with the
    single change value.
    """
    if len(path) > 1:
        # Descend into the child model for the remainder of the path.
        child = self[path[0]]
        child.apply_change(path[1:], *args)
    else:
        assert len(path) == 1 and len(args) == 1, \
            "Cannot process change %s" % ([self.path + path] + list(args))
        setter = getattr(self, "set_%s" % path[0])
        setter(args[0])
def compute_mean_reward(rollouts, clipped):
    """Calculate the mean total reward over completed rollouts.

    Only rollouts whose final frame is done are counted. *clipped*
    selects between the "reward" and "unclipped_reward" frame fields.
    Returns 0 when no rollout finished.
    """
    reward_name = "reward" if clipped else "unclipped_reward"
    totals = [
        sum(getattr(frame, reward_name) for frame in rollout)
        for rollout in rollouts
        if rollout[-1].done
    ]
    return np.mean(totals) if totals else 0
def setDaemon(self, runnable, isdaemon, noregister=False):
    """Mark or unmark a runnable as a daemon.

    A daemon does not keep the main loop running: the main loop ends
    when all alive runnables are daemons. Unless *noregister* is set,
    the runnable is registered first if it is not already.
    """
    if not noregister and runnable not in self.registerIndex:
        self.register((), runnable)
    # add() marks as daemon, discard() unmarks (no-op when absent).
    op = self.daemons.add if isdaemon else self.daemons.discard
    op(runnable)
def _output(self, message, verbosity, exact, stream): if exact: if self.config.verbosity == verbosity: stream.write(message + "\n") else: if self.config.verbosity >= verbosity: stream.write(message + "\n")
Output a message if the config's verbosity is >= to the given verbosity. If exact == True, the message will only be outputted if the given verbosity exactly matches the config's verbosity.
def fetch(code):
    """Fetch keyword occurrence counts from source code.

    Strings are stripped first; candidate keywords come from the
    class-level regex. Single characters and pure digits are discarded,
    and a single leading/trailing '-' or '*' is stripped.

    :return: dict mapping keyword -> occurrence count.
    """
    counts = {}
    code = KeywordFetcher._remove_strings(code)
    for keyword in KeywordFetcher.prog.findall(code):
        if len(keyword) <= 1 or keyword.isdigit():
            continue
        # Strip one decoration character from each end, then re-check
        # the minimum length.
        if keyword[0] in '-*':
            keyword = keyword[1:]
        if keyword[-1] in '-*':
            keyword = keyword[:-1]
        if len(keyword) <= 1:
            continue
        counts[keyword] = counts.get(keyword, 0) + 1
    return counts
def get_install_value(self, value_name, wanted_type=None):
    """For the uninstall section of the registry return the name value.

    Args:
        value_name (str): Registry value name.
        wanted_type (str):
            The type of value wanted if the type does not match
            None is return. wanted_type support values are
            ``str`` ``int`` ``list`` ``bytes``.

    Returns:
        value: Value requested or None if not found.
    """
    try:
        item_value, item_type = self.__reg_query_value(self.__reg_uninstall_handle, value_name)
    except pywintypes.error as exc:
        # A missing value is an expected condition, not an error.
        if exc.winerror == winerror.ERROR_FILE_NOT_FOUND:
            return None
        raise
    # Enforce the requested registry value type, if any.
    if wanted_type and item_type not in self.__reg_types[wanted_type]:
        item_value = None
    return item_value
def fft_convolve(data, h, res_g=None, plan=None, inplace=False,
                 kernel_is_fft=False,
                 kernel_is_fftshifted=False):
    """Convolve data with kernel h via FFTs.

    data should be either a numpy array or an OCLArray (see doc for fft).
    Both data and h should be the same shape.

    If data/h are OCLArrays, then:
        - type should be complex64
        - shape should be equal and power of two
        - h is assumed to be already fftshifted
          (otherwise set kernel_is_fftshifted to True)

    :param res_g: optional GPU result buffer (OCLArray path only).
    :param plan: optional pre-built FFT plan.
    :param inplace: perform the GPU convolution in place.
    :raises TypeError: if data is neither a numpy array nor an OCLArray.
    """
    # Dispatch on array type: CPU (numpy) vs GPU (OCLArray) implementation.
    if isinstance(data, np.ndarray):
        return _fft_convolve_numpy(data, h, plan=plan,
                                   kernel_is_fft=kernel_is_fft,
                                   kernel_is_fftshifted=kernel_is_fftshifted)
    elif isinstance(data, OCLArray):
        return _fft_convolve_gpu(data, h, res_g=res_g,
                                 plan=plan,
                                 inplace=inplace,
                                 kernel_is_fft=kernel_is_fft)
    else:
        raise TypeError("array argument (1) has bad type: %s"%type(data))
def from_int(cls, integer):
    """Construct a `Deleted` using the `tinyint` value of the
    `rev_deleted` column of the `revision` MariaDB table.

    * DELETED_TEXT = 1
    * DELETED_COMMENT = 2
    * DELETED_USER = 4
    * DELETED_RESTRICTED = 8
    """
    bits = bin(integer)

    def flag(position):
        # True when the bit at `position` (1-based from the right of the
        # binary-string representation) is set.
        return len(bits) >= position and bits[-position] == "1"

    return cls(
        text=flag(1),
        comment=flag(2),
        user=flag(3),
        restricted=flag(4),
    )
def split_all(reference, sep):
    """Split a given string at a given separator or list of separators.

    :param reference: The reference to split.
    :param sep: Separator string or list of separator strings.
    :return: A list of split strings
    """
    # partition_all keeps the separators in its output; filter them out.
    return [piece for piece in partition_all(reference, sep)
            if piece not in sep]
def steady_state_potential(xdata, HistBins=100):
    """Calculate the steady state potential. Used in
    fit_radius_from_potentials.

    Parameters
    ----------
    xdata : ndarray
        Position data for a degree of freedom
    HistBins : int
        Number of bins to use for histogram of xdata. Number of
        position points at which the potential is calculated.

    Returns
    -------
    position : ndarray
        positions at which potential has been calculated
    potential : ndarray
        value of potential at the positions above
    """
    import numpy as _np
    # Single histogram call instead of two identical ones.
    pops, bin_edges = _np.histogram(xdata, HistBins)
    bins = bin_edges[0:-1]
    # Shift by the mean spacing of the trimmed edge array (preserves the
    # original behaviour; note this is a full-bin-width shift).
    bins = bins + _np.mean(_np.diff(bins))
    # Normalize counts to probabilities; the potential is -log(p).
    pops = pops / float(_np.sum(pops))
    return bins, -_np.log(pops)
def load_writer_configs(writer_configs, ppp_config_dir, **writer_kwargs):
    """Load the writer from the provided `writer_configs`.

    :return: tuple of (writer instance, kwargs not consumed by __init__).
    :raises ValueError: if the configs cannot be read or lack a writer.
    """
    try:
        writer_class = read_writer_config(writer_configs)['writer']
    except (ValueError, KeyError, yaml.YAMLError):
        raise ValueError("Invalid writer configs: "
                         "'{}'".format(writer_configs))
    # Split kwargs into constructor arguments and the remainder.
    init_kwargs, remaining_kwargs = writer_class.separate_init_kwargs(
        writer_kwargs)
    writer = writer_class(ppp_config_dir=ppp_config_dir,
                          config_files=writer_configs,
                          **init_kwargs)
    return writer, remaining_kwargs
def map_compute_fov(
    m: tcod.map.Map,
    x: int,
    y: int,
    radius: int = 0,
    light_walls: bool = True,
    algo: int = FOV_RESTRICTIVE,
) -> None:
    """Compute the field-of-view for a map instance.

    .. deprecated:: 4.5
        Use :any:`tcod.map.Map.compute_fov` instead.

    :param m: the map instance to update.
    :param x: origin x coordinate.
    :param y: origin y coordinate.
    :param radius: maximum view distance (0 means unlimited).
    :param light_walls: whether walls adjacent to visible cells are lit.
    :param algo: FOV algorithm constant.
    """
    # Thin deprecated wrapper delegating to the method form.
    m.compute_fov(x, y, radius, light_walls, algo)
def _get_char_pixels(self, s): if len(s) == 1 and s in self._text_dict.keys(): return list(self._text_dict[s]) else: return list(self._text_dict['?'])
Internal. Safeguards the character indexed dictionary for the show_message function below
def get_lastfunction_header(self, header, default_return_value=None):
    """Return a specific header from the last API call.

    This will return None if the header is not present.

    :param header: (required) The name of the header you want to get
        the value of

    Most useful for the following header information:
        x-rate-limit-limit,
        x-rate-limit-remaining,
        x-rate-limit-class,
        x-rate-limit-reset
    """
    if self._last_call is None:
        raise TwythonError('This function must be called after an API call. \
It delivers header information.')
    headers = self._last_call['headers']
    return headers.get(header, default_return_value)
def to_mapping(self, **values):
    """Create a JSON-serializable representation of the plane that is
    usable with the javascript frontend.

    :param values: extra key/value pairs merged into (and overriding)
        the base mapping.
    :return: dict describing the plane.
    """
    strike, dip, rake = self.strike_dip_rake()
    # Renamed from `min`/`max` to avoid shadowing the builtins.
    min_err, max_err = self.angular_errors()
    try:
        disabled = self.disabled
    except AttributeError:
        disabled = False
    mapping = dict(
        uid=self.hash,
        axes=self.axes.tolist(),
        hyperbolic_axes=self.hyperbolic_axes.tolist(),
        max_angular_error=max_err,
        min_angular_error=min_err,
        strike=strike,
        dip=dip,
        rake=rake,
        disabled=disabled)
    # Caller-provided values extend/override the base mapping.
    mapping.update(values)
    return mapping
def to_str(self, separator=''):
    """Build a string from the source sequence.

    Each element of the query result is coerced to a string and the
    results are concatenated into a single string, with the (optional)
    separator inserted between items.

    Note: this method uses immediate execution.

    Args:
        separator: An optional separator coerced to a string and
            inserted between each source item in the resulting string.

    Returns:
        A single string concatenating the stringified elements.

    Raises:
        TypeError: If any element or the separator cannot be coerced to
            a string.
        ValueError: If the Queryable is closed.
    """
    if self.closed():
        raise ValueError("Attempt to call to_str() on a closed Queryable.")
    sep = str(separator)
    return sep.join(self.select(str))
def _check_typecode_list(ofwhat, tcname):
    """Check a list of typecodes for compliance with Struct requirements.

    Callables are accepted as-is; every other entry must be a TypeCode
    and (unless it is an AnyElement) must carry a pname.
    """
    for item in ofwhat:
        if callable(item):
            continue
        if not isinstance(item, TypeCode):
            raise TypeError(
                tcname + ' ofwhat outside the TypeCode hierarchy, ' +
                str(item.__class__))
        if item.pname is None and not isinstance(item, AnyElement):
            raise TypeError(tcname + ' element ' + str(item) + ' has no name')
def text(self):
    """Parse problems.txt and return the text of this problem.

    Exits the process with status 1 (after printing guidance) when the
    problem cannot be found in problems.txt.
    """
    def _problem_iter(problem_num):
        # Yield the lines of problems.txt belonging to `problem_num`,
        # starting at its "Problem N" header and stopping at the first
        # pair of consecutive blank lines.
        problem_file = os.path.join(EULER_DATA, 'problems.txt')
        with open(problem_file) as f:
            is_problem = False
            last_line = ''
            for line in f:
                if line.strip() == 'Problem %i' % problem_num:
                    is_problem = True
                if is_problem:
                    # Two blank lines in a row mark the end of a problem.
                    if line == last_line == '\n':
                        break
                    else:
                        yield line[:-1]
                    last_line = line

    problem_lines = [line for line in _problem_iter(self.num)]
    if problem_lines:
        # Skip the header lines and the trailing blank line.
        return '\n'.join(problem_lines[3:-1])
    else:
        msg = 'Problem %i not found in problems.txt.' % self.num
        click.secho(msg, fg='red')
        click.echo('If this problem exists on Project Euler, consider '
                   'submitting a pull request to EulerPy on GitHub.')
        sys.exit(1)
def edit_dedicated_fwl_rules(self, firewall_id, rules):
    """Edit the rules for dedicated firewall.

    :param integer firewall_id: the instance ID of the dedicated
        firewall
    :param list rules: the rules to be pushed on the firewall as defined
        by SoftLayer_Network_Firewall_Update_Request_Rule
    """
    mask = ('mask[networkVlan[firewallInterfaces'
            '[firewallContextAccessControlLists]]]')
    svc = self.client['Network_Vlan_Firewall']
    fwl = svc.getObject(id=firewall_id, mask=mask)
    network_vlan = fwl['networkVlan']
    for fwl1 in network_vlan['firewallInterfaces']:
        # Skip the inside interface; rules are applied outside.
        if fwl1['name'] == 'inside':
            continue
        for control_list in fwl1['firewallContextAccessControlLists']:
            # Skip outgoing ACLs; only the incoming list is updated.
            if control_list['direction'] == 'out':
                continue
            fwl_ctx_acl_id = control_list['id']
    # NOTE(review): uses the *last* matching incoming ACL id found by the
    # loops above — confirm exactly one such ACL is expected per firewall.
    template = {'firewallContextAccessControlListId': fwl_ctx_acl_id,
                'rules': rules}
    svc = self.client['Network_Firewall_Update_Request']
    return svc.createObject(template)
def tmsiReallocationCommand():
    """TMSI REALLOCATION COMMAND Section 9.2.17

    Builds the layered packet: protocol discriminator / message type /
    location area id / mobile identity.
    """
    return (TpPd(pd=0x5) /
            MessageType(mesType=0x1a) /
            LocalAreaId() /
            MobileId())
def get_path(self, appendix=None, by_name=False):
    """Recursively create the path of the state.

    The path is generated bottom-up, i.e. from the nested child states
    to the root state. Either State.state_id (always unique) or
    State.name (maybe not unique but human readable) is used as the
    state identifier for the path.

    :param str appendix: the part of the path already calculated by
        previous function calls
    :param bool by_name: enables name usage to generate the path
    :rtype: str
    :return: the full path to the root state
    """
    identifier = self.name if by_name else self.state_id
    if appendix is not None:
        identifier = identifier + PATH_SEPARATOR + appendix
    if self.is_root_state:
        return identifier
    # Ask the parent to prepend its own identifier(s).
    return self.parent.get_path(identifier, by_name)
def download(
    state, host, source_url, destination,
    user=None, group=None, mode=None, cache_time=None, force=False,
):
    """Download files from remote locations.

    + source_url: source URl of the file
    + destination: where to save the file
    + user: user to own the files
    + group: group to own the files
    + mode: permissions of the files
    + cache_time: if the file exists already, re-download after this
      time (in s)
    + force: always download the file, even if it already exists

    Yields shell command strings executed on the target host.
    """
    info = host.fact.file(destination)
    # A False fact means the path exists but is not a regular file.
    if info is False:
        raise OperationError(
            'Destination {0} already exists and is not a file'.format(destination),
        )
    download = force
    if info is None:
        # File does not exist yet.
        download = True
    elif cache_time:
        # Rewrite cache_time as the threshold datetime (naive, to match
        # the mtime fact).
        cache_time = host.fact.date.replace(tzinfo=None) - timedelta(seconds=cache_time)
        # NOTE(review): this re-downloads when the file was modified
        # *more recently* than the threshold — confirm the comparison
        # direction matches the documented "re-download after" intent.
        if info['mtime'] and info['mtime'] > cache_time:
            download = True
    if download:
        yield 'wget -q {0} -O {1}'.format(source_url, destination)
        if user or group:
            yield chown(destination, user, group)
        if mode:
            yield chmod(destination, mode)
def _next_regular(target): if target <= 6: return target if not (target & (target - 1)): return target match = float('inf') p5 = 1 while p5 < target: p35 = p5 while p35 < target: quotient = -(-target // p35) try: p2 = 2 ** ((quotient - 1).bit_length()) except AttributeError: p2 = 2 ** _bit_length_26(quotient - 1) N = p2 * p35 if N == target: return N elif N < match: match = N p35 *= 3 if p35 == target: return p35 if p35 < match: match = p35 p5 *= 5 if p5 == target: return p5 if p5 < match: match = p5 return match
Find the next regular number greater than or equal to target. Regular numbers are composites of the prime factors 2, 3, and 5. Also known as 5-smooth numbers or Hamming numbers, these are the optimal size for inputs to FFTPACK. Target must be a positive integer.
def _find_node_by_indices(self, point): path_index, node_index = point path = self.paths[int(path_index)] node = path.nodes[int(node_index)] return node
Find the GSNode that is refered to by the given indices. See GSNode::_indices()
def resort(self, columnName):
    """Re-sort the table.

    @param columnName: the name of the column to sort by. This is a
    string because it is passed from the browser.

    @return: the resulting ascending/descending flag.
    """
    newSortColumn = self.columns[columnName]
    if newSortColumn is None:
        raise Unsortable('column %r has no sort attribute' % (columnName,))
    if self.currentSortColumn is newSortColumn:
        # Same column: flip the sort direction.
        self.isAscending = not self.isAscending
    else:
        # New column: sort ascending first.
        self.currentSortColumn = newSortColumn
        self.isAscending = True
    return self.isAscending
def OnExpandAll(self):
    """Expand all nodes of the tree, then the root itself."""
    root = self.tree.GetRootItem()
    self.traverse(root, self.tree.Expand)
    self.tree.Expand(root)
def getprimeover(N):
    """Return a random N-bit prime number using the System's best
    Cryptographic random source.

    Use GMP if available, otherwise fallback to PyCrypto, and finally to
    a pure-Python search.
    """
    if HAVE_GMP:
        randfunc = random.SystemRandom()
        r = gmpy2.mpz(randfunc.getrandbits(N))
        # Force the top bit so the result really has N bits.
        r = gmpy2.bit_set(r, N - 1)
        return int(gmpy2.next_prime(r))
    elif HAVE_CRYPTO:
        return number.getPrime(N, os.urandom)
    else:
        randfunc = random.SystemRandom()
        # Pure-Python fallback: random odd N-bit number, then step by 2
        # until a prime is found.
        n = randfunc.randrange(2**(N-1), 2**N) | 1
        while not is_prime(n):
            n += 2
        return n
def dumpBlock(self, block_name):
    """API to list all information related with the block_name.

    :param block_name: Name of block to be dumped (Required)
    :type block_name: str
    """
    try:
        return self.dbsBlock.dumpBlock(block_name)
    except HTTPError as he:
        raise he
    except dbsException as de:
        dbsExceptionHandler(de.eCode, de.message, self.logger.exception,
                            de.serverError)
    except Exception as ex:
        sError = "DBSReaderModel/dumpBlock. %s\n. Exception trace: \n %s" \
                 % (ex, traceback.format_exc())
        # str(ex) instead of ex.message: BaseException.message was
        # removed in Python 3 and would raise AttributeError here.
        dbsExceptionHandler('dbsException-server-error', str(ex),
                            self.logger.exception, sError)
def merge_adjacent(numbers, indicator='..', base=0):
    """Merge adjacent numbers in an iterable of numbers.

    Parameters:
        numbers (list): List of integers or numeric strings.
        indicator (str): Delimiter to indicate generated ranges.
        base (int): Passed to the `int()` conversion when comparing
            numbers.

    Return:
        list of str: Condensed sequence with either ranges or isolated
        numbers.
    """
    # Sort on the numeric value, keeping the original representation.
    ordered = sorted((int("%s" % value, base), value) for value in numbers)
    result = []
    start = 0
    total = len(numbers)
    while start < total:
        # Grow `stop` while values remain consecutive.
        stop = start + 1
        while stop < total and ordered[stop - 1][0] == ordered[stop][0] - 1:
            stop += 1
        if stop > start + 1:
            result.append("%s%s%s" % (ordered[start][1], indicator,
                                      ordered[stop - 1][1]))
        else:
            result.append("%s" % ordered[start][1])
        start = stop
    return result
def download_sample(job, sample, inputs):
    """Download the input sample.

    :param JobFunctionWrappingJob job: passed by Toil automatically
    :param tuple sample: Tuple containing (UUID,URL) of a sample
    :param Namespace inputs: Stores input arguments (see main)
    """
    uuid, url = sample
    job.fileStore.logToMaster('Downloading sample: {}'.format(uuid))
    tar_id = job.addChildJobFn(
        download_url_job, url, s3_key_path=inputs.ssec, disk='30G').rv()
    # Per-sample copy of the inputs, annotated with this sample's uuid
    # and the local core count.
    ns_fields = dict(vars(inputs))
    ns_fields['uuid'] = uuid
    ns_fields['cores'] = multiprocessing.cpu_count()
    sample_inputs = argparse.Namespace(**ns_fields)
    job.addFollowOnJobFn(process_sample, sample_inputs, tar_id,
                         cores=2, disk='60G')
def _find_observable_paths(extra_files=None):
    """Find all paths that should be observed for changes."""
    observed = set()
    # Entries from sys.path: use the directory for files, the path
    # itself for directories.
    for entry in sys.path:
        absolute = os.path.abspath(entry)
        observed.add(os.path.dirname(absolute)
                     if os.path.isfile(entry) else absolute)
    # Directories of any explicitly requested extra files.
    for filename in extra_files or ():
        observed.add(os.path.dirname(os.path.abspath(filename)))
    # Directories of every loaded module that has a file.
    for module in list(sys.modules.values()):
        fn = getattr(module, "__file__", None)
        if fn is None:
            continue
        observed.add(os.path.dirname(os.path.abspath(fn)))
    return _find_common_roots(observed)
def _init_client():
    """Initialize the MySQL connection settings and create the cache
    table if needed. No-op when a client is already configured.
    """
    if client is not None:
        return

    global _mysql_kwargs, _table_name

    _mysql_kwargs = {
        'host': __opts__.get('mysql.host', '127.0.0.1'),
        'user': __opts__.get('mysql.user', None),
        'passwd': __opts__.get('mysql.password', None),
        'db': __opts__.get('mysql.database', _DEFAULT_DATABASE_NAME),
        'port': __opts__.get('mysql.port', 3306),
        'unix_socket': __opts__.get('mysql.unix_socket', None),
        'connect_timeout': __opts__.get('mysql.connect_timeout', None),
        'autocommit': True,
    }
    _table_name = __opts__.get('mysql.table_name', _table_name)
    # Drop unset options. Popping while iterating .items() raises
    # RuntimeError on Python 3, so rebuild the dict instead.
    _mysql_kwargs = {k: v for k, v in _mysql_kwargs.items()
                     if v is not None}
    # Never log the real password.
    kwargs_copy = _mysql_kwargs.copy()
    kwargs_copy['passwd'] = "<hidden>"
    log.info("mysql_cache: Setting up client with params: %r", kwargs_copy)
    _create_table()
def format_payload(self, api_version, data):
    """Return data formatted appropriately for the given QualysGuard
    API version.

    v1/v2 endpoints take a parameter dict (query strings are parsed);
    'am'/'was'/'am2' endpoints take serialized XML.
    """
    if api_version in (1, 2):
        # isinstance instead of type()==: also accepts str subclasses.
        if isinstance(data, str):
            logger.debug('Converting string to dict:\n%s' % data)
            # Remove the leading '?' and trailing '&' of a query string.
            data = data.lstrip('?')
            data = data.rstrip('&')
            data = parse_qs(data)
            logger.debug('Converted:\n%s' % str(data))
    elif api_version in ('am', 'was', 'am2'):
        if isinstance(data, etree._Element):
            logger.debug('Converting lxml.builder.E to string')
            data = etree.tostring(data)
            logger.debug('Converted:\n%s' % data)
    return data
def cto(self):
    """The final character position in the surface string.

    Defaults to -1 if there is no valid cto value.
    """
    try:
        if self.lnk.type == Lnk.CHARSPAN:
            return self.lnk.data[1]
    except AttributeError:
        # No lnk (or malformed lnk): fall through to the default.
        pass
    return -1
def get_default_config_directory():
    """Return the default config directory, based on the actual test
    path.

    :returns: default config directory
    """
    # Frame [2] is the caller-of-caller; keep the frame inspection
    # inline so the stack depth matches the original implementation.
    caller_file = inspect.getouterframes(inspect.currentframe())[2][1]
    test_path = os.path.dirname(os.path.realpath(caller_file))
    return os.path.join(test_path, 'conf')
def set_mindays(name, mindays):
    """Set the minimum number of days between password changes. See man
    passwd.

    CLI Example:

    .. code-block:: bash

        salt '*' shadow.set_mindays username 7
    """
    pre_info = info(name)
    # Already at the requested value: nothing to do.
    if mindays == pre_info['min']:
        return True
    cmd = 'passwd -n {0} {1}'.format(mindays, name)
    __salt__['cmd.run'](cmd, python_shell=False)
    post_info = info(name)
    if post_info['min'] == pre_info['min']:
        # Command had no effect.
        return False
    return post_info['min'] == mindays
def delete_word(word):
    """Delete the word or phrase from the database.

    Prints a success or not-found message via termcolor.
    """
    conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
    curs = conn.cursor()
    # Parameterized queries prevent SQL injection through `word`.
    curs.execute('SELECT expl, pr FROM Word WHERE name = ?', (word,))
    res = curs.fetchall()
    if res:
        try:
            curs.execute('DELETE FROM Word WHERE name = ?', (word,))
        except Exception as e:
            print(e)
        else:
            print(colored('%s has been deleted from database' % word, 'green'))
            conn.commit()
        finally:
            curs.close()
            conn.close()
    else:
        # NOTE(review): the connection is not closed on this branch,
        # mirroring the original behaviour.
        print(colored('%s not exists in the database' % word, 'white', 'on_red'))
def activate():
    """Install the path-based import components.

    :return: (path_hook_index, pathfinder_index) — the positions at
        which the hook and finder were installed.
    """
    global PathFinder, FileFinder, ff_path_hook
    sys.path_hooks.append(ff_path_hook)
    path_hook_index = len(sys.path_hooks) - 1
    # Invalidate cached finders so the new hook is consulted.
    sys.path_importer_cache.clear()
    sys.meta_path.append(PathFinder)
    pathfinder_index = len(sys.meta_path) - 1
    return path_hook_index, pathfinder_index
async def validate(state, holdout_glob):
    """Validate the trained model against holdout games.

    Args:
        state: the RL loop State instance.
        holdout_glob: a glob that matches holdout games.
    """
    # Nothing to validate against: skip rather than fail.
    if not glob.glob(holdout_glob):
        print('Glob "{}" didn\'t match any files, skipping validation'.format(
            holdout_glob))
    else:
        await run(
            'python3', 'validate.py', holdout_glob,
            '--flagfile={}'.format(os.path.join(FLAGS.flags_dir,
                                                'validate.flags')),
            '--work_dir={}'.format(fsdb.working_dir()))
def get(self, cluster_id, show_progress=False):
    """Get information about a Cluster.

    :param cluster_id: id of the cluster to fetch.
    :param show_progress: include provisioning progress in the result.
    """
    query = parse.urlencode({"show_progress": show_progress})
    url = '/clusters/%(cluster_id)s?%(params)s' % {
        "cluster_id": cluster_id,
        "params": query,
    }
    return self._get(url, 'cluster')
def dcmtoquat(dcm):
    """Convert DCM to quaternion.

    Converts a rotation matrix (direction cosine matrix) into the
    equivalent quaternion.

    Parameters
    ----------
    dcm : (3,3) numpy array
        Rotation matrix defining a rotation from the b to a frame.

    Returns
    -------
    quat : (4,) numpy array
        Quaternion with vector part first and scalar part last
        [x y z w]; the vector is related to the eigen axis.
    """
    quat = np.zeros(4)
    # Scalar part from the matrix trace.
    quat[3] = 1 / 2 * np.sqrt(np.trace(dcm) + 1)
    # Vector part from the skew-symmetric component of the DCM.
    quat[0:3] = 1 / 4 / quat[3] * vee_map(dcm - dcm.T)
    return quat
def add_mixl_specific_results_to_estimation_res(estimator, results_dict):
    """Add mixed-logit specific entries to the estimation results.

    Computes the choice-sequence probabilities and stores them under
    ``simulated_sequence_probs`` and ``expanded_sequence_probs``.

    Parameters
    ----------
    estimator : MixedEstimator instance with ``choice_vector`` and
        ``rows_to_mixers`` attributes.
    results_dict : dict returned by scipy.optimize.minimize; must contain
        the key ``long_probs``.

    Returns
    -------
    dict
        ``results_dict`` with the two new keys added.
    """
    prob_res = mlc.calc_choice_sequence_probs(results_dict["long_probs"],
                                              estimator.choice_vector,
                                              estimator.rows_to_mixers,
                                              return_type='all')
    simulated, expanded = prob_res[0], prob_res[1]
    results_dict["simulated_sequence_probs"] = simulated
    results_dict["expanded_sequence_probs"] = expanded
    return results_dict
Stores particular items in the results dictionary that are unique to mixed logit-type models. In particular, this function calculates and adds `sequence_probs` and `expanded_sequence_probs` to the results dictionary. The `constrained_pos` object is also stored to the results_dict. Parameters ---------- estimator : an instance of the MixedEstimator class. Should contain a `choice_vector` attribute that is a 1D ndarray representing the choices made for this model's dataset. Should also contain a `rows_to_mixers` attribute that maps each row of the long format data to a unit of observation that the mixing is being performed over. results_dict : dict. This dictionary should be the dictionary returned from scipy.optimize.minimize. In particular, it should have the following `long_probs` key. Returns ------- results_dict.
def _try_get_current_manager(cls):
    """Try to detect the package manager used on the current Gentoo system.

    Returns a ``GentooPackageManager`` member, or None when not running
    on Gentoo, an unknown manager is requested, or the selected manager's
    Python bindings cannot be imported.
    """
    # Not a Gentoo system at all.
    if utils.get_distro_name().find('gentoo') == -1:
        return None
    if 'PACKAGE_MANAGER' in os.environ:
        pm = os.environ['PACKAGE_MANAGER']
        if pm == 'paludis':
            try:
                # The import is only a probe for the paludis bindings.
                import paludis
                return GentooPackageManager.PALUDIS
            except ImportError:
                cls._debug_doesnt_work('can\'t import paludis', name='PaludisPackageManager')
                return None
        elif pm == 'portage':
            # Explicitly requested portage: fall through to the probe below.
            pass
        else:
            # Some other manager was requested explicitly: give up.
            return None
    try:
        # Default case (and PACKAGE_MANAGER=portage): probe for portage.
        import portage
        return GentooPackageManager.PORTAGE
    except ImportError:
        cls._debug_doesnt_work('can\'t import portage', name='EmergePackageManager')
        return None
Try to detect a package manager used in a current Gentoo system.
def _pause_all_nodes(self, max_thread_pool_size=0):
    """Pause all cluster nodes, recording state so they can be restarted.

    Nodes are paused concurrently via a thread pool; each successfully
    paused node's resume-state is stored in ``self.paused_nodes``.

    :param max_thread_pool_size: upper bound for the worker pool
        (0 lets ``_make_thread_pool`` pick its default).
    :return: int - number of nodes that failed to pause.
    """
    failed = 0

    def _pause_specific_node(node):
        # A node without an instance id was never created (or is already
        # stopped); nothing to pause.
        if not node.instance_id:
            log.warning("Node `%s` has no instance id."
                        " It is either already stopped, or"
                        " never created properly. Not attempting"
                        " to stop it again.", node.name)
            return None
        try:
            # On success, returns the state object stored in paused_nodes.
            return node.pause()
        except Exception as err:
            log.error(
                "Could not stop node `%s` (instance ID `%s`): %s %s",
                node.name, node.instance_id, err, err.__class__)
            # Refresh IPs so later inspection reflects current reality.
            node.update_ips()
            return None

    nodes = self.get_all_nodes()
    thread_pool = self._make_thread_pool(max_thread_pool_size)
    # ``map`` preserves input order, so zip pairs each node with its result.
    for node, state in zip(nodes, thread_pool.map(_pause_specific_node, nodes)):
        if state is None:
            failed += 1
        else:
            self.paused_nodes[node.name] = state
    return failed
Pause all cluster nodes - ensure that we store data so that in the future the nodes can be restarted. :return: int - number of failures.
def update(self, name, modifiers):
    """Update this subroutine's name and modifiers.

    Delegates the rename to ``update_name`` (which also handles the name
    change in the parent module) before recording the new modifiers.
    """
    self.update_name(name)
    self.modifiers = modifiers
Updates the attributes for the subroutine instance, handles name changes in the parent module as well.
def coordinates(self):
    """Get the internal coordinate system.

    Returns the symbolic name (e.g. ``'jacobi'``,
    ``'democraticheliocentric'``, ``'whds'``) when the stored integer
    code is found in ``COORDINATES``, otherwise the raw integer.
    """
    code = self._coordinates
    for label, value in COORDINATES.items():
        if value == code:
            return label
    return code
Get or set the internal coordinate system. Available coordinate systems are: - ``'jacobi'`` (default) - ``'democraticheliocentric'`` - ``'whds'``
def set_theme(self, theme_name, toplevel=None, themebg=None):
    """Redirect the set_theme call to also set the Tk background color.

    :param theme_name: name of the ttk theme to activate.
    :param toplevel: also recolor the toplevel window; when None, falls
        back to the instance-level setting.
    :param themebg: also recolor this widget's background; when None,
        falls back to the instance-level setting.
    """
    # Use instance defaults when the caller gave no explicit override.
    if self._toplevel is not None and toplevel is None:
        toplevel = self._toplevel
    if self._themebg is not None and themebg is None:
        themebg = self._themebg
    # Apply the ttk theme first, then read its background color.
    ThemedWidget.set_theme(self, theme_name)
    color = self._get_bg_color()
    if themebg is True:
        self.config(background=color)
    if toplevel is True:
        self._setup_toplevel_hook(color)
Redirect the set_theme call to also set Tk background color
def ensure_path(path, mode=0o777):
    """Ensure that *path* exists, in a multiprocessing-safe way.

    If the path does not exist, recursively create it and its parents
    with the given mode.  If it already exists, do nothing.  The umask is
    cleared so the mode is applied exactly, and is always restored.

    Parameters
    ----------
    path : str
        File system path to the directory that should exist.
    mode : int
        Octal mode to use when creating the directory.

    Raises
    ------
    OSError
        If os.makedirs fails for any reason other than the directory
        already existing.
    """
    if not path:
        return
    umask = os.umask(0o000)
    try:
        os.makedirs(path, mode)
    except OSError as e:
        # Another process may have created it between the check and now.
        if e.errno != errno.EEXIST:
            raise
    finally:
        # The original leaked the zeroed umask whenever makedirs raised
        # (including the common EEXIST case); finally guarantees restore.
        os.umask(umask)
Ensure that path exists in a multiprocessing safe way. If the path does not exist, recursively create it and its parent directories using the provided mode. If the path already exists, do nothing. The umask is cleared to enable the mode to be set, and then reset to the original value after the mode is set. Parameters ---------- path : str file system path to a non-existent directory that should be created. mode : int octal representation of the mode to use when creating the directory. Raises ------ OSError If os.makedirs raises an OSError for any reason other than if the directory already exists.
def mget_list(item, index):
    """Get multiple items via an index of int, slice or list.

    :param item: the sequence to index into.
    :param index: an int (single element), slice (sub-sequence), or an
        iterable of int indices (list of selected elements).
    :return: the element, sub-sequence, or list of selected elements.
    """
    if isinstance(index, (int, slice)):
        return item[index]
    # A list comprehension (rather than a lazy ``map``) gives a concrete,
    # re-iterable result, consistent with the other branches on Python 3.
    return [item[i] for i in index]
get multiple items via an index of int, slice or list
def cmd_iter(
        self,
        tgt,
        fun,
        arg=(),
        timeout=None,
        tgt_type='glob',
        ret='',
        kwarg=None,
        **kwargs):
    '''
    Yields the individual minion returns as they come in.

    Publishes the job, then streams each minion's return as it arrives.
    Set ``expect_minions=True`` in ``kwargs`` to also receive entries for
    disconnected minions; ``yield_pub_data=True`` yields the publish data
    before any returns.

    :return: A generator yielding the individual minion returns
    '''
    # Remember whether the event bus was already subscribed so we only
    # tear down a subscription this call created.
    was_listening = self.event.cpub
    try:
        pub_data = self.run_job(
            tgt,
            fun,
            arg,
            tgt_type,
            ret,
            timeout,
            kwarg=kwarg,
            listen=True,
            **kwargs)
        if not pub_data:
            # Publish failed; surface the falsy result to the caller.
            yield pub_data
        else:
            if kwargs.get('yield_pub_data'):
                yield pub_data
            for fn_ret in self.get_iter_returns(pub_data['jid'],
                                                pub_data['minions'],
                                                timeout=self._get_timeout(timeout),
                                                tgt=tgt,
                                                tgt_type=tgt_type,
                                                **kwargs):
                # Skip empty interim results.
                if not fn_ret:
                    continue
                yield fn_ret
            self._clean_up_subscriptions(pub_data['jid'])
    finally:
        if not was_listening:
            self.event.close_pub()
Yields the individual minion returns as they come in The function signature is the same as :py:meth:`cmd` with the following exceptions. Normally :py:meth:`cmd_iter` does not yield results for minions that are not connected. If you want it to return results for disconnected minions set `expect_minions=True` in `kwargs`. :return: A generator yielding the individual minion returns .. code-block:: python >>> ret = local.cmd_iter('*', 'test.ping') >>> for i in ret: ... print(i) {'jerry': {'ret': True}} {'dave': {'ret': True}} {'stewart': {'ret': True}}
def get_related(self, instance, number):
    """Return a list of the most related objects to *instance*.

    The ``number`` highest-scoring primary keys are fetched from the
    queryset and the objects are returned in relatedness order.
    """
    scored = self.compute_related(instance.pk)[:number]
    pks = [pk for pk, _ in scored]
    fetched = self.queryset.model.objects.filter(pk__in=pks)
    # The DB does not preserve relatedness order; restore it explicitly.
    return sorted(fetched, key=lambda obj: pks.index(obj.pk))
Return a list of the most related objects to instance.
def get_arguments(self, name: str, strip: bool = True) -> List[str]:
    """Return a list of the arguments with the given name.

    Searches both the query and body arguments; returns an empty list
    when the argument is not present.
    """
    # ``strip`` must be a real bool, not merely truthy.
    assert isinstance(strip, bool)
    source = self.request.arguments
    return self._get_arguments(name, source, strip)
Returns a list of the arguments with the given name. If the argument is not present, returns an empty list. This method searches both the query and body arguments.
def get_role_config_group(self, name):
    """Get a role configuration group in the service by name.

    @param name: The name of the role config group.
    @return: An ApiRoleConfigGroup object.
    @since: API v3
    """
    resource_root = self._get_resource_root()
    return role_config_groups.get_role_config_group(
        resource_root, self.name, name, self._get_cluster_name())
Get a role configuration group in the service by name. @param name: The name of the role config group. @return: An ApiRoleConfigGroup object. @since: API v3
def decode_consumer_metadata_response(cls, data):
    """Decode bytes to a kafka.structs.ConsumerMetadataResponse.

    Arguments:
        data: bytes to decode
    """
    # Header: correlation id (int32), error code (int16), coordinator
    # node id (int32), all big-endian.
    ((correlation_id, error, nodeId), cur) = relative_unpack('>ihi', data, 0)
    # Coordinator host as a length-prefixed short string, then port (int32).
    (host, cur) = read_short_string(data, cur)
    ((port,), cur) = relative_unpack('>i', data, cur)
    # correlation_id is decoded but intentionally not part of the struct.
    return kafka.structs.ConsumerMetadataResponse(error, nodeId, host, port)
Decode bytes to a kafka.structs.ConsumerMetadataResponse Arguments: data: bytes to decode
def _bibliography(doc, terms, converters=[], format='html'):
    """Render citations from a MetatabDoc or a dict of BibTex dicts.

    If ``doc`` is a dict, each key names a citation and the value is a
    BibTex-formatted dict.

    :param doc: A MetatabDoc, or a dict of BibTex dicts
    :param terms: terms to cite when ``doc`` is a MetatabDoc
    :param converters: callables tried in order; the first one that does
        not return False supplies the citation dict for a term
    :param format: 'html' (default) or 'latex'
    :return: the formatted bibliography string
    """
    # NOTE(review): mutable default for ``converters`` is shared across
    # calls; safe only while callers never mutate it.
    output_backend = 'latex' if format == 'latex' else MetatabHtmlBackend

    def mk_cite(v):
        # First converter that accepts the term wins; otherwise fall back
        # to the generic citation dict.
        for c in converters:
            r = c(v)
            if r is not False:
                return r
        return make_citation_dict(v)

    if isinstance(doc, MetatabDoc):
        d = [mk_cite(t) for t in terms]
        # Keyed on each citation's name_link.
        cd = {e['name_link']: e for e in d}
    else:
        # NOTE(review): ``mk_cite`` is defined with one argument but is
        # called here with two (``v, i``) — this branch looks like it
        # would raise TypeError; confirm against upstream.
        cd = {k: mk_cite(v, i) for i, (k, v) in enumerate(doc.items())}
    return PybtexEngine().format_from_string(safe_dump({'entries': cd}),
                                             style=MetatabStyle,
                                             output_backend=output_backend,
                                             bib_format='yaml')
Render citations, from a document or a doct of dicts If the input is a dict, each key is the name of the citation, and the value is a BibTex formatted dict :param doc: A MetatabDoc, or a dict of BibTex dicts :return:
def _nix_collect_garbage():
    """Return the nix-collect-garbage command from the user's profile.

    Resolves the binary relative to the configured user's home directory
    so the correct nix store is used.
    """
    home = os.path.expanduser('~{0}'.format(__opts__['user']))
    nixhome = os.path.join(home, '.nix-profile/bin/')
    return [os.path.join(nixhome, 'nix-collect-garbage')]
Make sure we get the right nix-store, too.
def get_connection_id_by_endpoint(self, endpoint):
    """Return the connection id associated with a publically reachable
    endpoint, or raise KeyError if the endpoint is not found.

    Args:
        endpoint (str): A zmq-style uri which identifies a publically
            reachable endpoint.
    """
    with self._connections_lock:
        # Iterate items() to avoid a second dict lookup per connection.
        for connection_id, connection_info in self._connections.items():
            if connection_info.uri == endpoint:
                return connection_id
        # Include the endpoint in the error for easier debugging
        # (the original raised a bare KeyError()).
        raise KeyError(endpoint)
Returns the connection id associated with a publically reachable endpoint or raises KeyError if the endpoint is not found. Args: endpoint (str): A zmq-style uri which identifies a publically reachable endpoint.
def _indexable_tags(self):
    """Index tag ids for tags in this Entity's default tags namespace.

    Returns an empty string when the tags extension is absent or does
    not support taggings for this entity.
    """
    extension = current_app.extensions.get("tags")
    if not extension or not extension.supports_taggings(self):
        return ""
    default_ns = extension.entity_default_ns(self)
    return [tag for tag in extension.entity_tags(self) if tag.ns == default_ns]
Index tag ids for tags defined in this Entity's default tags namespace.
def get_tip_coordinates(self, axis=None):
    """Return coordinates of the tip positions for a tree.

    With no ``axis`` argument a 2-d array is returned whose first column
    holds the x coordinates and second column the y coordinates.  With
    ``axis='x'`` or ``axis='y'`` a 1-d array of just that axis is
    returned.
    """
    tips = self.get_node_coordinates()[:self.ntips]
    if axis == 'x':
        return tips[:, 0]
    if axis == 'y':
        return tips[:, 1]
    return tips
Returns coordinates of the tip positions for a tree. If no argument for axis then a 2-d array is returned. The first column is the x coordinates the second column is the y-coordinates. If you enter an argument for axis then a 1-d array will be returned of just that axis.
def press_event(self):
    """The mouse press event that initiated a mouse drag, if any."""
    press = self.mouse_event.press_event
    if press is None:
        return None
    # Return a copy of this event carrying the originating press event.
    event = self.copy()
    event.mouse_event = press
    return event
The mouse press event that initiated a mouse drag, if any.
def _counts_at_position(positions, orig_reader, cmp_reader): pos_counts = collections.defaultdict(lambda: collections.defaultdict(lambda: collections.defaultdict(int))) for orig_parts in orig_reader: cmp_parts = next(cmp_reader) for pos in positions: try: pos_counts[pos][int(orig_parts[pos+1])][int(cmp_parts[pos+1])] += 1 except IndexError: pass for pos, count_dict in pos_counts.iteritems(): for orig_val, cmp_dict in count_dict.iteritems(): for cmp_val, count in cmp_dict.iteritems(): yield pos+1, orig_val, cmp_val, count
Combine orignal and new qualities at each position, generating counts.
def command(self, func):
    """Decorator to add a command function to the registry.

    :param func: command function.
    :return: ``func`` unchanged, so the decorator is transparent.
    """
    self._commands[func.__name__] = Command(func)
    return func
Decorator to add a command function to the registry. :param func: command function.
def ffht(fEM, time, freq, ftarg):
    r"""Fourier transform using the Digital Linear Filter method.

    Transforms the frequency-domain EM response ``fEM`` to the time
    domain at the requested ``time`` values using cosine/sine DLF
    filters.

    Returns
    -------
    tEM : array
        Time-domain EM response of ``fEM`` for the given ``time``.
    conv : bool
        Always True here; only relevant for QWE/QUAD.
    """
    # Unpack Fourier-transform arguments: filter, points per decade, and
    # interpolation kind.
    ffhtfilt = ftarg[0]
    pts_per_dec = ftarg[1]
    kind = ftarg[2]
    # pts_per_dec == 0: reshape so each output time has its own column of
    # frequency responses — presumably the lagged-convolution DLF
    # variant; confirm against the dlf implementation.
    if pts_per_dec == 0:
        fEM = fEM.reshape(time.size, -1)
    tEM = dlf(fEM, 2*np.pi*freq, time, ffhtfilt, pts_per_dec, kind=kind)
    return tEM, True
r"""Fourier Transform using the Digital Linear Filter method. It follows the Filter methodology [Ande75]_, using Cosine- and Sine-filters; see ``fht`` for more information. The function is called from one of the modelling routines in :mod:`model`. Consult these modelling routines for a description of the input and output parameters. This function is based on ``get_CSEM1D_TD_FHT.m`` from the source code distributed with [Key12]_. Returns ------- tEM : array Returns time-domain EM response of ``fEM`` for given ``time``. conv : bool Only relevant for QWE/QUAD.
def value(self):
    """Calculated value for this Expression.

    Used when writing formulas with XlsxWriter to give cells an initial
    value when the sheet is loaded without being calculated.  Falls back
    to 0 when no value has been set.
    """
    try:
        stored = self.__value
        # Nested expressions resolve recursively to their own value.
        if isinstance(stored, Expression):
            return stored.value
        return stored
    except AttributeError:
        return 0
Set a calculated value for this Expression. Used when writing formulas using XlsxWriter to give cells an initial value when the sheet is loaded without being calculated.
def wait_for_event(event):
    """Wrap a win32 event into a `Future` and wait for it."""
    future = Future()

    def _on_ready():
        # Detach the handle before resolving so the callback fires once.
        get_event_loop().remove_win32_handle(event)
        future.set_result(None)

    get_event_loop().add_win32_handle(event, _on_ready)
    return future
Wraps a win32 event into a `Future` and wait for it.
def parse_ppi_graph(path: str, min_edge_weight: float = 0.0) -> Graph:
    """Build an undirected graph of gene interactions from an edgelist file.

    :param str path: The path to the edgelist file
    :param float min_edge_weight: Cutoff below which edges are removed
        (default 0, but 0.63 is another common choice).
    :return Graph: Protein-protein interaction graph
    """
    logger.info("In parse_ppi_graph()")
    graph = igraph.read(os.path.expanduser(path), format="ncol",
                        directed=False, names=True)
    # Drop weak interactions first, then any proteins left isolated.
    weak_edges = graph.es.select(weight_lt=min_edge_weight)
    graph.delete_edges(weak_edges)
    isolated = graph.vs.select(_degree=0)
    graph.delete_vertices(isolated)
    logger.info(f"Loaded PPI network.\n"
                f"Number of proteins: {len(graph.vs)}\n"
                f"Number of interactions: {len(graph.es)}\n")
    return graph
Build an undirected graph of gene interactions from edgelist file. :param str path: The path to the edgelist file :param float min_edge_weight: Cutoff to keep/remove the edges, default is 0, but could also be 0.63. :return Graph: Protein-protein interaction graph
def get_formatter(columns):
    """Return a click-option callback that builds a ColumnFormatter.

    The returned function parses a comma-separated value and returns a
    new ColumnFormatter.

    :param columns: a list of Column instances
    """
    # Map known column names to their Column instances for quick lookup.
    column_map = dict((column.name, column) for column in columns)

    def validate(ctx, param, value):
        # Click callback signature: (ctx, param, value).
        if value == '':
            raise click.BadParameter('At least one column is required.')
        formatter = ColumnFormatter()
        for column in [col.strip() for col in value.split(',')]:
            if column in column_map:
                formatter.add_column(column_map[column])
            else:
                # Unknown names become ad-hoc columns keyed on the
                # dotted path.
                formatter.add_column(Column(column, column.split('.')))
        return formatter
    return validate
This function returns a callback to use with click options. The returned function parses a comma-separated value and returns a new ColumnFormatter. :param columns: a list of Column instances
def forward_algo(self,observations):
    """Find the probability of an observation sequence for this model.

    :param observations: sequence whose elements are keys of
        ``self.obs_map``.
    :return: probability of the observation sequence (float).

    .. note:: No scaling is applied, so this is susceptible to underflow
       for long sequences; use the log-probability variant instead.
    """
    total_stages = len(observations)
    # Initialization: alpha_1(i) = pi_i * b_i(O_1)
    ob_ind = self.obs_map[ observations[0] ]
    alpha = np.multiply ( np.transpose(self.em_prob[:,ob_ind]) , self.start_prob )
    # Induction: alpha_t = (alpha_{t-1} . A) * b(O_t)
    for curr_t in range(1,total_stages):
        ob_ind = self.obs_map[observations[curr_t]]
        alpha = np.dot( alpha , self.trans_prob)
        alpha = np.multiply( alpha , np.transpose( self.em_prob[:,ob_ind] ))
    # Termination: sum the final forward variables over all states.
    total_prob = alpha.sum()
    return ( total_prob )
Finds the probability of an observation sequence for given model parameters **Arguments**: :param observations: The observation sequence, where each element belongs to 'observations' variable declared with __init__ object. :type observations: A list or tuple :return: The probability of occurence of the observation sequence :rtype: float **Example**: >>> states = ('s', 't') >>> possible_observation = ('A','B' ) >>> # Numpy arrays of the data >>> start_probability = np.matrix( '0.5 0.5 ') >>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ') >>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' ) >>> # Initialize class object >>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability) >>> observations = ('A', 'B','B','A') >>> print(test.forward_algo(observations)) .. note:: No scaling applied here and hence this routine is susceptible to underflow errors. Use :func:`hmm.log_prob` instead.
def _validate_tag_key(self, tag_key, exception_param='tags.X.member.key'):
    """Validate a tag key.

    :param tag_key: The tag key to check against.
    :param exception_param: Parameter name used when formatting the
        exception message (reflects the difference between the tag and
        untag APIs).
    :raises TagKeyTooBig: when the key exceeds 128 characters.
    :raises InvalidTagCharacters: when the key contains disallowed
        characters.
    """
    if len(tag_key) > 128:
        raise TagKeyTooBig(tag_key, param=exception_param)
    # The whole key must be a single run of allowed characters; a missing
    # or shorter first run means a disallowed character is present.
    runs = re.findall(r'[\w\s_.:/=+\-@]+', tag_key)
    if not runs or len(runs[0]) < len(tag_key):
        raise InvalidTagCharacters(tag_key, param=exception_param)
Validates the tag key.

:param tag_key: The tag key to check against.
:param exception_param: The exception parameter to send over to help
    format the message. This is to reflect
    the difference between the tag and untag APIs.
:return:
def from_dict(cls, d):
    """Construct an AdfKey object from a JSON dict.

    Parameters
    ----------
    d : dict
        A dict of saved attributes.

    Returns
    -------
    adfkey : AdfKey
        An AdfKey object recovered from the dict.
    """
    subkey_dicts = d.get("subkeys", [])
    if subkey_dicts:
        subkeys = [AdfKey.from_dict(sd) for sd in subkey_dicts]
    else:
        subkeys = None
    return cls(d.get("name"), d.get("options", None), subkeys)
Construct a MSONable AdfKey object from the JSON dict. Parameters ---------- d : dict A dict of saved attributes. Returns ------- adfkey : AdfKey An AdfKey object recovered from the JSON dict ``d``.
def _handle_hidden_tables(self, tbl_list, attr_name): if not self.displayed_only: return tbl_list return [x for x in tbl_list if "display:none" not in getattr(x, attr_name).get('style', '').replace(" ", "")]
Return list of tables, potentially removing hidden elements Parameters ---------- tbl_list : list of node-like Type of list elements will vary depending upon parser used attr_name : str Name of the accessor for retrieving HTML attributes Returns ------- list of node-like Return type matches `tbl_list`
def unescape_code_start(source, ext, language='python'):
    """Unescape code-start markers in the given source lines.

    Walks the lines with a string parser so markers inside quoted
    strings are left untouched.

    :param source: list of lines; modified in place and returned.
    :param ext: file extension used to recognise code-start markers.
    :param language: language used for quoting rules and unescaping.
    """
    parser = StringParser(language)
    for pos, line in enumerate(source):
        if not parser.is_quoted() and is_escaped_code_start(line, ext):
            unescaped = unesc(line, language)
            # Only replace when the unescaped form *still* looks like an
            # escaped code start, i.e. the line was escaped at least twice.
            if is_escaped_code_start(unescaped, ext):
                source[pos] = unescaped
        # Feed the original (not the replaced) line to the quote tracker.
        parser.read_line(line)
    return source
Unescape code start
def _data_dep_init(self, inputs):
    """Data dependent initialization for eager execution.

    Runs the wrapped layer once on *inputs* with its activation disabled
    and rescales ``g``/``bias`` from the observed pre-activation
    statistics.
    """
    with tf.variable_scope("data_dep_init"):
        # Temporarily remove the activation so we observe the raw
        # pre-activation outputs.
        activation = self.layer.activation
        self.layer.activation = None
        x_init = self.layer.call(inputs)
        m_init, v_init = tf.moments(x_init, self.norm_axes)
        # 1e-10 guards against division by zero variance.
        scale_init = 1. / tf.sqrt(v_init + 1e-10)
        # Rescale so outputs have zero mean and unit variance.
        self.layer.g = self.layer.g * scale_init
        self.layer.bias = (-m_init * scale_init)
        # Restore the activation and mark initialization done.
        self.layer.activation = activation
        self.initialized = True
Data dependent initialization for eager execution.
def _set_labels(self, catalogue):
    """Set each text's label from *catalogue* and return label token counts.

    Returns a dictionary of the unique labels in `catalogue` and the
    count of all tokens associated with each.  Texts without a label in
    the catalogue are set to the empty string.  Token counts allow
    semi-accurate sorting based on corpora size.

    :param catalogue: catalogue matching filenames to labels
    :type catalogue: `Catalogue`
    :rtype: `dict`
    """
    # The ``with`` block wraps everything in a single transaction.
    with self._conn:
        # Reset every text to the empty label before applying the
        # catalogue's assignments.
        self._conn.execute(constants.UPDATE_LABELS_SQL, [''])
        labels = {}
        for work, label in catalogue.items():
            self._conn.execute(constants.UPDATE_LABEL_SQL, [label, work])
            cursor = self._conn.execute(
                constants.SELECT_TEXT_TOKEN_COUNT_SQL, [work])
            token_count = cursor.fetchone()['token_count']
            # Accumulate token counts per label across works.
            labels[label] = labels.get(label, 0) + token_count
        return labels
Returns a dictionary of the unique labels in `catalogue` and the count of all tokens associated with each, and sets the record of each Text to its corresponding label. Texts that do not have a label specified are set to the empty string. Token counts are included in the results to allow for semi-accurate sorting based on corpora size. :param catalogue: catalogue matching filenames to labels :type catalogue: `Catalogue` :rtype: `dict`
def read(path, encoding="utf-8"):
    """Read a text file, auto-detecting the encoding on decode failure.

    Tries *encoding* first; if that fails, detects the encoding with
    chardet and decodes with the detected one.

    :param path: path of the file to read.
    :param encoding: encoding tried first (default ``"utf-8"``).
    :return: the decoded text.
    """
    with open(path, "rb") as f:
        content = f.read()
    try:
        return content.decode(encoding)
    except (UnicodeDecodeError, LookupError):
        # Narrowed from a bare ``except``: only decode problems (bad
        # bytes, or an unknown encoding name) trigger detection, so
        # unrelated errors are no longer silently swallowed.
        res = chardet.detect(content)
        return content.decode(res["encoding"])
Auto-decoding string reader. Usage:: >>> from angora.dataIO import textfile or >>> from angora.dataIO import * >>> textfile.read("test.txt")
def _prior_headerfooter(self):
    """|_Header| proxy on the prior sectPr element, or None if this is
    the first section."""
    preceding = self._sectPr.preceding_sectPr
    if preceding is None:
        return None
    return _Header(preceding, self._document_part, self._hdrftr_index)
|_Header| proxy on prior sectPr element or None if this is first section.
def eye(N, M=0, k=0, ctx=None, dtype=None, **kwargs):
    """Return a 2-D array with ones on the diagonal and zeros elsewhere.

    Parameters
    ----------
    N : int
        Number of rows in the output.
    M : int, optional
        Number of columns; 0 (the default) means ``N``.
    k : int, optional
        Index of the diagonal: 0 is the main diagonal, positive values
        select an upper diagonal, negative values a lower one.
    ctx : Context, optional
        Device context (defaults to the current default context).
    dtype : str or numpy.dtype, optional
        Value type (defaults to `float32`).

    Returns
    -------
    NDArray
        The created identity-like array.
    """
    if ctx is None:
        ctx = current_context()
    if dtype is None:
        dtype = mx_real_t
    return _internal._eye(N=N, M=M, k=k, ctx=ctx, dtype=dtype, **kwargs)
Return a 2-D array with ones on the diagonal and zeros elsewhere. Parameters ---------- N: int Number of rows in the output. M: int, optional Number of columns in the output. If 0, defaults to N. k: int, optional Index of the diagonal: 0 (the default) refers to the main diagonal, a positive value refers to an upper diagonal, and a negative value to a lower diagonal. ctx: Context, optional An optional device context (default is the current default context) dtype: str or numpy.dtype, optional An optional value type (default is `float32`) Returns ------- NDArray A created array Examples -------- >>> mx.nd.eye(2) [[ 1. 0.] [ 0. 1.]] <NDArray 2x2 @cpu(0)> >>> mx.nd.eye(2, 3, 1) [[ 0. 1. 0.] [ 0. 0. 1.]] <NDArray 2x3 @cpu(0)>
def get_fields_with_prop(cls, prop_key):
    """Return the fields that define the given prop key.

    Each list item is a tuple of the field name containing the prop key
    and the value of that prop key.

    :param prop_key: key name
    :return: list of tuples
    """
    fields = getattr(cls, '_fields')
    return [(name, getattr(field, prop_key))
            for name, field in fields.items()
            if hasattr(field, prop_key)]
Return a list of fields with a prop key defined Each list item will be a tuple of field name containing the prop key & the value of that prop key. :param prop_key: key name :return: list of tuples