code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def data_filler_customer(self, number_of_rows, cursor, conn):
    """Create and fill the ``customer`` table with ``number_of_rows``
    rows of fake data.

    :param number_of_rows: number of fake customer rows to insert
    :param cursor: DB-API cursor used for ``executemany``
    :param conn: DB-API connection, committed after the batch insert
    """
    customer_data = []
    try:
        for i in range(0, number_of_rows):
            customer_data.append((
                rnd_id_generator(self),
                self.faker.first_name(),
                self.faker.last_name(),
                self.faker.address(),
                self.faker.country(),
                self.faker.city(),
                self.faker.date(pattern="%d-%m-%Y"),  # registry_date
                self.faker.date(pattern="%d-%m-%Y"),  # birthdate
                self.faker.safe_email(),
                self.faker.phone_number(),
                self.faker.locale()))
        # NOTE(review): the ")" and "VALUES" literals concatenate with no
        # space (")VALUES") — accepted by MySQL, but confirm intent.
        customer_payload = ("INSERT INTO customer "
                            "(id, name, lastname, address, country, city, registry_date, birthdate, email, "
                            "phone_number, locale)"
                            "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
        cursor.executemany(customer_payload, customer_data)
        conn.commit()
        # success is logged at WARNING level (matches the project's style)
        logger.warning('detailed_registration Commits are successful after write job!',
                       extra=extra_information)
    except Exception as e:
        # any failure (generation or insert) is logged, not re-raised
        logger.error(e, extra=extra_information)
creates and fills the table with customer
def get_notifications(self, start=None, stop=None, *args, **kwargs):
    """Return all records in the table, ordered by notification id.

    :param start: exclusive lower bound on the notification id (records
        with id > start are returned)
    :param stop: inclusive upper bound (records with id <= stop)
    """
    filter_kwargs = {}
    if start is not None:
        filter_kwargs['%s__gte' % self.notification_id_name] = start + 1
    if stop is not None:
        filter_kwargs['%s__lt' % self.notification_id_name] = stop + 1
    objects = self.record_class.objects.filter(**filter_kwargs)
    # optional model fields further scope the query when present
    if hasattr(self.record_class, 'application_name'):
        objects = objects.filter(application_name=self.application_name)
    if hasattr(self.record_class, 'pipeline_id'):
        objects = objects.filter(pipeline_id=self.pipeline_id)
    objects = objects.order_by('%s' % self.notification_id_name)
    return objects.all()
Returns all records in the table.
def fingerprint_relaxation(T, p0, obs, tau=1, k=None, ncv=None):
    r"""Dynamical fingerprint (time-scales and amplitudes) for a
    relaxation experiment.

    Parameters
    ----------
    T : (M, M) ndarray or scipy.sparse matrix
        Transition matrix (must be reversible)
    p0 : (M,) ndarray
        Initial distribution
    obs : (M,) ndarray
        Observable, represented as vector on state space
    tau : int, optional
        Lag time of the transition matrix
    k : int, optional
        Number of time-scales and amplitudes to compute
    ncv : int, optional
        Number of Lanczos vectors (sparse path only)

    Returns
    -------
    timescales, amplitudes : (N,) ndarrays
    """
    T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric')
    n = T.shape[0]
    if not is_reversible(T):
        raise ValueError('Fingerprint calculation is not supported for nonreversible transition matrices. ')
    p0 = _types.ensure_ndarray(p0, ndim=1, size=n, kind='numeric')
    obs = _types.ensure_ndarray(obs, ndim=1, size=n, kind='numeric')
    if _issparse(T):
        return sparse.fingerprints.fingerprint_relaxation(T, p0, obs, tau=tau, k=k, ncv=ncv)
    else:
        # dense path takes no ncv (Lanczos) parameter
        return dense.fingerprints.fingerprint_relaxation(T, p0, obs, tau=tau, k=k)
r"""Dynamical fingerprint for relaxation experiment. The dynamical fingerprint is given by the implied time-scale spectrum together with the corresponding amplitudes. Parameters ---------- T : (M, M) ndarray or scipy.sparse matrix Transition matrix obs1 : (M,) ndarray Observable, represented as vector on state space obs2 : (M,) ndarray (optional) Second observable, for cross-correlations k : int (optional) Number of time-scales and amplitudes to compute tau : int (optional) Lag time of given transition matrix, for correct time-scales ncv : int (optional) The number of Lanczos vectors generated, `ncv` must be greater than k; it is recommended that ncv > 2*k Returns ------- timescales : (N,) ndarray Time-scales of the transition matrix amplitudes : (N,) ndarray Amplitudes for the relaxation experiment See also -------- relaxation, fingerprint_correlation References ---------- .. [1] Noe, F, S Doose, I Daidone, M Loellmann, M Sauer, J D Chodera and J Smith. 2010. Dynamical fingerprints for probing individual relaxation processes in biomolecular dynamics with simulations and kinetic experiments. PNAS 108 (12): 4822-4827. Notes ----- Fingerprints are a combination of time-scale and amplitude spectrum for a equilibrium correlation or a non-equilibrium relaxation experiment. **Relaxation** A relaxation experiment looks at the time dependent expectation value of an observable for a system out of equilibrium .. math:: \mathbb{E}_{w_{0}}[a(x, t)]=\sum_x w_0(x) a(x, t)=\sum_x w_0(x) \sum_y p^t(x, y) a(y). The fingerprint amplitudes :math:`\gamma_i` are given by .. math:: \gamma_i=\langle w_0, r_i\rangle \langle l_i, a \rangle. And the fingerprint time scales :math:`t_i` are given by .. math:: t_i=-\frac{\tau}{\log \lvert \lambda_i \rvert}. 
Examples -------- >>> import numpy as np >>> from msmtools.analysis import fingerprint_relaxation >>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]]) >>> p0 = np.array([1.0, 0.0, 0.0]) >>> a = np.array([1.0, 0.0, 0.0]) >>> ts, amp = fingerprint_relaxation(T, p0, a) >>> ts array([ inf, 9.49122158, 0.43429448]) >>> amp array([ 0.45454545, 0.5 , 0.04545455])
def get_block_from_consensus( self, consensus_hash ):
    """Get the block number with the given consensus hash.
    Return None if there is no such block.
    """
    query = 'SELECT block_id FROM snapshots WHERE consensus_hash = ?;'
    args = (consensus_hash,)
    con = self.db_open(self.impl, self.working_dir)
    rows = self.db_query_execute(con, query, args, verbose=False)
    res = None
    # consensus hashes are expected unique; the last row wins if not
    for r in rows:
        res = r['block_id']
    con.close()
    return res
Get the block number with the given consensus hash. Return None if there is no such block.
def send_mass_template_mail(subject_template, body_template, recipients, context=None):
    """Render an email subject and body from the given templates and
    context, then send it to each recipient individually.

    With no context, the "templates" are used verbatim as subject/body.
    """
    if context:
        subject, body = render_mail_template(subject_template, body_template, context)
    else:
        subject, body = subject_template, body_template
    # one (subject, body, from, [to]) tuple per recipient -> one mail each
    message_tuples = [(subject, body, conf.get('DEFAULT_FROM_EMAIL'), [r])
                      for r in recipients]
    send_mass_mail(message_tuples)
Renders an email subject and body using the given templates and context, then sends it to the given recipients list. The emails are sent one-by-one.
def bookmark_list():
    """Executor for `globus bookmark list`."""
    client = get_client()
    bookmark_iterator = client.bookmark_list()

    def get_ep_name(item):
        # Resolve the bookmark's endpoint id to a display name; deleted
        # endpoints render a placeholder instead of failing the listing.
        ep_id = item["endpoint_id"]
        try:
            ep_doc = client.get_endpoint(ep_id)
            return display_name_or_cname(ep_doc)
        except TransferAPIError as err:
            if err.code == "EndpointDeleted":
                return "[DELETED ENDPOINT]"
            else:
                raise err

    formatted_print(
        bookmark_iterator,
        fields=[
            ("Name", "name"),
            ("Bookmark ID", "id"),
            ("Endpoint ID", "endpoint_id"),
            ("Endpoint Name", get_ep_name),
            ("Path", "path"),
        ],
        response_key="DATA",
        json_converter=iterable_response_to_dict,
    )
Executor for `globus bookmark list`
def _load_audio_file(self):
    """Load the task's audio file into memory.

    :rtype: :class:`~aeneas.audiofile.AudioFile`
    """
    self._step_begin(u"load audio file")
    audio_file = AudioFile(
        file_path=self.task.audio_file_path_absolute,
        file_format=None,  # let AudioFile detect the format
        rconf=self.rconf,
        logger=self.logger
    )
    audio_file.read_samples_from_file()
    self._step_end()
    return audio_file
Load audio in memory. :rtype: :class:`~aeneas.audiofile.AudioFile`
def save_method(elements, module_path):
    """Recursively save methods with module name and signature.

    NOTE(review): writes into module-level ``methods`` and reads
    ``MODULES`` — confirm both are in scope at the call site.
    """
    for elem, signature in elements.items():
        if isinstance(signature, dict):
            # nested namespace: recurse with the extended path
            save_method(signature, module_path + (elem,))
        elif isinstance(signature, Class):
            save_method(signature.fields, module_path + (elem,))
        elif signature.ismethod():
            if elem in methods and module_path[0] != '__dispatch__':
                # name already registered under another module:
                # route it through the dispatch table instead
                assert elem in MODULES['__dispatch__']
                path = ('__dispatch__',)
                methods[elem] = (path, MODULES['__dispatch__'][elem])
            else:
                methods[elem] = (module_path, signature)
Recursively save methods with module name and signature.
def link(self):
    """Link all the types in this module and all included modules.

    Idempotent: returns immediately if already linked.
    """
    if self.linked:
        return self
    # mark linked first so cyclic includes terminate
    self.linked = True
    included_modules = []
    for include in self.includes.values():
        included_modules.append(include.link().surface)
    self.scope.add_surface('__includes__', tuple(included_modules))
    self.scope.add_surface('__thrift_source__', self.thrift_source)
    for linker in LINKERS:
        linker(self.scope).link()
    # expose (de)serialization entry points on the module surface
    self.scope.add_surface('loads', Deserializer(self.protocol))
    self.scope.add_surface('dumps', Serializer(self.protocol))
    return self
Link all the types in this module and all included modules.
def _get(self, word1, word2):
    """Return a possible next word after ``word1`` and ``word2``, or
    ``None`` if there's no possibility."""
    key = self._WSEP.join([self._sanitize(word1), self._sanitize(word2)])
    key = key.lower()
    if key not in self._db:
        return
    # pick one candidate uniformly at random
    return sample(self._db[key], 1)[0]
Return a possible next word after ``word1`` and ``word2``, or ``None`` if there's no possibility.
def hue(self, hue):
    """Set the group hue.

    :param hue: Hue in decimal percent (0.0-1.0).
    :raises ValueError: if ``hue`` is outside [0, 1].
    """
    if hue < 0 or hue > 1:
        raise ValueError("Hue must be a percentage "
                         "represented as decimal 0-1.0")
    self._hue = hue
    cmd = self.command_set.hue(hue)
    self.send(cmd)
Set the group hue. :param hue: Hue in decimal percent (0.0-1.0).
def text(self, path, compression=None, lineSep=None):
    """Save the DataFrame content as UTF-8 text files at ``path``.

    The DataFrame must have a single string column; each row becomes a
    line in the output.

    :param path: the path in any Hadoop supported file system
    :param compression: optional codec (none, bzip2, gzip, lz4, snappy,
        deflate)
    :param lineSep: line separator; defaults to ``\\n`` when None
    """
    self._set_opts(compression=compression, lineSep=lineSep)
    self._jwrite.text(path)
Saves the content of the DataFrame in a text file at the specified path. The text files will be encoded as UTF-8. :param path: the path in any Hadoop supported file system :param compression: compression codec to use when saving to file. This can be one of the known case-insensitive shorten names (none, bzip2, gzip, lz4, snappy and deflate). :param lineSep: defines the line separator that should be used for writing. If None is set, it uses the default value, ``\\n``. The DataFrame must have only one column that is of string type. Each row becomes a new line in the output file.
def render_context_with_title(self, context):
    """Render ``self.page_title`` against ``context`` and store it under
    the 'page_title' key; does nothing if the key already exists."""
    if "page_title" not in context:
        con = template.Context(context)
        temp = template.Template(encoding.force_text(self.page_title))
        context["page_title"] = temp.render(con)
    return context
Render a page title and insert it into the context. This function takes in a context dict and uses it to render the page_title variable. It then appends this title to the context using the 'page_title' key. If there is already a page_title key defined in context received then this function will do nothing.
def _generate_badge(self, subject, status):
    """Generate SVG for one badge via shields.io.

    :param subject: subject; left-hand side of badge
    :type subject: str
    :param status: status; right-hand side of badge
    :type status: str
    :return: badge SVG
    :rtype: str
    :raises Exception: if shields.io does not answer with HTTP 200
    """
    url = 'https://img.shields.io/badge/%s-%s-brightgreen.svg' \
          '?style=flat&maxAge=3600' % (subject, status)
    logger.debug("Getting badge for %s => %s (%s)", subject, status, url)
    res = requests.get(url)
    if res.status_code != 200:
        # BUG FIX: the original passed %-style args as extra Exception()
        # arguments, so the message was never interpolated.
        raise Exception(
            "Error: got status %s for shields.io badge: %s" % (
                res.status_code, res.text))
    logger.debug('Got %d character response from shields.io', len(res.text))
    return res.text
Generate SVG for one badge via shields.io. :param subject: subject; left-hand side of badge :type subject: str :param status: status; right-hand side of badge :type status: str :return: badge SVG :rtype: str
def handshake(self):
    """Set up the connection with the Serf agent (lazily connecting)
    and perform the initial handshake."""
    if self._socket is None:
        self._socket = self._connect()
    return self.call('handshake', {"Version": 1}, expect_body=False)
Sets up the connection with the Serf agent and does the initial handshake.
def get_repository_owner_and_name() -> Tuple[str, str]:
    """Check the 'origin' remote to get the owner and name of the remote
    repository.

    :return: A tuple of the owner and name.
    :raises HvcsRepoParseError: if the remote URL does not match.
    """
    check_repo()
    url = repo.remote('origin').url
    # NOTE(review): pattern requires the URL to end in ".git" (and the
    # "." is unescaped) — verify all supported remote URL shapes match.
    parts = re.search(r'([^/:]+)/([^/]+).git$', url)
    if not parts:
        raise HvcsRepoParseError
    debug('get_repository_owner_and_name', parts)
    return parts.group(1), parts.group(2)
Checks the origin remote to get the owner and name of the remote repository. :return: A tuple of the owner and name.
def assert_is_instance(obj, cls, msg_fmt="{msg}"):
    """Fail if an object is not an instance of a class or tuple of
    classes.

    msg_fmt supports the fields: msg (default message), obj (tested
    object), types (tuple of types tested against).
    """
    if not isinstance(obj, cls):
        msg = "{!r} is an instance of {!r}, expected {!r}".format(
            obj, obj.__class__, cls
        )
        # normalize to a tuple so msg_fmt's {types} is always a tuple
        types = cls if isinstance(cls, tuple) else (cls,)
        fail(msg_fmt.format(msg=msg, obj=obj, types=types))
Fail if an object is not an instance of a class or tuple of classes. >>> assert_is_instance(5, int) >>> assert_is_instance('foo', (str, bytes)) >>> assert_is_instance(5, str) Traceback (most recent call last): ... AssertionError: 5 is an instance of <class 'int'>, expected <class 'str'> The following msg_fmt arguments are supported: * msg - the default error message * obj - object to test * types - tuple of types tested against
def add_service_port(service, port):
    """Add a new port to the specified (permanent) firewalld service.

    .. versionadded:: 2016.11.0

    :raises CommandExecutionError: if the service does not exist.
    """
    if service not in get_services(permanent=True):
        raise CommandExecutionError('The service does not exist.')
    cmd = '--permanent --service={0} --add-port={1}'.format(service, port)
    return __firewall_cmd(cmd)
Add a new port to the specified service. .. versionadded:: 2016.11.0 CLI Example: .. code-block:: bash salt '*' firewalld.add_service_port ssh 80
def _remember_avatarness( self, character, graph, node, is_avatar=True, branch=None, turn=None, tick=None ): branch = branch or self.branch turn = turn or self.turn tick = tick or self.tick self._avatarness_cache.store( character, graph, node, branch, turn, tick, is_avatar ) self.query.avatar_set( character, graph, node, branch, turn, tick, is_avatar )
Use this to record a change in avatarness. Should be called whenever a node that wasn't an avatar of a character now is, and whenever a node that was an avatar of a character now isn't. ``character`` is the one using the node as an avatar, ``graph`` is the character the node is in.
def _check(self, file):
    """Run the appropriate check based on ``file``'s extension (falling
    back to libmagic type sniffing) and return its result.

    :raises Error: for missing, unknown-type, or non-text files.
    """
    if not os.path.exists(file):
        raise Error("file \"{}\" not found".format(file))
    _, extension = os.path.splitext(file)
    try:
        check = self.extension_map[extension[1:]]
    except KeyError:
        # unknown extension: sniff the file's magic type instead
        magic_type = magic.from_file(file)
        for name, cls in self.magic_map.items():
            if name in magic_type:
                check = cls
                break
        else:
            raise Error("unknown file type \"{}\", skipping...".format(file))
    try:
        # normalize: strip trailing whitespace from every line
        with open(file) as f:
            code = "\n".join(line.rstrip() for line in f)
    except UnicodeDecodeError:
        raise Error("file does not seem to contain text, skipping...")
    try:
        # guarantee a trailing newline on non-empty content
        if code[-1] != '\n':
            code += '\n'
    except IndexError:
        pass
    return check(code)
Run the appropriate check based on `file`'s extension and return its result, otherwise raise an Error
def resource_to_url(resource, request=None, quote=False):
    """Convert the given resource to a URL.

    :param request: Request object (supplies the host name part of the
        URL). If not given, the current request is used.
    :param bool quote: If set, the URL returned will be quoted.
    """
    if request is None:
        request = get_current_request()
    reg = get_current_registry()
    cnv = reg.getAdapter(request, IResourceUrlConverter)
    return cnv.resource_to_url(resource, quote=quote)
Converts the given resource to a URL. :param request: Request object (required for the host name part of the URL). If this is not given, the current request is used. :param bool quote: If set, the URL returned will be quoted.
def compute_gas_limit_bounds(parent: BlockHeader) -> Tuple[int, int]:
    """Compute the boundaries for the block gas limit based on the
    parent block.

    :return: (lower_bound, upper_bound); the lower bound is clamped to
        GAS_LIMIT_MINIMUM.
    """
    boundary_range = parent.gas_limit // GAS_LIMIT_ADJUSTMENT_FACTOR
    upper_bound = parent.gas_limit + boundary_range
    lower_bound = max(GAS_LIMIT_MINIMUM, parent.gas_limit - boundary_range)
    return lower_bound, upper_bound
Compute the boundaries for the block gas limit based on the parent block.
def _search_indicators_page_generator(self, search_term=None, enclave_ids=None,
                                      from_time=None, to_time=None,
                                      indicator_types=None, tags=None,
                                      excluded_tags=None, start_page=0,
                                      page_size=None):
    """Create a generator from the |search_indicators_page| method that
    yields each successive page of indicator search results."""
    # bind all filter arguments; the page generator supplies page number
    get_page = functools.partial(self.search_indicators_page, search_term,
                                 enclave_ids, from_time, to_time,
                                 indicator_types, tags, excluded_tags)
    return Page.get_page_generator(get_page, start_page, page_size)
Creates a generator from the |search_indicators_page| method that returns each successive page. :param str search_term: The term to search for. If empty, no search term will be applied. Otherwise, must be at least 3 characters. :param list(str) enclave_ids: list of enclave ids used to restrict indicators to specific enclaves (optional - by default indicators from all of user's enclaves are returned) :param int from_time: start of time window in milliseconds since epoch (optional) :param int to_time: end of time window in milliseconds since epoch (optional) :param list(str) indicator_types: a list of indicator types to filter by (optional) :param list(str) tags: Name (or list of names) of tag(s) to filter indicators by. Only indicators containing ALL of these tags will be returned. (optional) :param list(str) excluded_tags: Indicators containing ANY of these tags will be excluded from the results. :param int start_page: The page to start on. :param page_size: The size of each page. :return: The generator.
def has_foreign_key(self, name):
    """Return whether this table has a foreign key constraint with the
    given name.

    :param name: The constraint name
    :type name: str
    :rtype: bool
    """
    normalized = self._normalize_identifier(name)
    return normalized in self._fk_constraints
Returns whether this table has a foreign key constraint with the given name. :param name: The constraint name :type name: str :rtype: bool
def cart_to_polar(arr_c):
    """Return cartesian vectors in their polar representation
    ((radius, inclination, azimuth) convention).

    Parameters
    ----------
    arr_c : array, shape (..., d), d in {1, 2, 3}
        Cartesian vectors, last axis indexing the dimension.
    """
    if arr_c.shape[-1] == 1:
        # 1-D: returned unchanged. NOTE(review): sign is preserved, so
        # this is the value, not |x| — confirm callers expect that.
        arr_p = arr_c.copy()
    elif arr_c.shape[-1] == 2:
        arr_p = np.empty_like(arr_c)
        arr_p[..., 0] = vector_mag(arr_c)
        arr_p[..., 1] = np.arctan2(arr_c[..., 1], arr_c[..., 0])
    elif arr_c.shape[-1] == 3:
        arr_p = np.empty_like(arr_c)
        arr_p[..., 0] = vector_mag(arr_c)
        # inclination from the +z axis
        arr_p[..., 1] = np.arccos(arr_c[..., 2] / arr_p[..., 0])
        arr_p[..., 2] = np.arctan2(arr_c[..., 1], arr_c[..., 0])
    else:
        raise Exception('Invalid vector for polar representation')
    return arr_p
Return cartesian vectors in their polar representation. Parameters ---------- arr_c: array, shape (a1, a2, ..., d) Cartesian vectors, with last axis indexing the dimension. Returns ------- arr_p: array, shape of arr_c Polar vectors, using (radius, inclination, azimuth) convention.
def mock_decorator_with_params(*oargs, **okwargs):
    """Optionally mock a decorator that takes parameters.

    E.g.::

        @blah(stuff=True)
        def things():
            pass

    The returned decorator passes callables through unchanged and
    replaces anything else with a ``Mock``.
    """
    def inner(fn, *iargs, **ikwargs):
        # ``callable`` is the idiomatic, correct check; the old
        # ``hasattr(fn, '__call__')`` was a pre-``callable`` workaround.
        if callable(fn):
            return fn
        return Mock()
    return inner
Optionally mock a decorator that takes parameters E.g.: @blah(stuff=True) def things(): pass
def first_return():
    """Generate a random walk and return its length up to the moment
    the walker first returns to the origin.

    The walker provably returns eventually, so this halts — but it may
    take a very long time; intended for interactive use only.
    """
    # pipe DSL: drop the initial position, keep steps until Origin recurs
    walk = randwalk() >> drop(1) >> takewhile(lambda v: v != Origin) >> list
    return len(walk)
Generate a random walk and return its length up to the moment that the walker first returns to the origin. It is mathematically provable that the walker will eventually return, meaning that the function call will halt, although it may take a *very* long time and your computer may run out of memory! Thus, try this interactively only.
def clean(self):
    """Run all of the cleaners added by the user."""
    if self.cleaners:
        # NOTE(review): the ``loop`` argument to asyncio.wait is
        # deprecated (removed in 3.10) — confirm the targeted Python.
        yield from asyncio.wait([x() for x in self.cleaners], loop=self.loop)
Run all of the cleaners added by the user.
def __set_token_expired(self, value):
    """Internal helper for oauth code: record the token expiry time as
    now + ``value`` seconds."""
    self._token_expired = datetime.datetime.now() + datetime.timedelta(seconds=value)
    return
Internal helper for oauth code
def fetch(self):
    """Fetch an AvailablePhoneNumberCountryInstance.

    :returns: Fetched AvailablePhoneNumberCountryInstance
    :rtype: twilio.rest.api.v2010.account.available_phone_number.AvailablePhoneNumberCountryInstance
    """
    params = values.of({})
    payload = self._version.fetch(
        'GET',
        self._uri,
        params=params,
    )
    return AvailablePhoneNumberCountryInstance(
        self._version,
        payload,
        account_sid=self._solution['account_sid'],
        country_code=self._solution['country_code'],
    )
Fetch a AvailablePhoneNumberCountryInstance :returns: Fetched AvailablePhoneNumberCountryInstance :rtype: twilio.rest.api.v2010.account.available_phone_number.AvailablePhoneNumberCountryInstance
def NotEqualTo(self, value):
    """Set the type of the WHERE clause as "not equal to".

    Args:
        value: The value to be used in the WHERE condition.

    Returns:
        The query builder that this WHERE builder links to.
    """
    self._awql = self._CreateSingleValueCondition(value, '!=')
    return self._query_builder
Sets the type of the WHERE clause as "not equal to". Args: value: The value to be used in the WHERE condition. Returns: The query builder that this WHERE builder links to.
def merge(directory=None, revisions='', message=None, branch_label=None, rev_id=None):
    """Merge two revisions together; creates a new migration file.

    :raises RuntimeError: if Alembic is older than 0.7.0.
    """
    if alembic_version >= (0, 7, 0):
        config = current_app.extensions['migrate'].migrate.get_config(
            directory)
        command.merge(config, revisions, message=message,
                      branch_label=branch_label, rev_id=rev_id)
    else:
        raise RuntimeError('Alembic 0.7.0 or greater is required')
Merge two revisions together. Creates a new migration file
def handle_os_exceptions():
    """Handle pycosio exceptions and re-raise standard OS exceptions.

    Generator body (``yield``) — presumably wrapped with
    ``@contextmanager`` at the definition site; confirm.
    """
    try:
        yield
    except ObjectException:
        exc_type, exc_value, _ = exc_info()
        # map the library exception type onto an OSError subclass
        raise _OS_EXCEPTIONS.get(exc_type, OSError)(exc_value)
    except (OSError, same_file_error, UnsupportedOperation):
        # already a standard OS-level error: propagate untouched
        raise
    except Exception:
        exc_type, exc_value, _ = exc_info()
        raise OSError('%s%s' % (
            exc_type, (', %s' % exc_value) if exc_value else ''))
Handles pycosio exceptions and raise standard OS exceptions.
def deep_del(data, fn):
    """Create dict copy with removed items.

    Recursively remove items where fn(value) is True.  ``fn`` is tested
    against the value first, so a matching sub-dict is removed whole.

    Returns:
        dict: New dict with matching items removed.
    """
    result = {}
    # BUG FIX: ``dict.iteritems()`` does not exist on Python 3;
    # ``items()`` is equivalent here (we only build a new dict).
    for k, v in data.items():
        if not fn(v):
            if isinstance(v, dict):
                result[k] = deep_del(v, fn)
            else:
                result[k] = v
    return result
Create dict copy with removed items. Recursively remove items where fn(value) is True. Returns: dict: New dict with matching items removed.
def get_search_scores(query, choices, ignore_case=True, template='{}',
                      valid_only=False, sort=False):
    """Search for ``query`` inside each choice and return a list of
    (text, enriched_text, score) tuples; lower score = better match.

    Spaces in the query are ignored.  With ``valid_only``, non-matching
    choices are dropped; with ``sort``, results are ordered by score.
    """
    query = query.replace(' ', '')
    pattern = get_search_regex(query, ignore_case)
    results = []
    for choice in choices:
        r = re.search(pattern, choice)
        if query and r:
            result = get_search_score(query, choice, ignore_case=ignore_case,
                                      apply_regex=False, template=template)
        else:
            if query:
                # query given but no match
                result = (choice, choice, NOT_FOUND_SCORE)
            else:
                # empty query: every choice is a neutral match
                result = (choice, choice, NO_SCORE)
        if valid_only:
            if result[-1] != NOT_FOUND_SCORE:
                results.append(result)
        else:
            results.append(result)
    if sort:
        results = sorted(results, key=lambda row: row[-1])
    return results
Search for query inside choices and return a list of tuples. Returns a list of tuples of text with the enriched text (if a template is provided) and a score for the match. Lower scores imply a better match. Parameters ---------- query : str String with letters to search in each choice (in order of appearance). choices : list of str List of sentences/words in which to search for the 'query' letters. ignore_case : bool, optional Optional value perform a case insensitive search (True by default). template : str, optional Optional template string to surround letters found in choices. This is useful when using a rich text editor ('{}' by default). Examples: '<b>{}</b>', '<code>{}</code>', '<i>{}</i>' Returns ------- results : list of tuples List of tuples where the first item is the text (enriched if a template was used) and a search score. Lower scores means better match.
def prepare_inputseries(self, ramflag: bool = True) -> None:
    """Call method |Element.prepare_inputseries| of all handled
    |Element| objects, showing a progress bar."""
    for element in printtools.progressbar(self):
        element.prepare_inputseries(ramflag)
Call method |Element.prepare_inputseries| of all handled |Element| objects.
def data_in_label(intvl_in, dtype_in_time, dtype_in_vert=False):
    """Create a string label specifying the input data of a calculation.

    The label is ``from_<interval>_<time-dtype>`` with an optional
    vertical-dtype suffix; doubled underscores are collapsed.
    """
    label = '_'.join(['from', intvl_in, dtype_in_time]).replace('__', '_')
    if dtype_in_vert:
        label = '_'.join([label, dtype_in_vert]).replace('__', '_')
    return label
Create string label specifying the input data of a calculation.
def crypto_secretstream_xchacha20poly1305_init_push(state, key):
    """Initialize a crypto_secretstream_xchacha20poly1305 encryption
    buffer.

    :param state: a secretstream state object
    :param key: must be crypto_secretstream_xchacha20poly1305_KEYBYTES long
    :type key: bytes
    :return: header
    :rtype: bytes
    """
    ensure(
        isinstance(state, crypto_secretstream_xchacha20poly1305_state),
        'State must be a crypto_secretstream_xchacha20poly1305_state object',
        raising=exc.TypeError,
    )
    ensure(
        isinstance(key, bytes),
        'Key must be a bytes sequence',
        raising=exc.TypeError,
    )
    ensure(
        len(key) == crypto_secretstream_xchacha20poly1305_KEYBYTES,
        'Invalid key length',
        raising=exc.ValueError,
    )
    headerbuf = ffi.new(
        "unsigned char []",
        crypto_secretstream_xchacha20poly1305_HEADERBYTES,
    )
    rc = lib.crypto_secretstream_xchacha20poly1305_init_push(
        state.statebuf, headerbuf, key)
    ensure(rc == 0, 'Unexpected failure', raising=exc.RuntimeError)
    # copy out of the cffi buffer into an owned bytes object
    return ffi.buffer(headerbuf)[:]
Initialize a crypto_secretstream_xchacha20poly1305 encryption buffer. :param state: a secretstream state object :type state: crypto_secretstream_xchacha20poly1305_state :param key: must be :data:`.crypto_secretstream_xchacha20poly1305_KEYBYTES` long :type key: bytes :return: header :rtype: bytes
def get_forwarding_information_base(self, filter=''):
    """Get the forwarding information base data for a logical
    interconnect (maximum of 100 entries; optional filter criteria).

    Returns:
        list: interconnect MAC address entries.
    """
    uri = "{}{}".format(self.data["uri"], self.FORWARDING_INFORMATION_PATH)
    return self._helper.get_collection(uri, filter=filter)
Gets the forwarding information base data for a logical interconnect. A maximum of 100 entries is returned. Optional filtering criteria might be specified. Args: filter (list or str): Filtering criteria may be specified using supported attributes: interconnectUri, macAddress, internalVlan, externalVlan, and supported relation = (Equals). macAddress is 12 hexadecimal digits with a colon between each pair of digits (upper case or lower case). The default is no filter; all resources are returned. Returns: list: A set of interconnect MAC address entries.
def is_installable_dir(path):
    """Return True if `path` is a directory containing a setup.py file."""
    if not os.path.isdir(path):
        return False
    return os.path.isfile(os.path.join(path, "setup.py"))
Return True if `path` is a directory containing a setup.py file.
def fixed_indexer(self):
    """Boolean indexer for fixed/tied parameter status.

    Returns
    -------
    numpy.ndarray of bool
        True where a parameter's partrans is "fixed" or "tied".
    """
    isfixed = self.pst.parameter_data.partrans.\
        apply(lambda x : x in ["fixed","tied"])
    return isfixed.values
indexer for fixed status Returns ------- fixed_indexer : pandas.Series
def scan(cls, path):
    """Scan directory for templates.

    Entries that are not valid templates are skipped; an unreadable
    directory yields an empty list.
    """
    result = []
    try:
        for _p in listdir(path):
            try:
                result.append(Template(_p, op.join(path, _p)))
            except ValueError:
                # not a valid template: skip this entry
                continue
    except OSError:
        pass
    return result
Scan directory for templates.
def _generate_sequences_for_texts(self, l1, t1, l2, t2, ngrams):
    """Generate and output aligned sequences for texts ``t1`` (label
    ``l1``) and ``t2`` (label ``l2``) from ``ngrams``, writing an HTML
    report named "<l1>-<l2>.html" into the output directory (only when
    at least one sequence is found)."""
    self._reverse_substitutes = dict((v, k) for k, v in self._substitutes.items())
    sequences = []
    covered_spans = [[], []]
    for ngram in ngrams:
        sequences.extend(self._generate_sequences_for_ngram(
            t1, t2, ngram, covered_spans))
    if sequences:
        sequences.sort(key=lambda x: x.start_index)
        context = {'l1': l1, 'l2': l2, 'sequences': sequences}
        report_name = '{}-{}.html'.format(l1, l2)
        os.makedirs(self._output_dir, exist_ok=True)
        self._write(context, self._output_dir, report_name)
Generates and outputs aligned sequences for the texts `t1` and `t2` from `ngrams`. :param l1: label of first witness :type l1: `str` :param t1: text content of first witness :type t1: `str` :param l2: label of second witness :type l2: `str` :param t2: text content of second witness :type t2: `str` :param ngrams: n-grams to base sequences on :type ngrams: `list` of `str`
def kill_processes(self):
    """Forcibly SIGKILL all remaining child processes after the
    shutdown grace period has been exceeded, then mark stopped."""
    LOGGER.critical('Max shutdown exceeded, forcibly exiting')
    processes = self.active_processes(False)
    while processes:
        for proc in self.active_processes(False):
            if int(proc.pid) != int(os.getpid()):
                LOGGER.warning('Killing %s (%s)', proc.name, proc.pid)
                try:
                    os.kill(int(proc.pid), signal.SIGKILL)
                except OSError:
                    # process already gone
                    pass
            else:
                LOGGER.warning('Cowardly refusing kill self (%s, %s)',
                               proc.pid, os.getpid())
        # give the kills a moment to take effect, then re-check
        time.sleep(0.5)
        processes = self.active_processes(False)
    LOGGER.info('Killed all children')
    return self.set_state(self.STATE_STOPPED)
Gets called on shutdown by the timer when too much time has gone by, calling the terminate method instead of nicely asking for the consumers to stop.
def checkout_with_fetch(git_folder, refspec, repository="origin"):
    """Fetch the refspec, then checkout FETCH_HEAD.

    Beware: this leaves the repository in detached head mode.
    """
    _LOGGER.info("Trying to fetch and checkout %s", refspec)
    repo = Repo(str(git_folder))
    repo.git.fetch(repository, refspec)
    repo.git.checkout("FETCH_HEAD")
    _LOGGER.info("Fetch and checkout success for %s", refspec)
Fetch the refspec, and checkout FETCH_HEAD. Beware that you will be in detached head mode.
def xyz(self, arrnx3):
    """Set the positions of the particles in the Compound, excluding
    the Ports.

    Parameters
    ----------
    arrnx3 : np.ndarray, shape=(n,3), dtype=float
        The new particle positions

    Raises
    ------
    ValueError
        If the Compound is a leaf and more than one coordinate is given.
    """
    if not self.children:
        if not arrnx3.shape[0] == 1:
            # BUG FIX: the two implicitly-concatenated string literals
            # lacked a separating space ("...more than onecoordinate").
            raise ValueError(
                'Trying to set position of {} with more than one '
                'coordinate: {}'.format(self, arrnx3))
        self.pos = np.squeeze(arrnx3)
    else:
        for atom, coords in zip(
                self._particles(include_ports=False), arrnx3):
            atom.pos = coords
Set the positions of the particles in the Compound, excluding the Ports. This function does not set the position of the ports. Parameters ---------- arrnx3 : np.ndarray, shape=(n,3), dtype=float The new particle positions
def get_formats(function_types=None):
    """Return the available formats.

    NOTE(review): with ``function_types=None`` this returns a dict
    {format: display name}, but with a list it returns a plain list of
    format keys — callers must handle both shapes; confirm intended.
    """
    if function_types is None:
        return {k: v['display'] for k, v in _converter_map.items()}
    ftypes = [x.lower() for x in function_types]
    ftypes = set(ftypes)
    ret = []
    for fmt, v in _converter_map.items():
        # 'valid' of None means the format supports every function type
        if v['valid'] is None or ftypes <= v['valid']:
            ret.append(fmt)
    return ret
Returns the available formats mapped to display name. This is returned as an ordered dictionary, with the most common at the top, followed by the rest in alphabetical order If a list is specified for function_types, only those formats supporting the given function types will be returned.
def extract_original_links(base_url, bs4):
    """Extract links from a BeautifulSoup object that start with
    ``base_url``, with or without a "www." prefix.

    :param base_url: url the links must match
    :param bs4: BeautifulSoup document
    :return: deduplicated list of matching links
    """
    valid_url = convert_invalid_url(base_url)
    url = urlparse(valid_url)
    base_url = '{}://{}'.format(url.scheme, url.netloc)
    base_url_with_www = '{}://www.{}'.format(url.scheme, url.netloc)
    links = extract_links(bs4)
    result_links = [anchor for anchor in links if anchor.startswith(base_url)]
    result_links_www = [anchor for anchor in links
                        if anchor.startswith(base_url_with_www)]
    # set() deduplicates; note it does not preserve order
    return list(set(result_links + result_links_www))
Extracting links that contains specific url from BeautifulSoup object :param base_url: `str` specific url that matched with the links :param bs4: `BeautifulSoup` :return: `list` List of links
def remove(self, priority, observer, callble):
    """Remove observer entries matching (priority, observer, callble)."""
    self.flush()
    # iterate backwards so deletion does not shift unseen indices
    for i in range(len(self) - 1, -1, -1):
        p,o,c = self[i]
        if priority==p and observer==o and callble==c:
            del self._poc[i]
Remove one observer, which had priority and callble.
def GetAccounts(self):
    """Return the client accounts associated with the user's manager
    account.

    Returns:
        list: List of ManagedCustomer data objects.
    """
    selector = {
        'fields': ['CustomerId', 'CanManageClients']
    }
    accounts = self.client.GetService('ManagedCustomerService').get(selector)
    return accounts['entries']
Return the client accounts associated with the user's manager account. Returns: list List of ManagedCustomer data objects.
def send_ether_over_wpa(self, pkt, **kwargs):
    """Send an Ethernet packet using the WPA channel.

    Extra arguments will be ignored, and are just left for
    compatibility.
    """
    payload = LLC() / SNAP() / pkt[Ether].payload
    dest = pkt.dst
    if dest == "ff:ff:ff:ff:ff:ff":
        # broadcast frames go out on the group channel
        self.send_wpa_to_group(payload, dest)
    else:
        # unicast is only supported towards the single known client
        assert dest == self.client
        self.send_wpa_to_client(payload)
Send an Ethernet packet using the WPA channel Extra arguments will be ignored, and are just left for compatibility
def add(self, pattern, start):
    """Recursively add a linear pattern to the AC automaton.

    Returns the list of end nodes reached after consuming ``pattern``
    from ``start``.  A tuple element denotes alternatives: each branch
    is added, then the remainder of the pattern from every branch end.
    """
    if not pattern:
        return [start]
    if isinstance(pattern[0], tuple):
        # alternatives: fan out on each, then continue with the rest
        match_nodes = []
        for alternative in pattern[0]:
            end_nodes = self.add(alternative, start=start)
            for end in end_nodes:
                match_nodes.extend(self.add(pattern[1:], end))
        return match_nodes
    else:
        # reuse an existing transition or create a new node for it
        if pattern[0] not in start.transition_table:
            next_node = BMNode()
            start.transition_table[pattern[0]] = next_node
        else:
            next_node = start.transition_table[pattern[0]]
        if pattern[1:]:
            end_nodes = self.add(pattern[1:], start=next_node)
        else:
            end_nodes = [next_node]
        return end_nodes
Recursively adds a linear pattern to the AC automaton
def identical(self, o):
    """Exact comparison between two StridedIntervals; mainly for tests.

    :param o: The other StridedInterval to compare with.
    :return: True if all four fields match exactly, False otherwise.
    """
    return (self.bits == o.bits
            and self.stride == o.stride
            and self.lower_bound == o.lower_bound
            and self.upper_bound == o.upper_bound)
Used to make exact comparisons between two StridedIntervals. Usually it is only used in test cases. :param o: The other StridedInterval to compare with. :return: True if they are exactly same, False otherwise.
def connect_output(self, node):
    """Connect another node to our output.

    The downstream node is triggered automatically when our output
    updates.

    Args:
        node (SGNode): The node that should receive our output

    Raises:
        TooManyOutputsError: if max_outputs is already reached.
    """
    if len(self.outputs) == self.max_outputs:
        raise TooManyOutputsError("Attempted to connect too many nodes to the output of a node",
                                  max_outputs=self.max_outputs, stream=self.stream)
    self.outputs.append(node)
Connect another node to our output. This downstream node will automatically be triggered when we update our output. Args: node (SGNode): The node that should receive our output
def full_load(self):
    """Process the data directories (those possibly skipped under the
    "fast_load" option) and parse the Rich header if present."""
    self.parse_data_directories()

    # simple attribute container for the parsed Rich header fields
    class RichHeader(object):
        pass

    rich_header = self.parse_rich_header()
    if rich_header:
        self.RICH_HEADER = RichHeader()
        self.RICH_HEADER.checksum = rich_header.get('checksum', None)
        self.RICH_HEADER.values = rich_header.get('values', None)
        self.RICH_HEADER.key = rich_header.get('key', None)
        self.RICH_HEADER.raw_data = rich_header.get('raw_data', None)
        self.RICH_HEADER.clear_data = rich_header.get('clear_data', None)
    else:
        self.RICH_HEADER = None
Process the data directories. This method will load the data directories which might not have been loaded if the "fast_load" option was used.
def is_scalar(value: Any) -> bool:
    """Whether to treat a value as a scalar.

    Any non-iterable, string, or 0-D array counts as scalar.
    """
    return (
        getattr(value, 'ndim', None) == 0
        or isinstance(value, (str, bytes))
        or not isinstance(value, (Iterable, ) + dask_array_type))
Whether to treat a value as a scalar. Any non-iterable, string, or 0-D array
def add(self, element):
    """Add an element to the HyperLogLog.

    Datatype cardinality will be updated when the object is saved.

    :param element: the element to add
    :type element: str
    :raises TypeError: if ``element`` is not a string.
    """
    if not isinstance(element, six.string_types):
        raise TypeError("Hll elements can only be strings")
    self._adds.add(element)
Adds an element to the HyperLogLog. Datatype cardinality will be updated when the object is saved. :param element: the element to add :type element: str
def remove_zero_points(self):
    """Remove all elements whose point or normal norm is zero (or whose
    normal is non-finite).

    Updates the NormalCloud in-place; returns nothing.
    """
    # keep columns with nonzero point norm, nonzero normal norm,
    # and a finite first normal component
    points_of_interest = np.where((np.linalg.norm(self.point_cloud.data, axis=0) != 0.0) &
                                  (np.linalg.norm(self.normal_cloud.data, axis=0) != 0.0) &
                                  (np.isfinite(self.normal_cloud.data[0,:])))[0]
    self.point_cloud._data = self.point_cloud.data[:, points_of_interest]
    self.normal_cloud._data = self.normal_cloud.data[:, points_of_interest]
Remove all elements where the norms and points are zero. Note ---- This returns nothing and updates the NormalCloud in-place.
def create(cls, name, md5_password=None, connect_retry=120,
           session_hold_timer=180, session_keep_alive=60):
    """Create a new BGP Connection Profile.

    :param str name: name of profile
    :param str md5_password: optional md5 password
    :param int connect_retry: connect retry timer, in seconds
    :param int session_hold_timer: session hold timer, in seconds
    :param int session_keep_alive: session keep alive timer, in seconds
    :raises CreateElementFailed: failed creating profile
    :rtype: BGPConnectionProfile
    """
    json = {'name': name,
            'connect': connect_retry,
            'session_hold_timer': session_hold_timer,
            'session_keep_alive': session_keep_alive}
    if md5_password:
        json.update(md5_password=md5_password)
    return ElementCreator(cls, json)
Create a new BGP Connection Profile. :param str name: name of profile :param str md5_password: optional md5 password :param int connect_retry: The connect retry timer, in seconds :param int session_hold_timer: The session hold timer, in seconds :param int session_keep_alive: The session keep alive timer, in seconds :raises CreateElementFailed: failed creating profile :return: instance with meta :rtype: BGPConnectionProfile
def events(cls, filters):
    """Retrieve event details from status.gandi.net.

    :param dict filters: query filters; a truthy ``current`` entry is
        popped and sent as ``current=true``.
    """
    want_current = filters.pop('current', False)
    extra_params = [('current', 'true')] if want_current else []
    # Sort for a deterministic query string.
    query = uparse.urlencode(sorted(list(filters.items())) + extra_params)
    return cls.json_get('%s/events?%s' % (cls.api_url, query),
                        empty_key=True, send_key=False)
Retrieve events details from status.gandi.net.
def runTemplate(id, data={}):
    """Run an existing Template and wait for the result.

    :param id: ID of the template to run
    :param data: json data containing the input_vars
    :return: the template result (see ``Template.getResult``)

    NOTE(review): the mutable default ``data={}`` is shared across calls;
    safe only as long as callers never mutate it.
    """
    conn = Qubole.agent()
    path = str(id) + "/run"
    res = conn.post(Template.element_path(path), data)
    cmdType = res['command_type']
    cmdId = res['id']
    # SECURITY: eval() on a server-supplied string resolves the command
    # class; a hostile or compromised API response could execute arbitrary
    # code.  Prefer an explicit name->class mapping.
    cmdClass = eval(cmdType)
    cmd = cmdClass.find(cmdId)
    # Poll until the command reaches a terminal state.
    while not Command.is_done(cmd.status):
        time.sleep(Qubole.poll_interval)
        cmd = cmdClass.find(cmd.id)
    return Template.getResult(cmdClass, cmd)
Run an existing Template and wait for the result. Prints the result to stdout. Args: `id`: ID of the template to run `data`: JSON data containing the input_vars Returns: An integer status (0: success, 1: failure)
def _replace_property(property_key, property_value, resource, logical_id):
    """Replace a property with an asset on a given resource.

    Mutates ``resource`` in place.

    Parameters
    ----------
    property_key : str
        The property to replace on the resource
    property_value : str
        The new value of the property
    resource : dict
        Dictionary representing the Resource to change
    logical_id : str
        LogicalId of the Resource
    """
    if property_key and property_value:
        # Bug fix: setdefault ensures the assignment sticks even when the
        # resource has no Properties section yet (dict.get returned a
        # fresh dict whose mutation was silently discarded).
        resource.setdefault(PROPERTIES_KEY, {})[property_key] = property_value
    elif property_key or property_value:
        # Exactly one of the two metadata entries was provided; warn and skip.
        # (Also fixes the "aws:assert:property" typo in the message.)
        LOG.info("WARNING: Ignoring Metadata for Resource %s. Metadata contains only aws:asset:path or "
                 "aws:asset:property but not both", logical_id)
Replace a property with an asset on a given resource This method will mutate the template Parameters ---------- property str The property to replace on the resource property_value str The new value of the property resource dict Dictionary representing the Resource to change logical_id str LogicalId of the Resource
def define_log_renderer(fmt, fpath, quiet):
    """Return the final log processor that structlog requires to render."""
    # An explicit format or a log file path always means JSON output.
    if fmt or fpath is not None:
        return structlog.processors.JSONRenderer()
    # Interactive terminal (and not quieted): human-friendly console output.
    if sys.stderr.isatty() and not quiet:
        return structlog.dev.ConsoleRenderer()
    return structlog.processors.JSONRenderer()
the final log processor that structlog requires to render.
def run_task(factory, **kwargs):
    """Run a MapReduce task, optionally under cProfile.

    When a pstats directory is configured (``pstats_dir`` kwarg or the
    ``PSTATS_DIR`` environment variable), the task runs under cProfile
    and the stats file is uploaded to HDFS; otherwise it runs directly.
    """
    context = TaskContext(factory, **kwargs)
    # The kwarg takes precedence over the environment variable.
    pstats_dir = kwargs.get("pstats_dir", os.getenv(PSTATS_DIR))
    if pstats_dir:
        # Deferred imports: profiling support (and pydoop) is optional.
        import cProfile
        import tempfile
        import pydoop.hdfs as hdfs
        hdfs.mkdir(pstats_dir)
        # mkstemp returns an open fd we don't need; close it so cProfile
        # can rewrite the file by name.
        fd, pstats_fn = tempfile.mkstemp(suffix=".pstats")
        os.close(fd)
        cProfile.runctx(
            "_run(context, **kwargs)", globals(), locals(),
            filename=pstats_fn
        )
        pstats_fmt = kwargs.get(
            "pstats_fmt", os.getenv(PSTATS_FMT, DEFAULT_PSTATS_FMT)
        )
        # Stats filename embeds task type, partition and temp basename.
        name = pstats_fmt % (
            context.task_type,
            context.get_task_partition(),
            os.path.basename(pstats_fn)
        )
        hdfs.put(pstats_fn, hdfs.path.join(pstats_dir, name))
    else:
        _run(context, **kwargs)
\ Run a MapReduce task. Available keyword arguments: * ``raw_keys`` (default: :obj:`False`): pass map input keys to context as byte strings (ignore any type information) * ``raw_values`` (default: :obj:`False`): pass map input values to context as byte strings (ignore any type information) * ``private_encoding`` (default: :obj:`True`): automatically serialize map output k/v and deserialize reduce input k/v (pickle) * ``auto_serialize`` (default: :obj:`True`): automatically serialize reduce output (map output in map-only jobs) k/v (call str/unicode then encode as utf-8) Advanced keyword arguments: * ``pstats_dir``: run the task with cProfile and store stats in this dir * ``pstats_fmt``: use this pattern for pstats filenames (experts only) The pstats dir and filename pattern can also be provided via ``pydoop submit`` arguments, with lower precedence in case of clashes.
def run_strelka(job, tumor_bam, normal_bam, univ_options, strelka_options, split=True):
    """Run the strelka subgraph on the DNA bams.

    Optionally split the results into per-chromosome vcfs.

    :param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
    :param dict normal_bam: Dict of bam and bai for normal DNA-Seq
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict strelka_options: Options specific to strelka
    :param bool split: Should the results be split into per-chromosome vcfs?
    :return: fsID of the genome-level vcf, or (when ``split``) a
        per-chromosome dict of {'snvs': fsID, 'indels': fsID}
    :rtype: toil.fileStore.FileID|dict
    """
    # An explicit chromosome list in the options wins; otherwise derive it
    # from the genome fasta index.
    if strelka_options['chromosomes']:
        chromosomes = strelka_options['chromosomes']
    else:
        chromosomes = sample_chromosomes(job, strelka_options['genome_fai'])
    # Don't request more cores than there are chromosomes to process.
    num_cores = min(len(chromosomes), univ_options['max_cores'])
    # Disk is promised from the actual bam/fasta sizes at schedule time.
    strelka = job.wrapJobFn(run_strelka_full, tumor_bam, normal_bam, univ_options,
                            strelka_options,
                            disk=PromisedRequirement(strelka_disk,
                                                     tumor_bam['tumor_dna_fix_pg_sorted.bam'],
                                                     normal_bam['normal_dna_fix_pg_sorted.bam'],
                                                     strelka_options['genome_fasta']),
                            memory='6G',
                            cores=num_cores)
    job.addChild(strelka)
    if split:
        # Child job that unmerges the genome-level result per chromosome.
        unmerge_strelka = job.wrapJobFn(wrap_unmerge, strelka.rv(), chromosomes,
                                        strelka_options,
                                        univ_options).encapsulate()
        strelka.addChild(unmerge_strelka)
        return unmerge_strelka.rv()
    else:
        return strelka.rv()
Run the strelka subgraph on the DNA bams. Optionally split the results into per-chromosome vcfs. :param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq :param dict normal_bam: Dict of bam and bai for normal DNA-Seq :param dict univ_options: Dict of universal options used by almost all tools :param dict strelka_options: Options specific to strelka :param bool split: Should the results be split into perchrom vcfs? :return: Either the fsID to the genome-level vcf or a dict of results from running strelka on every chromosome perchrom_strelka: |- 'chr1': | |-'snvs': fsID | +-'indels': fsID |- 'chr2': | |-'snvs': fsID | +-'indels': fsID |-... | +- 'chrM': |-'snvs': fsID +-'indels': fsID :rtype: toil.fileStore.FileID|dict
def _to_str(dumped_val, encoding='utf-8', ordered=True): _dict = OrderedDict if ordered else dict if isinstance(dumped_val, dict): return OrderedDict((k, _to_str(v, encoding)) for k,v in dumped_val.items()) elif isinstance(dumped_val, (list, tuple)): return [_to_str(v, encoding) for v in dumped_val] elif isinstance(dumped_val, bytes): try: d = dumped_val.decode('utf-8') except Exception: d = repr(dumped_val) return d else: return dumped_val
Convert bytes in a dump value to str, allowing json encode
def _check_psutil(self, instance):
    """Gather connection-state and interface-counter metrics via psutil."""
    tags = instance.get('tags', [])
    if self._collect_cx_state:
        self._cx_state_psutil(tags=tags)
    self._cx_counters_psutil(tags=tags)
Gather metrics about connections states and interfaces counters using psutil facilities
def set_primary_parameters(self, **kwargs):
    """Set all primary parameters at once.

    Every name in ``_PRIMARY_PARAMETERS`` must be supplied together as a
    keyword argument; otherwise a ValueError is raised.
    """
    given = sorted(kwargs.keys())
    required = sorted(self._PRIMARY_PARAMETERS)
    if given == required:
        for (key, value) in kwargs.items():
            setattr(self, key, value)
    else:
        # Message fixes: close the backtick after the method name and
        # drop the duplicated "to" ("has to to define").
        raise ValueError(
            'When passing primary parameter values as initialization '
            'arguments of the instantaneous unit hydrograph class `%s`, '
            'or when using method `set_primary_parameters`, one has to '
            'define all values at once via keyword arguments. '
            'But instead of the primary parameter names `%s` the '
            'following keywords were given: %s.'
            % (objecttools.classname(self),
               ', '.join(required), ', '.join(given)))
Set all primary parameters at once.
def encode_chain_list(in_strings):
    """Convert a list of strings to a single NULL-padded byte array.

    Each string is ASCII-encoded and right-padded with NULL bytes up to
    the fixed chain-name length.

    :param in_strings: the input strings
    :return: the encoded byte array
    """
    chain_len = mmtf.utils.constants.CHAIN_LEN
    null_byte = mmtf.utils.constants.NULL_BYTE.encode('ascii')
    # b''.join avoids the quadratic cost of repeated bytes concatenation,
    # and multiplying the pad replaces the per-byte padding loop.
    return b"".join(
        in_s.encode('ascii') + null_byte * (chain_len - len(in_s))
        for in_s in in_strings)
Convert a list of strings to a single NULL-padded byte array. :param in_strings: the input strings :return the encoded byte array
def function(self, addr=None, name=None, create=False, syscall=False, plt=None):
    """Get a function object from the function manager.

    Pass either `addr` or `name` with the appropriate values.

    :param int addr: Address of the function.
    :param str name: Name of the function.
    :param bool create: Create the function if it does not exist
        (address lookups only).
    :param bool syscall: True to mark a newly created function as a syscall.
    :param bool or None plt: True to find the PLT stub, False to find a
        non-PLT stub, None to disable this restriction.
    :return: The Function instance, or None if the function is not found
        and ``create`` is False.
    :rtype: Function or None
    """
    if addr is not None:
        try:
            # NOTE(review): this mapping's .get() apparently raises
            # KeyError on a miss (unlike dict.get) -- confirm against the
            # function-map implementation.
            f = self._function_map.get(addr)
            if plt is None or f.is_plt == plt:
                return f
        except KeyError:
            if create:
                # Indexing the map creates the function on demand.
                f = self._function_map[addr]
                if name is not None:
                    f.name = name
                if syscall:
                    f.is_syscall=True
                return f
    elif name is not None:
        # Name lookups scan all known functions.
        for func in self._function_map.values():
            if func.name == name:
                if plt is None or func.is_plt == plt:
                    return func
    return None
Get a function object from the function manager. Pass either `addr` or `name` with the appropriate values. :param int addr: Address of the function. :param str name: Name of the function. :param bool create: Whether to create the function or not if the function does not exist. :param bool syscall: True to create the function as a syscall, False otherwise. :param bool or None plt: True to find the PLT stub, False to find a non-PLT stub, None to disable this restriction. :return: The Function instance, or None if the function is not found and create is False. :rtype: Function or None
def process_global(name, val=None, setval=False):
    """Access and set global variables for the current process.

    :param name: key of the process-global value
    :param val: value stored under ``name`` when ``setval`` is True
    :param setval: if True store ``val``; otherwise return the stored
        value (or None when absent)
    """
    proc = current_process()
    try:
        store = proc._pulsar_globals
    except AttributeError:
        # First access in this process: create the store with its lock.
        store = proc._pulsar_globals = {'lock': Lock()}
    if not setval:
        return store.get(name)
    store[name] = val
Access and set global variables for the current process.
def retire_asset_ddo(self, did):
    """Retire an asset DDO from Aquarius.

    :param did: Asset DID string
    :return: API response (depends on implementation)
    :raises AquariusGenericError: if the metadata store did not return 200
    """
    response = self.requests_session.delete(
        f'{self.url}/{did}', headers=self._headers)
    if response.status_code != 200:
        raise AquariusGenericError(f'Unable to remove DID: {response}')
    logging.debug(f'Removed asset DID: {did} from metadata store')
    return response
Retire asset ddo of Aquarius. :param did: Asset DID string :return: API response (depends on implementation)
def check_server_running(pid):
    """Determine whether the process with the given pid is running."""
    if pid == os.getpid():
        # Our own pid means a stale record of this very invocation,
        # not a separately running server.
        return False
    try:
        # Signal 0 checks existence/permission without delivering anything.
        os.kill(pid, 0)
    except OSError as oe:
        if oe.errno == errno.ESRCH:
            return False
        raise
    return True
Determine if the given process is running
def info(msg, *args, **kw):
    """Print a system message to stdout.

    System messages inform about the flow of the script and should mark
    major milestones during the build.
    """
    if args or kw:
        msg = msg.format(*args, **kw)
    shell.cprint('-- <32>{}<0>'.format(msg))
Print a system message to stdout. System messages should inform about the flow of the script. These should mark major milestones during the build.
def _AppendRecord(self):
    """Add the current record to the result if it is well formed.

    The record is dropped when any value raises SkipRecord, or when every
    collected entry is empty (None or []).  Remaining None entries are
    normalised to '' before appending.
    """
    if not self.values:
        return
    cur_record = []
    for value in self.values:
        try:
            value.OnSaveRecord()
        except SkipRecord:
            # A single SkipRecord discards the whole record.
            self._ClearRecord()
            return
        except SkipValue:
            # SkipValue omits just this value from the record.
            continue
        cur_record.append(value.value)
    # If every entry is None or [], the record carries no data: discard.
    if len(cur_record) == (cur_record.count(None) + cur_record.count([])):
        return
    # Replace remaining None values with empty strings.
    while None in cur_record:
        cur_record[cur_record.index(None)] = ''
    self._result.append(cur_record)
    self._ClearRecord()
Adds current record to result if well formed.
def register_serialization_method(self, name, serialize_func):
    """Register a custom serialization method usable via schema configuration.

    :param name: name of the serialization method
    :param serialize_func: callable implementing the serialization
    :raises ValueError: if ``name`` would shadow a built-in method
    """
    if name in self._default_serialization_methods:
        # Bug fix: the message previously lacked the `% name` argument,
        # leaving a literal "%s" in the error text.
        raise ValueError(
            "Can't replace original %s serialization method" % name)
    self._serialization_methods[name] = serialize_func
Register a custom serialization method that can be used via schema configuration
def loads(string, filename=None, includedir=''):
    """Load the contents of ``string`` to a Python object.

    The returned object is a subclass of ``dict`` that exposes string
    keys as attributes as well.

    Example:

    >>> config = libconf.loads('window: { title: "libconfig example"; };')
    >>> config['window']['title']
    'libconfig example'
    >>> config.window.title
    'libconfig example'

    :raises TypeError: if ``string`` is not a unicode string
    """
    try:
        f = io.StringIO(string)
    except TypeError:
        # Message fix: "must by unicode" -> "must be unicode".
        raise TypeError("libconf.loads() input string must be unicode")
    return load(f, filename=filename, includedir=includedir)
Load the contents of ``string`` to a Python object The returned object is a subclass of ``dict`` that exposes string keys as attributes as well. Example: >>> config = libconf.loads('window: { title: "libconfig example"; };') >>> config['window']['title'] 'libconfig example' >>> config.window.title 'libconfig example'
def get_mmax(self, mfd_conf, msr, rake, area):
    """Get the mmax for the fault.

    Reads the maximum magnitude directly from the config file, falling
    back to the magnitude scaling relation otherwise.

    :param dict mfd_conf: Configuration (see setUp for parameters)
    :param msr: Instance of :class:`nhlib.scalerel`
    :param float rake: Rake of the fault (in range -180 to 180)
    :param float area: Area of the fault surface (km^2)
    """
    # Robustness fix: tolerate an absent 'Maximum_Magnitude' key (the
    # uncertainty lookup below was already guarded; this one raised
    # KeyError when the key was missing).
    if mfd_conf.get('Maximum_Magnitude'):
        self.mmax = mfd_conf['Maximum_Magnitude']
    else:
        self.mmax = msr.get_median_mag(area, rake)
    if mfd_conf.get('Maximum_Magnitude_Uncertainty'):
        self.mmax_sigma = mfd_conf['Maximum_Magnitude_Uncertainty']
    else:
        self.mmax_sigma = msr.get_std_dev_mag(rake)
Gets the mmax for the fault - reading directly from the config file or using the msr otherwise :param dict mfd_conf: Configuration file (see setUp for parameters) :param msr: Instance of :class:`nhlib.scalerel` :param float rake: Rake of the fault (in range -180 to 180) :param float area: Area of the fault surface (km^2)
def profile(model_specification, results_directory, process):
    """Run a simulation based on MODEL_SPECIFICATION under cProfile.

    The stats file is written into the results directory; when
    ``process`` is truthy a human-readable .txt report sorted by
    cumulative time is also emitted.
    """
    model_specification = Path(model_specification)
    results_directory = Path(results_directory)
    stats_name = model_specification.name.replace('yaml', 'stats')
    out_stats_file = results_directory / stats_name
    command = f'run_simulation("{model_specification}", "{results_directory}")'
    cProfile.runctx(command, globals=globals(), locals=locals(),
                    filename=out_stats_file)
    if not process:
        return
    out_txt_file = results_directory / (out_stats_file.name + '.txt')
    with open(out_txt_file, 'w') as f:
        report = pstats.Stats(str(out_stats_file), stream=f)
        report.sort_stats('cumulative')
        report.print_stats()
Run a simulation based on the provided MODEL_SPECIFICATION and profile the run.
def count(self, q):
    """Shorthand for counting the results of a specific query.

    The query is executed as ``"SELECT COUNT(*) %s" % q`` and the count
    is parsed from the second line of the textual result.

    :param str q: the query fragment to count
    :return int: the resulting count
    """
    full_query = "SELECT COUNT(*) %s" % q
    count_line = self.quick(full_query).split("\n")[1]
    return int(count_line)
Shorthand for counting the results of a specific query. ## Arguments * `q` (str): The query to count. This will be executed as: `"SELECT COUNT(*) %s" % q`. ## Returns * `count` (int): The resulting count.
def as_fs(self):
    """Return the CPE Name as a formatted string of version 2.3.

    :returns: CPE Name as formatted string
    :rtype: string
    :exception: TypeError - incompatible version
    """
    fs = []
    fs.append("cpe:2.3:")
    for i in range(0, len(CPEComponent.ordered_comp_parts)):
        ck = CPEComponent.ordered_comp_parts[i]
        lc = self._get_attribute_components(ck)
        if len(lc) > 1:
            # More than one value per attribute cannot be represented in
            # the formatted-string binding.
            errmsg = "Incompatible version {0} with formatted string".format(
                self.VERSION)
            raise TypeError(errmsg)
        else:
            comp = lc[0]
            # Map special component types to formatted-string wildcards.
            if (isinstance(comp, CPEComponentUndefined) or
                    isinstance(comp, CPEComponentEmpty) or
                    isinstance(comp, CPEComponentAnyValue)):
                v = CPEComponent2_3_FS.VALUE_ANY
            elif isinstance(comp, CPEComponentNotApplicable):
                v = CPEComponent2_3_FS.VALUE_NA
            else:
                # Regular value: delegate to the component's own formatter.
                v = comp.as_fs()
            fs.append(v)
            fs.append(CPEComponent2_3_FS.SEPARATOR_COMP)
    # Join and trim the trailing component separator.
    return CPE._trim("".join(fs[:-1]))
Returns the CPE Name as formatted string of version 2.3. :returns: CPE Name as formatted string :rtype: string :exception: TypeError - incompatible version
def get(self, id, service='facebook', type='analysis'):
    """Get a given PYLON task.

    :param id: the ID of the task
    :param service: the PYLON service (facebook)
    :param type: the task type
    :return: REST API output with headers attached
    """
    path = '/'.join((service, 'task', type, id))
    return self.request.get(path)
Get a given Pylon task :param id: The ID of the task :type id: str :param service: The PYLON service (facebook) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
def ping(self, timeout=0, **kwargs):
    """Send a websocket ping and verify the pong payload matches.

    NOTE: this does not work against uwsgi, which does not respond
    to pings.
    """
    alphabet = string.ascii_uppercase + string.digits
    payload = ''.join(random.choice(alphabet) for _ in range(8))
    self.ws.ping(payload)
    opcode, data = self.recv_raw(timeout, [websocket.ABNF.OPCODE_PONG],
                                 **kwargs)
    if data != payload:
        raise IOError("Pinged server but did not receive correct pong")
THIS DOES NOT WORK, UWSGI DOES NOT RESPOND TO PINGS
def GetRadioButtonSelect(selectList, title="Select", msg=""):
    """Create a radio-button window for option selection.

    :param selectList: the options to display
    :param title: window name
    :param msg: label shown above the radio buttons
    :return: (selectedItem, selectedIndex)
    """
    root = tkinter.Tk()
    root.title(title)
    val = tkinter.IntVar()
    val.set(0)
    if msg != "":
        tkinter.Label(root, text=msg).pack()
    for index, item in enumerate(selectList):
        tkinter.Radiobutton(root, text=item, variable=val,
                            value=index).pack(anchor=tkinter.W)
    tkinter.Button(root, text="OK", fg="black", command=root.quit).pack()
    root.mainloop()
    root.destroy()
    chosen = val.get()
    print(selectList[chosen] + " is selected")
    return (selectList[chosen], chosen)
Create radio button window for option selection title: Window name msg: Label of the radio button return (selectedItem, selectedIndex)
def create_rack(self):
    """Return an instance of the rack services facade."""
    return Rack(
        self.networkapi_url,
        self.user,
        self.password,
        self.user_ldap,
    )
Get an instance of rack services facade.
def get_queryset(self):
    """Check that the queryset is defined and call it."""
    qs = self.queryset
    if qs is None:
        raise ImproperlyConfigured(
            "'%s' must define 'queryset'" % self.__class__.__name__)
    return qs()
Check that the queryset is defined and call it.
def compile_all():
    """Compile style.qrc using pyrcc4, pyrcc5 and pyside-rcc."""
    jobs = [
        ("PyQt4", "pyqt_style_rc.py", "pyrcc4 -py3 style.qrc -o pyqt_style_rc.py"),
        ("PyQt5", "pyqt5_style_rc.py", "pyrcc5 style.qrc -o pyqt5_style_rc.py"),
        ("PySide", "pyside_style_rc.py", "pyside-rcc -py3 style.qrc -o pyside_style_rc.py"),
    ]
    for toolkit, target, command in jobs:
        print("Compiling for {}: style.qrc -> {}".format(toolkit, target))
        os.system(command)
Compile style.qrc using rcc, pyside-rcc and pyrcc4
def _time_from_iso8601_time_naive(value):
    """Convert a zoneless ISO8601 time string to a naive datetime.time.

    :type value: str
    :param value: The time string to convert
    :rtype: :class:`datetime.time`
    :returns: A datetime time object created from the string
    :raises ValueError: if the value does not match a known format.
    """
    # The string length discriminates between the two accepted formats.
    formats = {8: _TIMEONLY_NO_FRACTION, 15: _TIMEONLY_W_MICROS}
    fmt = formats.get(len(value))
    if fmt is None:
        raise ValueError("Unknown time format: {}".format(value))
    return datetime.datetime.strptime(value, fmt).time()
Convert a zoneless ISO8601 time string to naive datetime time :type value: str :param value: The time string to convert :rtype: :class:`datetime.time` :returns: A datetime time object created from the string :raises ValueError: if the value does not match a known format.
def from_request(cls, request):
    """Generate a HeaderDict from a django request's META data."""
    headers = HeaderDict()
    extra_headers = ('CONTENT_TYPE', 'CONTENT_LENGTH')
    for meta_key, value in iteritems(request.META):
        if not value:
            continue
        if not (meta_key.startswith('HTTP_') or meta_key in extra_headers):
            continue
        headers[cls._normalize_django_header_name(meta_key)] = value
    return headers
Generate a HeaderDict based on django request object meta data.
def generateCertificate(cls):
    """Create and return an X.509 certificate and corresponding private key.

    :rtype: RTCCertificate
    """
    private_key = generate_key()
    return cls(key=private_key, cert=generate_certificate(private_key))
Create and return an X.509 certificate and corresponding private key. :rtype: RTCCertificate
def _CSI(self, cmd): sys.stdout.write('\x1b[') sys.stdout.write(cmd)
Control sequence introducer
def recurrence(self, recurrence):
    """See `recurrence`."""
    if is_valid_recurrence(recurrence):
        self._recurrence = recurrence
    else:
        raise KeyError("'%s' is not a valid recurrence value" % recurrence)
See `recurrence`.
def safe_wraps(wrapper, *args, **kwargs):
    """Like functools.wraps, but unwraps functools.partial objects first."""
    target = wrapper
    while isinstance(target, functools.partial):
        target = target.func
    return functools.wraps(target, *args, **kwargs)
Safely wraps partial functions.
def folderitems(self):
    """XXX refactor if possible to non-classic mode."""
    items = super(AnalysisRequestAnalysesView, self).folderitems()
    # Categories are (re)populated by the parent call; keep them sorted.
    self.categories.sort()
    return items
XXX refactor if possible to non-classic mode
def get_ip_address():
    """Simple utility to get the host IP address.

    A UDP socket is "connected" to a public address (no traffic is sent)
    to learn the outgoing interface's address; when the network is
    unreachable, falls back to resolving the host's own FQDN.

    :return: the host IP address as a string
    """
    # Bug fix: create the socket before the try block.  Previously a
    # failure in socket() left `s` unbound and the finally clause raised
    # NameError, masking the real error.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # UDP connect only performs a local route lookup; no packet is sent.
        s.connect(("8.8.8.8", 80))
        ip_address = s.getsockname()[0]
    except socket_error as sockerr:
        if sockerr.errno != errno.ENETUNREACH:
            raise sockerr
        ip_address = socket.gethostbyname(socket.getfqdn())
    finally:
        s.close()
    return ip_address
Simple utility to get host IP address.
def validate_password(entry, username, check_function, password=None, retries=1,
                      save_on_success=True, prompt=None, **check_args):
    """Validate a password with a check function and re-prompt on failure.

    Useful after a user has changed their password in LDAP but their
    local keychain entry is out of sync.

    :param str entry: The keychain entry to fetch a password from.
    :param str username: The username to authenticate.
    :param func check_function: Called as (username, password, **check_args).
    :param str password: The password to validate; prompted for when None.
    :param int retries: Number of re-prompts on failure.
    :param bool save_on_success: Save the password when validation succeeds.
    :param str prompt: Alternate prompt when asking for the password.
    :returns: True on successful authentication, False otherwise.
    :rtype: bool

    NOTE(review): uses ``xrange`` -- Python 2 only as written.
    """
    if password is None:
        password = get_password(entry, username, prompt)
    # One initial attempt plus `retries` re-prompts.
    for _ in xrange(retries + 1):
        if check_function(username, password, **check_args):
            if save_on_success:
                save_password(entry, password, username)
            return True
        log.error("Couldn't successfully authenticate your username & password..")
        # Force a fresh prompt (ignore any cached value) before retrying.
        password = get_password(entry, username, prompt, always_ask=True)
    return False
Validate a password with a check function & retry if the password is incorrect. Useful for after a user has changed their password in LDAP, but their local keychain entry is then out of sync. :param str entry: The keychain entry to fetch a password from. :param str username: The username to authenticate :param func check_function: Check function to use. Should take (username, password, **check_args) :param str password: The password to validate. If `None`, the user will be prompted. :param int retries: Number of retries to prompt the user for. :param bool save_on_success: Save the password if the validation was successful. :param str prompt: Alternate prompt to use when asking for the user's password. :returns: `True` on successful authentication. `False` otherwise. :rtype: bool
def start(self):
    """Get the rtm ws_host and user information.

    Returns:
        None if the request failed, otherwise a dict containing
        "user" (User) and "ws_host".
    """
    resp = self.post('start')
    if resp.is_fail() or 'result' not in resp.data:
        return None
    result = resp.data['result']
    return {'user': result['user'], 'ws_host': result['ws_host']}
Gets the rtm ws_host and user information Returns: None if request failed, else a dict containing "user"(User) and "ws_host"
def _validate_alias_file_path(alias_file_path):
    """Ensure the alias file path exists and is not a directory.

    Args:
        alias_file_path: The alias file path to import aliases from.

    Raises:
        CLIError: if the path is missing or points at a directory.
    """
    if not os.path.exists(alias_file_path):
        raise CLIError(ALIAS_FILE_NOT_FOUND_ERROR)
    elif os.path.isdir(alias_file_path):
        raise CLIError(ALIAS_FILE_DIR_ERROR.format(alias_file_path))
Make sure the alias file path is neither non-existent nor a directory Args: alias_file_path: The alias file path to import aliases from.
def pytype_to_deps(t):
    """python -> pythonic type header full paths (impl and include)."""
    # Materialise once in case the helper returns a one-shot iterator.
    hpp_files = list(pytype_to_deps_hpp(t))
    impl = {os.path.join('pythonic', 'types', h) for h in hpp_files}
    incl = {os.path.join('pythonic', 'include', 'types', h) for h in hpp_files}
    return impl | incl
python -> pythonic type header full path.
def build_input_table(cls, name='inputTableName', input_name='input'):
    """Build an input table parameter.

    :param str name: parameter name
    :param str input_name: bound input port name
    :return: input description
    :rtype: ParamDef
    """
    param = cls(name)
    param.exporter = 'get_input_table_name'
    param.input_name = input_name
    return param
Build an input table parameter :param name: parameter name :type name: str :param input_name: bind input port name :type input_name: str :return: input description :rtype: ParamDef