code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def load (self, jamfile_location): assert isinstance(jamfile_location, basestring) absolute = os.path.join(os.getcwd(), jamfile_location) absolute = os.path.normpath(absolute) jamfile_location = b2.util.path.relpath(os.getcwd(), absolute) mname = self.module_name(jamfile_location...
Loads jamfile at the given location. After loading, project global file and jamfile needed by the loaded one will be loaded recursively. If the jamfile at that location is loaded already, does nothing. Returns the project module for the Jamfile.
def _create_save_scenario_action(self): icon = resources_path('img', 'icons', 'save-as-scenario.svg') self.action_save_scenario = QAction( QIcon(icon), self.tr('Save Current Scenario'), self.iface.mainWindow()) message = self.tr('Save current scenario to text file') ...
Create action for save scenario dialog.
def makePalette(color1, color2, N, hsv=True): if hsv: color1 = rgb2hsv(color1) color2 = rgb2hsv(color2) c1 = np.array(getColor(color1)) c2 = np.array(getColor(color2)) cols = [] for f in np.linspace(0, 1, N - 1, endpoint=True): c = c1 * (1 - f) + c2 * f if hsv: ...
Generate N colors starting from `color1` to `color2` by linear interpolation in HSV or RGB space. :param int N: number of output colors. :param color1: first rgb color. :param color2: second rgb color. :param bool hsv: if `False`, interpolation is calculated in RGB space. .. hint:: Example: |...
def split_data(X, y, ratio=(0.8, 0.1, 0.1)): assert(sum(ratio) == 1 and len(ratio) == 3) X_train, X_rest, y_train, y_rest = train_test_split( X, y, train_size=ratio[0]) X_val, X_test, y_val, y_test = train_test_split( X_rest, y_rest, train_size=ratio[1]) return X_train, X_val, X_test, y_...
Splits data into a training, validation, and test set. Args: X: text data y: data labels ratio: the ratio for splitting. Default: (0.8, 0.1, 0.1) Returns: split data: X_train, X_val, X_test, y_train, y_val, y_test
def _HasOOOWrite(self, path): size = tf.io.gfile.stat(path).length old_size = self._finalized_sizes.get(path, None) if size != old_size: if old_size is None: logger.error('File %s created after file %s even though it\'s ' 'lexicographically earlier', path, self._path) ...
Returns whether the path has had an out-of-order write.
def add_title_translation(self, title, language, source=None):
    """Add a translated title entry to ``title_translations``.

    :param title: translated title
    :param language: language code of the translation
    :param source: optional provenance for the given title
    """
    entry = self._sourced_dict(source, title=title, language=language)
    self._append_to('title_translations', entry)
Add title translation. :param title: translated title :type title: string :param language: language for the original title :type language: string (2 characters ISO639-1) :param source: source for the given title :type source: string
def getClassInPackageFromName(className, pkg):
    """Look up a class object in *pkg* by its name.

    The name list and the class list are parallel, so the index of the
    name locates the class.
    """
    names = getAvClassNamesInPackage(pkg)
    classes = getAvailableClassesInPackage(pkg)
    return classes[names.index(className)]
get a class from name within a package
def init_app(self, app, add_context_processor=True): if not hasattr(app, 'login_manager'): self.login_manager.init_app( app, add_context_processor=add_context_processor) self.login_manager.login_message = None self.login_manager.needs_refresh_message =...
Initialize with app configuration
def available_perm_status(user): roles = get_user_roles(user) permission_hash = {} for role in roles: permission_names = role.permission_names_list() for permission_name in permission_names: permission_hash[permission_name] = get_permission( permission_name) in us...
Get a boolean map of the permissions available to a user based on that user's roles.
async def register_storage_library(storage_type: str, c_library: str, entry_point: str) -> None: LOGGER.debug( 'WalletManager.register_storage_library >>> storage_type %s, c_library %s, entry_point %s', storage_type, c_library, entry_point) try: ...
Load a wallet storage plug-in. An indy-sdk wallet storage plug-in is a shared library; relying parties must explicitly load it before creating or opening a wallet with the plug-in. The implementation loads a dynamic library and calls an entry point; internally, the plug-in calls the in...
def verify(self):
    """Verify expectations on every doubled object (idempotent).

    :raise: ``MockExpectationError`` from the first unsatisfied proxy.
    """
    if not self._is_verified:
        for double in self._proxies.values():
            double.verify()
        self._is_verified = True
Verifies expectations on all doubled objects. :raise: ``MockExpectationError`` on the first expectation that is not satisfied, if any.
def name(self):
    """Return the explicit name, else the first non-blank docstring line."""
    if self._name:
        return self._name
    non_blank = [ln.strip() for ln in self.__doc__.split("\n") if ln.strip()]
    return non_blank[0]
Use the first line of docs string unless name set.
async def async_put_state(self, field: str, data: dict) -> dict:
    """PUT *data* (JSON-encoded) to the deCONZ endpoint named by *field*.

    :param field: endpoint path, e.g. ``/lights/1/state``
    :param data: state fields to alter, e.g. ``{'on': True}``
    :return: decoded response from the gateway
    """
    put = self.session.put
    payload = json.dumps(data)
    return await async_request(put, self.api_url + field, data=payload)
Set state of object in deCONZ. Field is a string representing a specific device in deCONZ e.g. field='/lights/1/state'. Data is a json object with what data you want to alter e.g. data={'on': True}. See Dresden Elektroniks REST API documentation for details: http://dresd...
def save_translations(self, instance, translated_data): for meta in self.Meta.model._parler_meta: translations = translated_data.get(meta.rel_name, {}) for lang_code, model_fields in translations.items(): translation = instance._get_translated_model(lang_code, auto_create...
Save translation data into translation objects.
def list(self, wg_uuid, parent=None, flat=False, node_types=None): url = "%(base)s/%(wg_uuid)s/nodes" % { 'base': self.local_base_url, 'wg_uuid': wg_uuid } param = [] if parent: if isinstance(parent, (list,)): if len(parent) >= 1: ...
Get a list of workgroup nodes.
def pluck(self, column):
    """Return the value of *column* from the first row, or None if no row.

    :param column: the column to pluck
    """
    row = self.first([column])
    return row[column] if row else None
Pluck a single column from the database. :param column: The column to pluck :type column: str :return: The column value :rtype: mixed
def register_value_producer(self, value_name: str, source: Callable[..., pd.DataFrame]=None, preferred_combiner: Callable=replace_combiner, preferred_post_processor: Callable[..., pd.DataFrame]=None) -> Pipeline: return self._value_manager.register...
Marks a ``Callable`` as the producer of a named value. Parameters ---------- value_name : The name of the new dynamic value pipeline. source : A callable source for the dynamic value pipeline. preferred_combiner : A strategy for combining the ...
def initbinset(self,binset=None): if binset is None: msg="(%s) does not have a defined binset in the wavecat table. The waveset of the spectrum will be used instead."%str(self.bandpass) try: self.binwave = self.bandpass.binset except (KeyError, AttributeError)...
Set ``self.binwave``. By default, wavelength values for binning are inherited from bandpass. If the bandpass has no binning information, then source spectrum wavelengths are used. However, if user provides values, then those are used without question. Parameters -------...
async def _auth_plain(self, username, password): mechanism = "PLAIN" credentials = "\0{}\0{}".format(username, password) encoded_credentials = SMTP.b64enc(credentials) try: code, message = await self.do_cmd( "AUTH", mechanism, encoded_credentials, success=(235...
Performs an authentication attempt using the PLAIN mechanism. Protocol: 1. Format the username and password in a suitable way ; 2. The formatted string is base64-encoded ; 3. The string 'AUTH PLAIN' and a space character are prepended to the base64-encoded us...
def find(self, node, path):
    """Thin wrapper over lxml's ``find`` that injects our namespace map."""
    ns_map = self.namespaces
    return node.find(path, namespaces=ns_map)
Wrapper for lxml`s find.
def querydict_to_multidict(query_dict, wrap=None):
    """Build a ``webob.MultiDict`` from a ``django.http.QueryDict``.

    :param wrap: optional callable applied to every value
    """
    wrap = wrap or (lambda value: value)
    pairs = (
        (key, wrap(value))
        for key, values in six.iterlists(query_dict)
        for value in values
    )
    return MultiDict(pairs)
Returns a new `webob.MultiDict` from a `django.http.QueryDict`. If `wrap` is provided, it's used to wrap the values.
def reverse_complement_sequences(records): logging.info('Applying _reverse_complement_sequences generator: ' 'transforming sequences into reverse complements.') for record in records: rev_record = SeqRecord(record.seq.reverse_complement(), id=record.id, na...
Transform sequences into reverse complements.
def table_from_cwb(source, *args, **kwargs):
    """Read an `EventTable` from a Coherent WaveBurst ROOT file.

    Delegates to the ``format='root'`` reader with the 'waveburst' tree.
    """
    reader = EventTable.read
    return reader(source, 'waveburst', *args, format='root', **kwargs)
Read an `EventTable` from a Coherent WaveBurst ROOT file This function just redirects to the format='root' reader with appropriate defaults.
def command_exists(command):
    """Return True if *command* matches any registered command."""
    return any(
        candidate.match(command)
        for _category, commands in iteritems(command_categories)
        for candidate in commands
    )
Check if the given command was registered; in other words, whether it exists.
async def process_updates(self, updates, fast: typing.Optional[bool] = True): if fast: tasks = [] for update in updates: tasks.append(self.updates_handler.notify(update)) return await asyncio.gather(*tasks) results = [] for update in updates: ...
Process list of updates :param updates: :param fast: :return:
def color_for_level(level): if not color_available: return None return { logging.DEBUG: colorama.Fore.WHITE, logging.INFO: colorama.Fore.BLUE, logging.WARNING: colorama.Fore.YELLOW, logging.ERROR: colorama.Fore.RED, logging.CRITICAL: colorama.Fore.MAGENTA }.ge...
Returns the colorama Fore color for a given log level. If color is not available, returns None.
def absent(name, vhost='/', runas=None): ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} policy_exists = __salt__['rabbitmq.policy_exists']( vhost, name, runas=runas) if not policy_exists: ret['comment'] = 'Policy \'{0} {1}\' is not present.'.format(v...
Ensure the named policy is absent Reference: http://www.rabbitmq.com/ha.html name The name of the policy to remove runas Name of the user to run the command as
def remove_sonos_playlist(self, sonos_playlist):
    """Destroy a Sonos playlist.

    :param sonos_playlist: playlist container object, or its item id string
    :return: result of the ``DestroyObject`` UPnP call
    """
    item_id = getattr(sonos_playlist, 'item_id', sonos_playlist)
    return self.contentDirectory.DestroyObject([('ObjectID', item_id)])
Remove a Sonos playlist. Args: sonos_playlist (DidlPlaylistContainer): Sonos playlist to remove or the item_id (str). Returns: bool: True if successful, False otherwise Raises: SoCoUPnPException: If sonos_playlist does not point to a valid ...
def OnActivateReader(self, reader):
    """Handle reader activation (double-click on the tree control or toolbar)."""
    SimpleSCardAppEventObserver.OnActivateReader(self, reader)
    label = 'Activated reader: ' + repr(reader)
    self.feedbacktext.SetLabel(label)
Called when a reader is activated by double-clicking on the reader tree control or toolbar.
def close(self):
    """Stop the client: signal all stop events, join the receiver, close the socket."""
    self.stopped.set()
    for pending in self.to_be_stopped:
        pending.set()
    receiver = self._receiver_thread
    if receiver is not None:
        receiver.join()
    self._socket.close()
Stop the client.
def with_respect_to(self): try: name = self.order_with_respect_to value = getattr(self, name) except AttributeError: return {} field = getattr(self.__class__, name) if isinstance(field, GenericForeignKey): names = (field.ct_field, field.fk_...
Returns a dict to use as a filter for ordering operations containing the original ``Meta.order_with_respect_to`` value if provided. If the field is a Generic Relation, the dict returned contains names and values for looking up the relation's ``ct_field`` and ``fk_field`` attributes.
def get_unit_property(self, unit_id, property_name): if isinstance(unit_id, (int, np.integer)): if unit_id in self.get_unit_ids(): if unit_id not in self._unit_properties: self._unit_properties[unit_id] = {} if isinstance(property_name, str): ...
This function returns the data stored under the property name from the given unit. Parameters ---------- unit_id: int The unit id for which the property will be returned property_name: str The name of the property Returns ----------...
def clear(self, key=None): if not self.options.enabled: return CACHE_DISABLED logger.debug('clear(key={})'.format(repr(key))) if key is not None and key in self._dict.keys(): del self._dict[key] logger.info('cache cleared for key: ' + repr(key)) elif n...
Clear a cache entry, or the entire cache if no key is given Returns CACHE_DISABLED if the cache is disabled Returns True on successful operation :param key: optional key to limit the clear operation to (defaults to None)
def _get_response(self, connection): response_header = self._receive(connection, 13) logger.debug('Response header: %s', response_header) if (not response_header.startswith(b'ZBXD\x01') or len(response_header) != 13): logger.debug('Zabbix return not valid response.') ...
Get response from zabbix server, reads from self.socket. :type connection: :class:`socket._socketobject` :param connection: Socket to read. :rtype: dict :return: Response from zabbix server or False in case of error.
def byte_adaptor(fbuffer):
    """Provide py3 compatibility: wrap a byte stream as a text stream.

    :param fbuffer: file-like object containing bytes
    :return: a string-based buffer on py3; the original buffer on py2
    """
    if not six.PY3:
        return fbuffer
    text = fbuffer.read().decode('latin-1')
    return six.StringIO(text)
provides py3 compatibility by converting byte based file stream to string based file stream Arguments: fbuffer: file like objects containing bytes Returns: string buffer
def find_one(self, filter=None, fields=None, skip=0, sort=None):
    """Like ``find`` but retrieve only the first matching row, or None."""
    rows = self.find(filter=filter, fields=fields, skip=skip, limit=1,
                     sort=sort)
    return rows[0] if len(rows) > 0 else None
Similar to find. This method will only retrieve one row. If no row matches, returns None
def image(cam): yield marv.set_header(title=cam.topic) msg = yield marv.pull(cam) if msg is None: return pytype = get_message_type(cam) rosmsg = pytype() rosmsg.deserialize(msg.data) name = '{}.jpg'.format(cam.topic.replace('/', ':')[1:]) imgfile = yield marv.make_file(name) ...
Extract first image of input stream to jpg file. Args: cam: Input stream of raw rosbag messages. Returns: File instance for first image of input stream.
def file_content_list(self, project): project_list = False self.load_project_flag_list_file(il.get('project_exceptions'), project) try: flag_list = (fl['file_audits']['file_contents']) except KeyError: logger.error('Key Error processing file_contents list values')...
gathers content strings
def rytov_sc(radius=5e-6, sphere_index=1.339, medium_index=1.333, wavelength=550e-9, pixel_size=1e-7, grid_size=(80, 80), center=(39.5, 39.5), radius_sampling=42): r r_ryt, n_ryt = correct_rytov_sc_input(radius_sc=radius, sphere_index_sc=sphere...
r"""Field behind a dielectric sphere, systematically corrected Rytov This method implements a correction of :func:`qpsphere.models.rytov`, where the `radius` :math:`r_\text{Ryt}` and the `sphere_index` :math:`n_\text{Ryt}` are corrected using the approach described in :cite:`Mueller2018` (eqns. 3,4...
def validate_word(self, word):
    """Return True iff *word* is an exhaustive concatenation of valid segments.

    Repeatedly strips a leading ``seg_regex`` match; fails as soon as the
    remainder does not start with a valid segment.
    """
    remaining = word
    while remaining:
        match = self.seg_regex.match(remaining)
        if not match:
            return False
        remaining = remaining[match.end():]
    return True
Returns True if `word` consists exhaustively of valid IPA segments Args: word (unicode): input word as Unicode IPA string Returns: bool: True if `word` can be divided exhaustively into IPA segments that exist in the database
def did_you_mean(unknown_command, entry_points):
    """Return the known command most similar to what the user typed.

    Used to suggest a correct command when the user types an illegal one.

    :param unknown_command: the string the user actually typed
    :param entry_points: iterable of known command names (must be non-empty)
    :return: the closest-matching command name
    """
    from difflib import SequenceMatcher

    def similarity(candidate):
        # Ratio in [0, 1]; higher means more alike.
        return SequenceMatcher(None, candidate, unknown_command).ratio()

    # max() finds the best match in one O(n) pass instead of sorting the
    # whole list just to take its first element.
    return max(entry_points, key=similarity)
Return the command with the name most similar to what the user typed. This is used to suggest a correct command when the user types an illegal command.
def block_specification_to_number(block: BlockSpecification, web3: Web3) -> BlockNumber: if isinstance(block, str): msg = f"string block specification can't contain {block}" assert block in ('latest', 'pending'), msg number = web3.eth.getBlock(block)['number'] elif isinstance(block, T_Bl...
Converts a block specification to an actual block number
def from_requirement(cls, provider, requirement, parent):
    """Build an instance holding every candidate matching *requirement*.

    :raises NoVersionsAvailable: if the provider finds no candidates
    """
    candidates = provider.find_matches(requirement)
    if not candidates:
        raise NoVersionsAvailable(requirement, parent)
    info = [RequirementInformation(requirement, parent)]
    return cls(candidates=candidates, information=info)
Build an instance from a requirement.
def job(name, **kwargs):
    """A shortcut decorator for declaring jobs.

    Forwards to ``task`` with the job defaults (schedulable, bound,
    ``JobTask`` base); extra *kwargs* pass straight through.
    """
    return task(name=name, schedulable=True, base=JobTask, bind=True, **kwargs)
A shortcut decorator for declaring jobs
def get_modpath_from_modname(modname, prefer_pkg=False, prefer_main=False): from os.path import dirname, basename, join, exists initname = '__init__.py' mainname = '__main__.py' if modname in sys.modules: modpath = sys.modules[modname].__file__.replace('.pyc', '.py') else: import pkg...
Same as get_modpath but doesn't import directly. SeeAlso: get_modpath
def _close_app(app, mongo_client, client):
    """Ensure the app and both clients are properly closed.

    NOTE(review): original shutdown order preserved — app first, then
    ``client``, then ``mongo_client``; presumably intentional, confirm
    before reordering.
    """
    app.stop()
    client.close()
    mongo_client.close()
Ensures that the app is properly closed
def htmresearchCorePrereleaseInstalled():
    """Return True if a pre-release htmresearch-core distribution is installed.

    Returns False when the distribution is absent or is a final release.
    """
    try:
        dist = pkg_resources.get_distribution("htmresearch-core")
        if pkg_resources.parse_version(dist.version).is_prerelease:
            return True
    except pkg_resources.DistributionNotFound:
        pass
    return False
Make an attempt to determine if a pre-release version of htmresearch-core is installed already. @return: boolean
def cursor_after(self):
    """Return the cursor after the current item.

    If the stored value is an exception (no cursor / no current item),
    re-raise it instead of returning it.
    """
    result = self._cursor_after
    if isinstance(result, BaseException):
        raise result
    return result
Return the cursor after the current item. You must pass a QueryOptions object with produce_cursors=True for this to work. If there is no cursor or no current item, raise BadArgumentError. Before next() has returned there is no cursor. Once the loop is exhausted, this returns the cursor after th...
def unregister_listener(self, address, func):
    """Remove a listener function registered for *address*.

    :param address: key the listener was registered under
    :param func: the callback previously registered
    :return: True if the listener was found and removed, False otherwise
    """
    # .get() returns None for an unknown address instead of raising
    # KeyError (and avoids inserting a default entry if the mapping is a
    # defaultdict), matching the documented "False otherwise" contract.
    listeners = self.address_listeners.get(address)
    if listeners is None:
        return False
    if func in listeners:
        listeners.remove(func)
        return True
    return False
Removes a listener function for a given address Remove the listener for the given address. Returns true if the listener was found and removed, false otherwise
def _format_generic(lines, element, printed, spacer=""):
    """Append docstring entries whose doctype was not already printed.

    :arg printed: list of (lower-case) doctypes already handled elsewhere
    :arg spacer: prefix prepended to every appended line
    """
    unprinted = (
        entry for entry in element.docstring
        if entry.doctype.lower() not in printed
    )
    for entry in unprinted:
        lines.append(spacer + str(entry))
Generically formats all remaining docstrings and custom XML tags that don't appear in the list of already printed documentation. :arg printed: a list of XML tags for the element that have already been handled by a higher method.
def wash_html_id(dirty):
    """Wash a string into a safe HTML element ID.

    Strips every non-word character and guarantees the result starts with
    a letter (prefixing ``'i'`` when it does not), so the value works as
    an HTML ID in all browsers and with jQuery.

    :param dirty: the string to wash
    :returns: the washed, HTML-ID-ready string
    """
    import re
    # Guard the empty string: the original dirty[0] raised IndexError.
    if not dirty or not dirty[0].isalpha():
        dirty = 'i' + dirty
    # \W+ is equivalent to the original [^\w]+ character class.
    return re.sub(r'\W+', '', dirty)
Strip non-alphabetic or newline characters from a given string. It can be used as a HTML element ID (also with jQuery and in all browsers). :param dirty: the string to wash :returns: the HTML ID ready string
def _build_sentence(word):
    """Build a Pinyin sentence regex pattern from a Pinyin word pattern.

    A sentence is a run of valid words, non-stop punctuation, and spaces,
    terminated by exactly one stop and optional closing punctuation.
    Relies on the module-level ``non_stops`` and ``stops`` character sets.
    """
    return (
        # one-or-more of: a word, a non-stop char, or a space that is not
        # directly preceded by a stop or another space
        "(?:{word}|[{non_stops}]|(?<![{stops} ]) )+"
        # exactly one stop, then any closing quotes/brackets
        "[{stops}]['\"\]\}}\)]*"
    ).format(word=word, non_stops=non_stops.replace('-', '\-'), stops=stops)
Builds a Pinyin sentence re pattern from a Pinyin word re pattern. A sentence is defined as a series of valid Pinyin words, punctuation (non-stops), and spaces followed by a single stop and zero or more container-closing punctuation marks (e.g. apostrophe and brackets).
def get_available_options(self, service_name): options = {} for data_dir in self.data_dirs: service_glob = "{0}-*.json".format(service_name) path = os.path.join(data_dir, service_glob) found = glob.glob(path) for match in found: base = os.p...
Fetches a collection of all JSON files for a given service. This checks user-created files (if present) as well as including the default service files. Example:: >>> loader.get_available_options('s3') { '2013-11-27': [ '~/.boto-overr...
def service_desks(self): url = self._options['server'] + '/rest/servicedeskapi/servicedesk' headers = {'X-ExperimentalApi': 'opt-in'} r_json = json_loads(self._session.get(url, headers=headers)) projects = [ServiceDesk(self._options, self._session, raw_project_json) f...
Get a list of ServiceDesk Resources from the server visible to the current authenticated user. :rtype: List[ServiceDesk]
def sign(self, private_keys): if private_keys is None or not isinstance(private_keys, list): raise TypeError('`private_keys` must be a list instance') def gen_public_key(private_key): public_key = private_key.get_verifying_key().encode() return public_key.decode() ...
Fulfills a previous Transaction's Output by signing Inputs. Note: This method works only for the following Cryptoconditions currently: - Ed25519Fulfillment - ThresholdSha256 Furthermore, note that all keys required to f...
def close(self):
    """Close the connection to the HID device.

    Automatically run when a Device object is garbage-collected, though
    manual invocation is recommended.
    """
    # Presumably validates that we still hold a usable handle — see
    # _check_device_status (not visible here).
    self._check_device_status()
    hidapi.hid_close(self._device)
    # Drop the handle so subsequent calls fail the status check above.
    self._device = None
Close connection to HID device. Automatically run when a Device object is garbage-collected, though manual invocation is recommended.
def dt2ts(dt):
    """Convert a date/datetime to float seconds since the epoch.

    Uses ``time.mktime``, which interprets the timetuple in *local* time;
    microseconds are preserved for datetime inputs.

    :param dt: a ``datetime.date`` or ``datetime.datetime``
    :return: float seconds since 1970-01-01
    :raises TypeError: if *dt* is not a date or datetime
    """
    # Explicit TypeError instead of `assert`, which vanishes under -O.
    if not isinstance(dt, (datetime.datetime, datetime.date)):
        raise TypeError('dt2ts() expects a datetime.date or datetime.datetime')
    ret = time.mktime(dt.timetuple())
    if isinstance(dt, datetime.datetime):
        ret += 1e-6 * dt.microsecond
    return ret
Converts to a float representing the number of seconds since 1970-01-01 (the epoch). Note that ``time.mktime`` interprets the timetuple in local time, not GMT.
def _check_permission(self, name, obj=None): def redirect_or_exception(ex): if not self.request.user or not self.request.user.is_authenticated: if self.auto_login_redirect: redirect_to_login(self.request.get_full_path()) else: r...
If the customer is not authorized, they should not learn that the object exists; therefore 403 is returned if the object was not found, or the user is redirected to the login page. If the customer is authorized and the object was not found, 404 is returned. If the object was found and the user is not authorized, 4...
def fetch(self, **kwargs) -> 'FetchContextManager': assert self.method in self._allowed_methods, \ 'Disallowed HTTP method: {}'.format(self.method) self.date = datetime.now(tzutc()) self.headers['Date'] = self.date.isoformat() if self.content_type is not None: ...
Sends the request to the server and reads the response. You may use this method either with plain synchronous Session or AsyncSession. Both the followings patterns are valid: .. code-block:: python3 from ai.backend.client.request import Request from ai.backend.client.sess...
def retrieve_data(self):
    """Fetch historic data for [start, end] as a DataFrame, zeros masked as NaN."""
    frame = self.manager.get_historic_data(self.start.date(), self.end.date())
    frame.replace(0, np.nan, inplace=True)
    return frame
Retrives data as a DataFrame.
def find_nearest(x, x0) -> Tuple[int, Any]: x = np.asanyarray(x) x0 = np.atleast_1d(x0) if x.size == 0 or x0.size == 0: raise ValueError('empty input(s)') if x0.ndim not in (0, 1): raise ValueError('2-D x0 not handled yet') ind = np.empty_like(x0, dtype=int) for i, xi in enumerat...
This find_nearest function does NOT assume sorted input inputs: x: array (float, int, datetime, h5py.Dataset) within which to search for x0 x0: singleton or array of values to search for in x outputs: idx: index of flattened x nearest to x0 (i.e. works with higher than 1-D arrays also) xidx: ...
def submit(self, port_id, tuple_):
    """Submit *tuple_* to the SPL output port identified by *port_id*.

    A ``None`` value submits nothing; see :ref:`submit-from-python` for
    how ``tuple``/``dict``/``list`` values are mapped to SPL tuples.
    """
    ec._submit(self, self._splpy_output_ports[port_id], tuple_)
Submit a tuple to the output port. The value to be submitted (``tuple_``) can be a ``None`` (nothing will be submitted), ``tuple``, ``dict` or ``list`` of those types. For details on how the ``tuple_`` is mapped to an SPL tuple see :ref:`submit-from-python`. Args: port_id:...
def build_request_relationship(type, ids):
    """Build a JSON-API relationship payload linking *ids* of *type*.

    ``None`` clears the relationship, a single id string produces one
    linkage object, and a list produces a list of linkage objects.

    :param type: the resource type for the ids
    :param ids: None, a single id string, or an iterable of id strings
    """
    if ids is None:
        data = None
    elif isinstance(ids, str):
        data = {'id': ids, 'type': type}
    else:
        data = [{'id': one_id, 'type': type} for one_id in ids]
    return {'data': data}
Build a relationship list. A relationship list is used to update relationships between two resources. Setting sensors on a label, for example, uses this function to construct the list of sensor ids to pass to the Helium API. Args: type(string): The resource type for the ids in the relatio...
def init_search(self): if self.verbose: logger.info("Initializing search.") for generator in self.generators: graph = generator(self.n_classes, self.input_shape).generate( self.default_model_len, self.default_model_width ) model_id = self.m...
Call the generators to generate the initial architectures for the search.
def get_publisher(self):
    """Import and instantiate the publisher handler for this article's DOI prefix.

    Returns None (after logging the ImportError) when no publisher module
    exists for the prefix.
    """
    prefix = self.doi.split('/')[0]
    try:
        module = openaccess_epub.publisher.import_by_doi(prefix)
    except ImportError as error:
        log.exception(error)
        return None
    return module.pub_class(self)
This method defines how the Article tries to determine the publisher of the article. This method relies on the success of the get_DOI method to fetch the appropriate full DOI for the article. It then takes the DOI prefix which corresponds to the publisher and then uses that to attempt t...
def file_copy(name, dest=None, **kwargs):
    """Copy a file from the local device to the junos device.

    :param name: source path of the file on the local device
    :param dest: destination path on the junos device
    :return: standard salt state dict with the module result as changes
    """
    changes = __salt__['junos.file_copy'](name, dest, **kwargs)
    return {'name': name, 'changes': changes, 'result': True, 'comment': ''}
Copies the file from the local device to the junos device. .. code-block:: yaml /home/m2/info.txt: junos: - file_copy - dest: info_copy.txt Parameters: Required * src: The source path where the file is kept. * dest: ...
def hook_drag(self):
    """Redirect the widget's mouse events to this object's drag handlers."""
    target = self.widget
    target.mousePressEvent = self.mousePressEvent
    target.mouseMoveEvent = self.mouseMoveEvent
    target.mouseReleaseEvent = self.mouseReleaseEvent
Install the hooks for drag operations.
def removeSubscribers(self, emails_list): if not hasattr(emails_list, "__iter__"): error_msg = "Input parameter 'emails_list' is not iterable" self.log.error(error_msg) raise exception.BadValue(error_msg) missing_flags = True headers, raw_data = self._perform_...
Remove subscribers from this workitem If the subscribers have not been added, no more actions will be performed. :param emails_list: a :class:`list`/:class:`tuple`/:class:`set` contains the the subscribers' emails
def ellipsis(text, length, symbol="..."):
    """Truncate *text* to *length*, breaking at a space when possible.

    When truncation happens, trailing dots are stripped before *symbol*
    is appended; text that already fits is returned unchanged.
    """
    if len(text) <= length:
        return text
    cut = text.rfind(" ", 0, length)
    if cut < 0:
        cut = length
    return text[:cut].rstrip(".") + symbol
Present a block of text of given length. If the length of available text exceeds the requested length, truncate and intelligently append an ellipsis.
def load_data(self, data_np):
    """Wrap raw numpy data in an AstroImage and display it in the viewer."""
    img = AstroImage.AstroImage(logger=self.logger)
    img.set_data(data_np)
    self.set_image(img)
Load raw numpy data into the viewer.
def update(self, **kwargs): update_compute = False old_json = self.__json__() compute_properties = None for prop in kwargs: if getattr(self, prop) != kwargs[prop]: if prop not in self.CONTROLLER_ONLY_PROPERTIES: update_compute = True ...
Update the node on the compute server :param kwargs: Node properties
def to_geojson(self, filename):
    """Serialize this vector to *filename* as GeoJSON (WGS84 record)."""
    record = self.to_record(WGS84_CRS)
    with open(filename, 'w') as fd:
        json.dump(record, fd)
Save vector as geojson.
def _make_concept(self, entity):
    """Build a (Concept, metadata) pair from a Hume entity.

    Metadata maps each argument type to the argument value's ``@id``.
    """
    concept = Concept(
        self._sanitize(entity['canonicalName']),
        db_refs=_get_grounding(entity),
    )
    metadata = {
        arg['type']: arg['value']['@id'] for arg in entity['arguments']
    }
    return concept, metadata
Return Concept from a Hume entity.
def tip_fdr(a, alpha=0.05):
    """Return FDR-adjusted TIP p-values.

    (see :func:`tip_zscores` for more info)

    :param a: NumPy array, one row of signal per feature
    :param alpha: false discovery rate, forwarded to the correction
    """
    zscores = tip_zscores(a)
    # NOTE(review): p-values via norm.pdf looks suspect (sf/cdf is the
    # usual tail probability) — preserved as-is, confirm with the author.
    pvals = stats.norm.pdf(zscores)
    # Fix: `alpha` was accepted but never used; forward it so the
    # rejection decisions honor the requested FDR level.
    rejected, fdrs = fdrcorrection(pvals, alpha=alpha)
    return fdrs
Returns adjusted TIP p-values for a particular `alpha`. (see :func:`tip_zscores` for more info) :param a: NumPy array, where each row is the signal for a feature :param alpha: False discovery rate
def run(self): from zengine.lib.cache import WFSpecNames if self.manager.args.clear: self._clear_models() return if self.manager.args.wf_path: paths = self.get_wf_from_path(self.manager.args.wf_path) else: paths = self.get_workflows() ...
read workflows, checks if it's updated, tries to update if there aren't any running instances of that wf
def make_and_return_path_from_path_and_folder_names(path, folder_names):
    """Append *folder_names* to *path*, create the directories, return the path.

    For example, path='/path/to/folders' and folder_names=['f1', 'f2']
    creates '/path/to/folders/f1/f2/' and returns that path (always
    '/'-terminated). Existing directories are left untouched.

    :param path: base path (treated as '/'-terminated)
    :param folder_names: folder names to nest, outermost first
    :return: the inner-most folder path, with trailing '/'
    """
    for folder_name in folder_names:
        path += folder_name + '/'
    if folder_names:
        # exist_ok covers the whole chain in one call, replacing the old
        # per-folder makedirs + try/except FileExistsError loop.
        os.makedirs(path, exist_ok=True)
    return path
For a given path, create a directory structure composed of a set of folders and return the path to the \ inner-most folder. For example, if path='/path/to/folders', and folder_names=['folder1', 'folder2'], the directory created will be '/path/to/folders/folder1/folder2/' and the returned path will be '/pat...
def get_buffer(self):
    """Return the pending action buffer to bulk into elasticsearch, then reset."""
    if self.doc_to_update:
        self.update_sources()
    pending = self.action_buffer
    self.clean_up()
    return pending
Get buffer which needs to be bulked to elasticsearch
def traverse_inorder(self, leaves=True, internal=True):
    """Perform an inorder traversal of the ``Node`` objects in this ``Tree``.

    :param leaves: True to include leaf nodes
    :param internal: True to include internal nodes
    """
    # Delegate with `yield from` instead of a manual re-yield loop.
    yield from self.root.traverse_inorder(leaves=leaves, internal=internal)
Perform an inorder traversal of the ``Node`` objects in this ``Tree`` Args: ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False`` ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
def cmd_watch(args):
    """Set (or clear, when called with no args) the watched mavlink packet pattern."""
    if not args:
        mpstate.status.watch = None
        return
    mpstate.status.watch = args
    print("Watching %s" % mpstate.status.watch)
watch a mavlink packet pattern
def serialize_on_post_delete(sender, instance, using, **kwargs):
    """Serialize an OutgoingTransaction when a model instance is deleted.

    Instances whose model is not registered with ``site_offline_models``
    are skipped silently.
    """
    try:
        wrapped = site_offline_models.get_wrapped_instance(instance)
    except ModelNotRegistered:
        return
    wrapped.to_outgoing_transaction(using, created=False, deleted=True)
Creates a serialized OutgoingTransaction when a model instance is deleted. Skip those not registered.
def predict(self, X, nsamples=200, likelihood_args=()):
    """Predict expected target values from the regression.

    :param X: (N*, d) query input array
    :param nsamples: number of samples used to estimate the expectation
    :param likelihood_args: extra arguments forwarded to the likelihood
    :return: expected target values (the first moment only)
    """
    expected, _variance = self.predict_moments(X, nsamples, likelihood_args)
    return expected
Predict target values from Bayesian generalized linear regression. Parameters ---------- X : ndarray (N*,d) array query input dataset (N* samples, d dimensions). nsamples : int, optional Number of samples for sampling the expected target values from the ...
def _load(self): if os.path.exists(self.path): root = ET.parse(self.path).getroot() if (root.tag == "fortpy" and "mode" in root.attrib and root.attrib["mode"] == "template" and "direction" in root.attrib and root.attrib["direction"] == self.direction): ...
Extracts the XML template data from the file.
def get_extra(cls, name=None):
    """Return extra configuration: the whole dict, or one entry.

    :param name: optional key; when given, returns that entry or None
    """
    config = cls._extra_config
    return config if not name else config.get(name)
Gets extra configuration parameters. These parameters should be loaded through load_extra or load_extra_data. Args: name: str, the name of the configuration data to load. Returns: A dictionary containing the requested configuration data. None if data was never loaded under that name.
def take(self, num_instances: int = 1, timeout: Optional[float] = None) -> None: if num_instances < 1: raise ValueError(f"Process must request at least 1 instance; here requested {num_instances}.") if num_instances > self.num_instances_total: raise ValueError( f"P...
The current process reserves a certain number of instances. If there are not enough instances available, the process is made to join a queue. When this method returns, the process holds the instances it has requested to take. :param num_instances: Number of resource instances to tak...
def _get_billing_cycle_number(self, billing_cycle): begins_before_initial_date = billing_cycle.date_range.lower < self.initial_billing_cycle.date_range.lower if begins_before_initial_date: raise ProvidedBillingCycleBeginsBeforeInitialBillingCycle( '{} precedes initial cycle {...
Gets the 1-indexed number of the billing cycle relative to the provided billing cycle
def unzip_file(source_file, dest_dir=None, mkdir=False): if dest_dir is None: dest_dir, fname = os.path.split(source_file) elif not os.path.isdir(dest_dir): if mkdir: preparedir(dest_dir) else: created = preparedir(dest_dir, False) if not created: ...
Unzip a compressed file. Args: source_file: Full path to a valid compressed file (e.g. c:/ladybug/testPts.zip) dest_dir: Target folder to extract to (e.g. c:/ladybug). Default is set to the same directory as the source file. mkdir: Set to True to create the directory if doesn't ...
def _get_retention_policy_value(self): if self.RetentionPolicy is None or self.RetentionPolicy.lower() == self.RETAIN.lower(): return self.RETAIN elif self.RetentionPolicy.lower() == self.DELETE.lower(): return self.DELETE elif self.RetentionPolicy.lower() not in self.ret...
Sets the deletion policy on this resource. The default is 'Retain'. :return: value for the DeletionPolicy attribute.
def fen(self, *, shredder: bool = False, en_passant: str = "legal", promoted: Optional[bool] = None) -> str:
    """Return the full FEN string for the current position.

    Combines the EPD part (board layout, side to move, castling rights,
    en passant square) with the halfmove clock and the fullmove number,
    separated by single spaces.

    :param shredder: forwarded to ``epd`` for Shredder-FEN castling notation.
    :param en_passant: forwarded to ``epd``; controls the en passant field.
    :param promoted: forwarded to ``epd``; marks promoted pieces if set.
    """
    epd_part = self.epd(shredder=shredder, en_passant=en_passant, promoted=promoted)
    clock = str(self.halfmove_clock)
    move_number = str(self.fullmove_number)
    return f"{epd_part} {clock} {move_number}"
Gets a FEN representation of the position. A FEN string (e.g., ``rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1``) consists of the position part :func:`~chess.Board.board_fen()`, the :data:`~chess.Board.turn`, the castling part (:data:`~chess.Board.castling_rights`), ...
def _exec_info(self): if self._info is None: self._info = self.client.exec_inspect(self.exec_id) return self._info
Caching wrapper around client.exec_inspect
def get_field_setup_query(query, model, column_name):
    """Resolve *column_name* on *model*, joining a related model if needed.

    Helper for SQLA filters: plain attribute names resolve directly on
    *model*; dotted names (e.g. ``"created_by.username"``) resolve the
    relationship's target class via its mapper, join it onto *query*,
    and resolve the column on that related class.

    :return: tuple of (possibly joined query, column attribute).
    """
    if hasattr(model, column_name):
        return query, getattr(model, column_name)
    # Dot notation: "<relationship>.<column>" lives on a related model.
    parts = column_name.split(".")
    rel_model = getattr(model, parts[0]).mapper.class_
    joined_query = query.join(rel_model)
    return joined_query, getattr(rel_model, parts[1])
Help function for SQLA filters, checks for dot notation on column names. If it exists, will join the query with the model from the first part of the field name. example: Contact.created_by: if created_by is a User model, it will be joined to the query.
def _completion_checker(async_id, context_id): if not context_id: logging.debug("Context for async %s does not exist", async_id) return context = FuriousContext.from_id(context_id) marker = FuriousCompletionMarker.get_by_id(context_id) if marker and marker.complete: logging.info(...
Check if all Async jobs within a Context have been run.
def _transliterate (self, text, outFormat): result = [] text = self._preprocess(text) i = 0 while i < len(text): if text[i].isspace(): result.append(text[i]) i = i+1 else: chr = self._getNextChar(text, i) ...
Transliterate the text to Unicode.
def close(self):
    """Permanently dispose of the client.

    Marks the client as closing, detaches and closes every broker
    connection, clears all cached metadata, and returns a deferred that
    fires when shutdown has completed.
    """
    log.debug("%r: close", self)
    self._closing = True
    # Detach the broker clients before closing them so no further
    # operations can reach them through self.clients.
    active = self.clients
    self.clients = None
    self._close_brokerclients(active.values())
    self.reset_all_metadata()
    if self.close_dlist:
        return self.close_dlist
    return defer.succeed(None)
Permanently dispose of the client - Immediately mark the client as closed, causing current operations to fail with :exc:`~afkak.common.CancelledError` and future operations to fail with :exc:`~afkak.common.ClientError`. - Clear cached metadata. - Close any connections to Kaf...
def serialize_upload(name, storage, url):
    """Serialize an uploaded file's name and storage backend.

    The payload is signed with the upload url as the salt, so it can
    only be deserialized in the context of the same upload endpoint.
    """
    if isinstance(storage, LazyObject):
        # Force the lazy wrapper to instantiate so the concrete storage
        # class behind it can be read.
        storage._setup()
        storage_cls = storage._wrapped.__class__
    else:
        storage_cls = storage.__class__
    payload = {
        'name': name,
        'storage': '%s.%s' % (storage_cls.__module__, storage_cls.__name__),
    }
    return signing.dumps(payload, salt=url)
Serialize uploaded file by name and storage. Namespaced by the upload url.
def diffsp(self, col: str, serie: "iterable", name: str="Diff"): try: d = [] for i, row in self.df.iterrows(): v = (row[col]*100) / serie[i] d.append(v) self.df[name] = d except Exception as e: self.err(e, self._append, "Can...
Add a diff column in percentage from a serie. The serie is an iterable of the same length than the dataframe :param col: column to diff :type col: str :param serie: serie to diff from :type serie: iterable :param name: name of the diff col, defaults to "Diff" :p...
def cancel_orders(self, order_ids: List[str]) -> List[str]: orders_to_cancel = order_ids self.log.debug(f'Canceling orders on {self.name}: ids={orders_to_cancel}') cancelled_orders = [] if self.dry_run: self.log.warning(f'DRY RUN: Orders cancelled on {self.name}: {orders_to_c...
Cancel multiple orders by a list of IDs.
def _copy_selection(self, *event):
    """Copy the current selection to the global clipboard.

    Only acts when the event targets this view's editor; returns True
    when the copy was performed, None otherwise.
    """
    if not react_to_event(self.view, self.view.editor, event):
        return None
    logger.debug("copy selection")
    global_clipboard.copy(self.model.selection)
    return True
Copies the current selection to the clipboard.
def folder(self) -> typing.Union[str, None]: if 'folder' in self.data: return self.data.get('folder') elif self.project_folder: if callable(self.project_folder): return self.project_folder() else: return self.project_folder retu...
The folder, relative to the project source_directory, where the file resides :return:
def get_repository_configuration(id):
    """Retrieve a specific RepositoryConfiguration by its id.

    :return: the response content, or None when the API call failed or
        returned an empty response.
    """
    response = utils.checked_api_call(pnc_api.repositories, 'get_specific', id=id)
    return response.content if response else None
Retrieve a specific RepositoryConfiguration
def del_graph(self, graph):
    """Delete every record associated with *graph*.

    Removes edge values, node values, edges, and nodes before finally
    deleting the graph row itself, all keyed by the packed graph
    identifier.
    """
    g = self.pack(graph)
    # Fixed: the original issued 'del_edge_val_graph' twice; the
    # duplicate call was redundant (deleting already-deleted rows is a
    # no-op) and has been removed.
    self.sql('del_edge_val_graph', g)
    self.sql('del_node_val_graph', g)
    self.sql('del_edges_graph', g)
    self.sql('del_nodes_graph', g)
    self.sql('del_graph', g)
Delete all records related to the graph