def downloadTo(self, href, localpath):
    """
    Download file to localstorage

    :param href: remote path
    :param localpath: local path
    :return: response
    """
    for iTry in range(TRYINGS):
        logger.info(u("downloadTo(%s): %s %s") % (iTry, href, localpath))
        try:
            href = remote(href)
            localpath = _(localpath)
            conn = self.getConnection()
            conn.request("GET", _encode_utf8(href), "", self.getHeaders())
            response = conn.getresponse()
            checkResponse(response)
            f = None
            try:
                while True:
                    data = _decode_utf8(response.read(1024))
                    if not data:
                        break
                    if data == u('resource not found'):
                        return False
                    if not f:
                        f = open(localpath, "w")
                    f.write(data)
            finally:
                if f:
                    f.close()
            return True
        except ConnectionException:
            raise
        except Exception:
            e = sys.exc_info()[1]
            logger.exception(e)
def _p_iteration(self, P, Bp_solver, Vm, Va, pvpq):
    """ Performs a P iteration, updates Va. """
    dVa = -Bp_solver.solve(P)

    # Update voltage.
    Va[pvpq] = Va[pvpq] + dVa
    V = Vm * exp(1j * Va)

    return V, Vm, Va
def change_type(self, cls):
    """
    Change type of diagram in this chart.
    Accepts one of the classes that extend Diagram.
    """
    target_type = cls._type
    target = self._embedded.createInstance(target_type)
    self._embedded.setDiagram(target)
    return cls(target)
def source_loader(self, source_paths, create_missing_tables=True):
    """Load source from 3 csv files.

    First file should contain global settings:

    * ``native_lagnauge,languages`` header on first row
    * appropriate values on following rows

    Example::

        native_lagnauge,languages
        ru,ru
        ,en

    Second file should contain templates:

    * ``template_name,probability,genders,template`` header on first row
    * appropriate values on following rows (separate values with semicolon ";" in template column)

    Example::

        template_name,probability,genders,template
        male_1,5,m,prefixes;male_suffixes
        baby_1,1,m;f,prefixes;descriptive

    Third file should contain tables with values for template slugs in all languages:

    * first row should contain slugs with language code after colon for each
    * appropriate values on following rows. Multiple forms may be specified using semicolon as separator

    Example::

        prefixes:ru,prefixes:en,male_suffixes:ru,male_suffixes:en,descriptive:ru,descriptive:en
        Бж,Bzh,пра,pra,быстряк;быстряку,fasty
        дон;дону,don,Иван;Ивану,Ivan,Иванов;Иванову,Ivanov

    Note: you may use slugs without ":lang_code" suffix in csv header of tables file.
    Such headers will be treated as headers for native language.

    If tables are missing for some slug then it is automatically created with values
    equal to the slug itself. So you may use some slugs without specifying tables data
    for them. Example for apostrophe and space:

        male_1,5,m,prefixes;';male_suffixes
        male_full,5,m,first_name; ;last_name
    """
    if not isinstance(source_paths, Iterable) or len(source_paths) < 3:
        raise TypeError('FromCSVTablesGenerator.source_loader accepts list of 3 paths as argument. Got `%s` instead' % source_paths)

    self.native_language = ''
    self.languages = []
    self.templates = []
    self.tables = {}

    self.load_settings(source_paths[0])
    template_slugs = self.load_templates(source_paths[1])
    self.load_tables(source_paths[2])

    if create_missing_tables:
        self.create_missing_tables(template_slugs)

    self.full_forms_for_languages = set()
def autoreconnect(self, sleep=1, attempt=3, exponential=True, jitter=5):
    """
    Tries to reconnect with some delay:

    exponential=False: up to `attempt` times with `sleep` seconds
    between each try

    exponential=True: up to `attempt` times with exponentially growing
    `sleep` plus a random delay in range 1..`jitter` (exponential backoff)

    :param sleep: time to sleep between two attempts to reconnect
    :type sleep: float or int
    :param attempt: maximal number of attempts
    :type attempt: int
    :param exponential: if set - use exponential backoff logic
    :type exponential: bool
    :param jitter: top value of random delay, sec
    :type jitter: int
    """
    p = 0
    while attempt is None or attempt > 0:
        try:
            self.reconnect()
            return True
        except GraphiteSendException:
            if exponential:
                p += 1
                time.sleep(pow(sleep, p) + random.randint(1, jitter))
            else:
                time.sleep(sleep)
            if attempt is not None:  # attempt=None means retry forever
                attempt -= 1
    return False
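# Sanity check of the backoff arithmetic (a sketch): the delay base is
# pow(sleep, p), so the default sleep=1 yields a constant 1 s base and a
# base greater than 1 is needed for true exponential growth.
import random

for p in range(1, 4):  # three failed attempts with sleep=2, jitter=5
    delay = pow(2, p) + random.randint(1, 5)
    print(delay)  # roughly 3..7, then 5..9, then 9..13 seconds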
def generate_security_hash(self, content_type, object_pk, timestamp):
    """
    Generate an HMAC security hash from the provided info.
    """
    info = (content_type, object_pk, timestamp)
    key_salt = "django.contrib.forms.CommentSecurityForm"
    value = "-".join(info)
    return salted_hmac(key_salt, value).hexdigest()
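# Rough stdlib equivalent of what Django's salted_hmac does (a sketch;
# SECRET_KEY here is a placeholder for settings.SECRET_KEY):
import hashlib
import hmac

SECRET_KEY = 'not-a-real-secret'

def salted_hmac_sketch(key_salt, value):
    # Derive a fixed-length key from the salt plus the site secret,
    # then HMAC the value with it.
    key = hashlib.sha1((key_salt + SECRET_KEY).encode()).digest()
    return hmac.new(key, msg=value.encode(), digestmod=hashlib.sha1)

digest = salted_hmac_sketch("django.contrib.forms.CommentSecurityForm",
                            "comment-42-1518000000").hexdigest()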
def clear_url(self):
    """Removes the url.

    raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
           ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for osid.repository.AssetContentForm.clear_url_template
    if (self.get_url_metadata().is_read_only() or
            self.get_url_metadata().is_required()):
        raise errors.NoAccess()
    self._my_map['url'] = self._url_default
def is_ipaddress(hostname):
    """Detects whether the hostname given is an IP address.

    :param str hostname: Hostname to examine.
    :return: True if the hostname is an IP address, False otherwise.
    """
    if six.PY3 and isinstance(hostname, bytes):
        # IDN A-label bytes are ASCII compatible.
        hostname = hostname.decode('ascii')

    families = [socket.AF_INET]
    if hasattr(socket, 'AF_INET6'):
        families.append(socket.AF_INET6)

    for af in families:
        try:
            inet_pton(af, hostname)
        except (socket.error, ValueError, OSError):
            pass
        else:
            return True
    return False
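# Usage (assuming is_ipaddress() above is in scope); hostnames and
# malformed strings fail inet_pton and fall through to False:
is_ipaddress('93.184.216.34')  # True  (IPv4)
is_ipaddress('2606:2800::1')   # True  (IPv6, when AF_INET6 is available)
is_ipaddress('example.com')    # False (hostname, not an address)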
def idaunpack(buf):
    """
    Special data packing format, used in struct definitions, and .id2 files

    sdk functions: pack_dd etc.
    """
    buf = bytearray(buf)

    def nextval(o):
        val = buf[o]; o += 1
        if val == 0xff:  # 32 bit value
            val, = struct.unpack_from(">L", buf, o)
            o += 4
            return val, o
        if val < 0x80:  # 7 bit value
            return val, o
        val <<= 8
        val |= buf[o]; o += 1
        if val < 0xc000:  # 14 bit value
            return val & 0x3fff, o
        # 29 bit value
        val <<= 8
        val |= buf[o]; o += 1
        val <<= 8
        val |= buf[o]; o += 1
        return val & 0x1fffffff, o

    values = []
    o = 0
    while o < len(buf):
        val, o = nextval(o)
        values.append(val)
    return values
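# Worked example of the variable-length encoding (assuming idaunpack()
# above is in scope):
#   0x7f            -> one-byte value 127
#   0x81 0x00       -> two-byte value (0x8100 & 0x3fff) == 256
#   0xff + 4 bytes  -> full 32-bit big-endian value
idaunpack(b'\x7f\x81\x00\xff\x00\x01\x00\x00')  # [127, 256, 65536]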
def json_to_pages(json, user, preferred_lang=None):
    """
    Attempt to create/update pages from JSON string json. user is the
    user that will be used when creating a page if a page's original
    author can't be found. preferred_lang is the language code of the
    slugs to include in error messages (defaults to
    settings.PAGE_DEFAULT_LANGUAGE).

    Returns (errors, pages_created) where errors is a list of strings
    and pages_created is a list of: (page object, created bool,
    messages list of strings) tuples.

    If any errors are detected, the error list will contain information
    for the user and no pages will be created/updated.
    """
    from .models import Page
    if not preferred_lang:
        preferred_lang = settings.PAGE_DEFAULT_LANGUAGE

    d = simplejson.loads(json)
    try:
        errors = validate_pages_json_data(d, preferred_lang)
    except KeyError as e:
        errors = [_('JSON file is invalid: %s') % (e.args[0],)]

    pages_created = []
    if not errors:
        # pass one
        for p in d['pages']:
            pages_created.append(
                Page.objects.create_and_update_from_json_data(p, user))
        # pass two
        for p, results in zip(d['pages'], pages_created):
            page, created, messages = results
            rtcs = p['redirect_to_complete_slug']
            if rtcs:
                messages.extend(page.update_redirect_to_from_json(rtcs))

        # clean up MPTT links
        #Page.objects.rebuild()

    return errors, pages_created
def get_password_from_keyring(entry=None, username=None):
    """
    :param entry: The entry in the keychain. This is a caller specific key.
    :param username: The username to get the password for. Default is the current user.
    """
    if username is None:
        username = get_username()

    has_keychain = initialize_keychain()

    # Unlock the user's keychain; otherwise, if running under SSH,
    # 'security(1)' will throw an error.
    unlock_keychain(username)

    if has_keychain and entry is not None:
        try:
            return keyring.get_password(entry, username)
        except Exception as e:
            log.warn("Unable to get password from keyring. Continuing..")
            log.debug(e)
    return None
def finish():
    # type: () -> None
    """ Merge current hotfix branch into master and develop. """
    pretend = context.get('pretend', False)

    if not pretend and (git.staged() or git.unstaged()):
        log.err(
            "You have uncommitted changes in your repo!\n"
            "You need to stash them before you merge the hotfix branch"
        )
        sys.exit(1)

    develop = conf.get('git.devel_branch', 'develop')
    master = conf.get('git.master_branch', 'master')
    branch = git.current_branch(refresh=True)

    common.assert_branch_type('hotfix')

    # Merge hotfix into master
    common.git_checkout(master)
    common.git_pull(master)
    common.git_merge(master, branch.name)

    # Merge hotfix into develop
    common.git_checkout(develop)
    common.git_pull(develop)
    common.git_merge(develop, branch.name)

    # Cleanup
    common.git_branch_delete(branch.name)
    common.git_prune()

    common.git_checkout(master)
def count(self, value):
    """S.count(value) -> integer -- return number of occurrences of value"""
    from pcapkit.protocols.protocol import Protocol

    try:
        flag = issubclass(value, Protocol)
    except TypeError:
        flag = issubclass(type(value), Protocol)

    if flag or isinstance(value, Protocol):
        value = value.__index__()
    if isinstance(value, tuple):
        value = r'|'.join(value)

    with contextlib.suppress(Exception):
        return sum(1 for data in self.__data__
                   if re.fullmatch(value, data, re.IGNORECASE) is not None)
    return 0
def plot_similarity(ensemble, ax=None, jitter=0.1,
                    scatter_kw=dict(), line_kw=dict()):
    """Plots similarity across optimization runs as a function of model rank.

    Parameters
    ----------
    ensemble : Ensemble object
        holds optimization results across a range of model ranks
    ax : matplotlib axis (optional)
        axis to plot on (defaults to current axis object)
    jitter : float (optional)
        amount of horizontal jitter added to scatterpoints (default=0.1)
    scatter_kw : dict (optional)
        keyword arguments for styling the scatterpoints
    line_kw : dict (optional)
        keyword arguments for styling the line

    References
    ----------
    Ulrike von Luxburg (2010). Clustering Stability: An Overview.
    Foundations and Trends in Machine Learning.
    https://arxiv.org/abs/1007.1075
    """
    if ax is None:
        ax = plt.gca()

    # compile statistics for plotting
    x, sim, mean_sim = [], [], []
    for rank in sorted(ensemble.results):
        # similarity scores for rank-r models
        s = ensemble.similarities(rank)[1:]
        sim.extend(s)
        x.extend(np.full(len(s), rank))
        mean_sim.append(np.mean(s))

    # add horizontal jitter
    ux = np.unique(x)
    x = np.array(x) + (np.random.rand(len(x)) - 0.5) * jitter

    # make plot
    ax.scatter(x, sim, **scatter_kw)
    ax.plot(ux, mean_sim, **line_kw)

    ax.set_xlabel('model rank')
    ax.set_ylabel('model similarity')
    ax.set_ylim([0, 1.1])

    return ax
def tsort(self):
    """Given a partial ordering, return a totally ordered list.

    part is a dict of partial orderings. Each value is a set,
    which the key depends on.

    The return value is a list of sets, each of which has only
    dependencies on items in previous entries in the list.

    raise ValueError if ordering is not possible (check for circular or
    missing dependencies)"""
    task_dict = {}
    for key, task in self.tasks.iteritems():
        task_dict[task] = task.dependencies
    # parts = parts.copy()
    parts = task_dict.copy()
    result = []
    while True:
        level = set([name for name, deps in parts.iteritems() if not deps])
        if not level:
            break
        result.append(level)
        parts = dict([(name, deps - level)
                      for name, deps in parts.iteritems()
                      if name not in level])
    if parts:
        raise ValueError('total ordering not possible '
                         '(check for circular or missing dependencies)')
    return result
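# The same level-by-level algorithm on a plain dependency dict, as a
# self-contained Python 3 sketch (names are illustrative):
def toposort_levels(parts):
    """parts maps each item to the set of items it depends on."""
    parts = {k: set(v) for k, v in parts.items()}
    result = []
    while True:
        # items with no remaining dependencies form the next level
        level = {name for name, deps in parts.items() if not deps}
        if not level:
            break
        result.append(level)
        parts = {name: deps - level
                 for name, deps in parts.items() if name not in level}
    if parts:
        raise ValueError('total ordering not possible')
    return result

toposort_levels({'a': set(), 'b': {'a'}, 'c': {'a', 'b'}})
# [{'a'}, {'b'}, {'c'}]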
def calc_max_flexural_wavelength(self):
    """
    Returns the approximate maximum flexural wavelength

    This is important when padding of the grid is required: in Flexure (this
    code), grids are padded out to one maximum flexural wavelength, but in any
    case, the flexural wavelength is a good characteristic distance for any
    truncation limit
    """
    if np.isscalar(self.D):
        Dmax = self.D
    else:
        Dmax = self.D.max()
    # This is an approximation if there is fill that evolves with iterations
    # (e.g., water), but should be good enough that this won't do much to it
    alpha = (4 * Dmax / (self.drho * self.g)) ** .25  # 2D flexural parameter
    self.maxFlexuralWavelength = 2 * np.pi * alpha
    self.maxFlexuralWavelength_ncells = int(np.ceil(self.maxFlexuralWavelength / self.dx))
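# Worked example of the arithmetic above (a sketch with made-up but
# plausible values): D = 1e23 N*m, drho = 2300 kg/m^3, g = 9.8 m/s^2.
import numpy as np

D, drho, g = 1.0e23, 2300.0, 9.8
alpha = (4 * D / (drho * g)) ** 0.25          # 2D flexural parameter, ~65 km
max_flexural_wavelength = 2 * np.pi * alpha   # ~410 km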
def uniq(args):
    """
    %prog uniq gffile cdsfasta

    Remove overlapping gene models. Similar to formats.gff.uniq(), overlapping
    'piles' are processed, one by one.

    Here, we use a different algorithm, that retains the best non-overlapping
    subset within each pile, rather than single best model. Scoring function is
    also different: rather than based on score or span, we optimize for the
    subset that shows the best combined score. Score is defined by:

    score = (1 - AED) * length
    """
    p = OptionParser(uniq.__doc__)
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    gffile, cdsfasta = args
    gff = Gff(gffile)
    sizes = Sizes(cdsfasta).mapping
    gene_register = {}
    for g in gff:
        if g.type != "mRNA":
            continue
        aed = float(g.attributes["_AED"][0])
        gene_register[g.parent] = (1 - aed) * sizes[g.accn]

    allgenes = import_feats(gffile)
    g = get_piles(allgenes)

    bestids = set()
    for group in g:
        ranges = [to_range(x, score=gene_register[x.accn], id=x.accn)
                  for x in group]
        selected_chain, score = range_chain(ranges)
        bestids |= set(x.id for x in selected_chain)

    removed = set(x.accn for x in allgenes) - bestids
    fw = open("removed.ids", "w")
    print("\n".join(sorted(removed)), file=fw)
    fw.close()
    populate_children(opts.outfile, bestids, gffile, "gene")
def deactivatePdpContextAccept():
    """DEACTIVATE PDP CONTEXT ACCEPT Section 9.5.9"""
    a = TpPd(pd=0x8)
    b = MessageType(mesType=0x47)  # 01000111
    packet = a / b
    return packet
def execute(self, eopatch):
    """Returns the EOPatch with renamed features.

    :param eopatch: input EOPatch
    :type eopatch: EOPatch
    :return: input EOPatch with the renamed features
    :rtype: EOPatch
    """
    for feature_type, feature_name, new_feature_name in self.feature_gen(eopatch):
        eopatch[feature_type][new_feature_name] = eopatch[feature_type][feature_name]
        del eopatch[feature_type][feature_name]

    return eopatch
def list_clients(self, instance=None):
    """Lists the clients.

    :param Optional[str] instance: A Yamcs instance name.
    :rtype: ~collections.Iterable[yamcs.model.Client]
    """
    # Server does not do pagination on listings of this resource.
    # Return an iterator anyway for similarity with other API methods
    url = '/clients'
    if instance:
        url = '/instances/{}/clients'.format(instance)
    response = self.get_proto(path=url)
    message = rest_pb2.ListClientsResponse()
    message.ParseFromString(response.content)
    clients = getattr(message, 'client')
    return iter([Client(client) for client in clients])
def create_typed_target(self, type, project, name, sources,
                        requirements, default_build, usage_requirements):
    """ Creates a TypedTarget with the specified properties.
    The 'name', 'sources', 'requirements', 'default_build' and
    'usage_requirements' are assumed to be in the form specified
    by the user in Jamfile corresponding to 'project'.
    """
    assert isinstance(type, basestring)
    assert isinstance(project, ProjectTarget)
    assert is_iterable_typed(sources, basestring)
    assert is_iterable_typed(requirements, basestring)
    assert is_iterable_typed(default_build, basestring)
    return self.main_target_alternative(
        TypedTarget(name, project, type,
                    self.main_target_sources(sources, name),
                    self.main_target_requirements(requirements, project),
                    self.main_target_default_build(default_build, project),
                    self.main_target_usage_requirements(usage_requirements, project)))
def load_configuration_from_text_file(register, configuration_file):
    '''Loading configuration from text files to register object

    Parameters
    ----------
    register : pybar.fei4.register object
    configuration_file : string
        Full path (directory and filename) of the configuration file.
        If name is not given, reload configuration from file.
    '''
    logging.info("Loading configuration: %s" % configuration_file)
    register.configuration_file = configuration_file

    config_dict = parse_global_config(register.configuration_file)

    if 'Flavor' in config_dict:
        flavor = config_dict.pop('Flavor').lower()
        if not register.flavor:
            register.init_fe_type(flavor)
    elif not register.flavor:
        raise ValueError('Flavor not specified')

    if 'Chip_ID' in config_dict:
        chip_id = config_dict.pop('Chip_ID')
        if not register.chip_address:
            register.set_chip_address(chip_address=chip_id & 0x7,
                                      broadcast=True if chip_id & 0x8 else False)
    elif 'Chip_Address' in config_dict:
        chip_address = config_dict.pop('Chip_Address')
        if not register.chip_address:
            register.set_chip_address(chip_address)
    elif not register.chip_id_initialized:
        raise ValueError('Chip address not specified')

    global_registers_configured = []
    pixel_registers_configured = []
    for key in config_dict.keys():
        value = config_dict.pop(key)
        if key in register.global_registers:
            register.set_global_register_value(key, value)
            global_registers_configured.append(key)
        elif key in register.pixel_registers:
            register.set_pixel_register_value(key, value)
            pixel_registers_configured.append(key)
        elif key in register.calibration_parameters:
            register.calibration_parameters[key] = value
        else:
            register.miscellaneous[key] = value

    global_registers = register.get_global_register_attributes('name', readonly=False)
    pixel_registers = register.pixel_registers.keys()
    global_registers_not_configured = set(global_registers).difference(global_registers_configured)
    pixel_registers_not_configured = set(pixel_registers).difference(pixel_registers_configured)
    if global_registers_not_configured:
        logging.warning("Following global register(s) not configured: {}".format(
            ', '.join('\'' + reg + '\'' for reg in global_registers_not_configured)))
    if pixel_registers_not_configured:
        logging.warning("Following pixel register(s) not configured: {}".format(
            ', '.join('\'' + reg + '\'' for reg in pixel_registers_not_configured)))
    if register.miscellaneous:
        logging.warning("Found following unknown parameter(s): {}".format(
            ', '.join('\'' + parameter + '\'' for parameter in register.miscellaneous.iterkeys())))
def handle_error(self, exp):
    """Called if a Mapper returns MappingInvalid. Should handle the error
    and return it in the appropriate format; can be overridden in order to
    change the error format.

    :param exp: MappingInvalid exception raised
    """
    payload = {
        "message": "Invalid or incomplete data provided.",
        "errors": exp.errors
    }
    self.endpoint.return_error(self.error_status, payload=payload)
def my_address_string(self):
    """ For logging client host without resolving. """
    addr = getattr(self, 'client_address', ('', None))[0]
    # If listed in proxy_ips, use the X-Forwarded-For header, if present.
    if addr in self.proxy_ips:
        return self.headers.getheader('x-forwarded-for', addr)
    return addr
def on_next_request(self, py_db, request):
    '''
    :param NextRequest request:
    '''
    arguments = request.arguments  # : :type arguments: NextArguments
    thread_id = arguments.threadId

    if py_db.get_use_libraries_filter():
        step_cmd_id = CMD_STEP_OVER_MY_CODE
    else:
        step_cmd_id = CMD_STEP_OVER

    self.api.request_step(py_db, thread_id, step_cmd_id)

    response = pydevd_base_schema.build_response(request)
    return NetCommand(CMD_RETURN, 0, response, is_json=True)
def verify_ed25519_signature(public_key, contents, signature, message):
    """Verify that ``signature`` comes from ``public_key`` and ``contents``.

    Args:
        public_key (Ed25519PublicKey): the key to verify the signature
        contents (bytes): the contents that was signed
        signature (bytes): the signature to verify
        message (str): the error message to raise.

    Raises:
        ScriptWorkerEd25519Error: on failure

    """
    try:
        public_key.verify(signature, contents)
    except InvalidSignature as exc:
        raise ScriptWorkerEd25519Error(message % {'exc': str(exc)})
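# Round-trip check with the `cryptography` package (a sketch; the
# '%(exc)s' message format matches the handler above):
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey

private_key = Ed25519PrivateKey.generate()
contents = b'payload to protect'
signature = private_key.sign(contents)

verify_ed25519_signature(private_key.public_key(), contents, signature,
                         'bad signature: %(exc)s')  # silent on success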
def is_available(self) -> bool:
    """Indicate if this Monitor is currently available."""
    status_response = self._client.get_state(
        'api/monitors/daemonStatus/id:{}/daemon:zmc.json'.format(
            self._monitor_id
        )
    )

    if not status_response:
        _LOGGER.warning('Could not get availability for monitor {}'.format(
            self._monitor_id
        ))
        return False

    # Monitor_Status was only added in ZM 1.32.3
    monitor_status = self._raw_result.get('Monitor_Status', None)
    capture_fps = monitor_status and monitor_status['CaptureFPS']

    return status_response.get('status', False) and capture_fps != "0.00"
def _get_persistent_boot_devices(self):
    """Get details of persistent boot devices and their order

    :returns: List of dictionary of boot sources and
              list of boot device order
    :raises: IloError, on an error from iLO.
    :raises: IloCommandNotSupportedError, if the command is
             not supported on the server.
    """
    # Check if the BIOS resource exists.
    headers_bios, bios_uri, bios_settings = self._check_bios_resource()

    # Get the Boot resource.
    boot_settings = self._get_bios_boot_resource(bios_settings)

    # Get the BootSources resource
    try:
        boot_sources = boot_settings['BootSources']
    except KeyError:
        msg = ("BootSources resource not found.")
        raise exception.IloError(msg)

    try:
        boot_order = boot_settings['PersistentBootConfigOrder']
    except KeyError:
        msg = ("PersistentBootConfigOrder resource not found.")
        raise exception.IloCommandNotSupportedError(msg)
    return boot_sources, boot_order
def fetcher_factory(conf):
    """Return initialized fetcher capable of processing given conf."""
    global PROMOTERS
    applicable = []
    if not PROMOTERS:
        PROMOTERS = load_promoters()
    for promoter in PROMOTERS:
        if promoter.is_applicable(conf):
            applicable.append((promoter.PRIORITY, promoter))
    if applicable:
        best_match = sorted(applicable, reverse=True)[0][1]
        return best_match(conf)
    else:
        raise ConfigurationError(
            'No fetcher is applicable for "{0}"'.format(conf['name'])
        )
def SetStorageWriter(self, storage_writer):
    """Sets the storage writer.

    Args:
        storage_writer (StorageWriter): storage writer.
    """
    self._storage_writer = storage_writer

    # Reset the last event data information. Each storage file should
    # contain event data for its events.
    self._last_event_data_hash = None
    self._last_event_data_identifier = None
def summarize_url(url, num_sentences=4, fmt='default'):
    '''returns: tuple containing
        * single-line summary candidate
        * key points in the format specified.
    '''
    title, meta, full_text = goose_extractor(url)

    if not full_text:
        raise ArticleExtractionFail("Couldn't extract: {}".format(url))

    its = _intertext_score(full_text)
    tss = _title_similarity_score(full_text, title)

    if _eval_meta_as_summary(meta):
        summ = meta
        if tss[0][2].lower() in summ.lower():
            its, tss = _remove_title_from_tuples(its, tss)
        elif summ.lower() in tss[0][2].lower():
            summ = tss[0][2]
            its, tss = _remove_title_from_tuples(its, tss)
    else:
        summ = tss[0][2]
        its, tss = _remove_title_from_tuples(its, tss)

    scores = [score[2] for score in _aggregrate_scores(its, tss, num_sentences)]

    formatted = Formatter(scores, fmt).frmt()
    return summ, formatted
def process_npdu(self, npdu):
    """Encode NPDUs from the service access point and send them downstream."""
    if _debug: NetworkAdapter._debug("process_npdu %r (net=%r)", npdu, self.adapterNet)

    pdu = PDU(user_data=npdu.pduUserData)
    npdu.encode(pdu)
    self.request(pdu)
def cross_state_value(state):
    """
    Compute the state value of the cross solving search.
    """
    centres, edges = state
    value = 0
    for edge in edges:
        if "U" in edge:
            if edge["U"] == centres["D"]["D"]:
                value += 1
            else:
                value += 2
        elif "D" in edge:
            if edge["D"] != centres["D"]["D"]:
                value += 3
            else:
                value += 1
    edgeposes = {}
    counts = {f: 0 for f in "LFRB"}
    ngedges = []
    for edge in edges:
        if "U" in edge and edge["U"] == centres["D"]["D"]:
            k = "".join(edge.facings.keys()).replace("U", "")
            edgeposes[k] = edge[k]
            counts[k] += 1
        elif "D" in edge and edge["D"] == centres["D"]["D"]:
            k = "".join(edge.facings.keys()).replace("D", "")
            edgeposes[k] = edge[k]
            counts[k] += 1
        elif "U" in edge or "D" in edge:
            ngedges.append(edge)
        else:
            for k, s in edge:
                if s != centres["D"]["D"]:
                    edgeposes[k] = s
                    counts[k] += 1
                    break
    for edge in ngedges:
        idx = "LFRB".index(edge[centres["D"].colour])
        # try the two neighbouring slots in turn
        for i in [-1, 1]:
            if "LFRB"[(idx + i) % 4] not in edgeposes:
                k = "".join(edge.facings.keys()).replace("LFRB"[idx], "")
                edgeposes["LFRB"[(idx + i) % 4]] = edge[k]
                counts["LFRB"[(idx + i) % 4]] += 1
                break
        else:
            k = "".join(edge.facings.keys()).replace("LFRB"[idx], "")
            if counts["LFRB"[(idx - 1) % 4]] > counts["LFRB"[(idx + 1) % 4]]:
                edgeposes["LFRB"[(idx - 1) % 4]] = edge[k]
            else:
                edgeposes["LFRB"[(idx + 1) % 4]] = edge[k]
    relative_pos = {f: centres[f][f] for f in "LFRB"}
    if len(edgeposes) == 4:
        for i in range(4):
            edgeposes["L"], edgeposes["F"], edgeposes["R"], edgeposes["B"] = \
                edgeposes["F"], edgeposes["R"], edgeposes["B"], edgeposes["L"]
            if edgeposes == relative_pos:
                break
        else:
            value += 5
    else:
        value += 3
    return value
def weighted_temperature(self, how='geometric_series'):
    r"""
    A new temperature vector is generated containing a multi-day
    average temperature as needed in the load profile function.

    Parameters
    ----------
    how : string
        string which type to return ("geometric_series" or "mean")

    Notes
    -----
    Equation for the mathematical series of the average
    temperature [1]_:

    .. math::
        T = \frac{T_{D} + 0.5 \cdot T_{D-1} + 0.25 \cdot T_{D-2} +
            0.125 \cdot T_{D-3}}{1 + 0.5 + 0.25 + 0.125}

    with :math:`T_D` = Average temperature on the present day
         :math:`T_{D-i}` = Average temperature on the day - i

    References
    ----------
    .. [1] `BDEW <https://www.avacon.de/cps/rde/xbcr/avacon/15-06-30_Leitfaden_Abwicklung_SLP_Gas.pdf>`_,
        BDEW Documentation for heat profiles.
    """
    # calculate daily mean temperature
    temperature = self.df['temperature'].resample('D').mean().reindex(
        self.df.index).fillna(method='ffill').fillna(method='bfill')

    if how == 'geometric_series':
        temperature_mean = (temperature + 0.5 * np.roll(temperature, 24) +
                            0.25 * np.roll(temperature, 48) +
                            0.125 * np.roll(temperature, 72)) / 1.875
    elif how == 'mean':
        temperature_mean = temperature
    else:
        temperature_mean = None

    return temperature_mean
r""" A new temperature vector is generated containing a multi-day average temperature as needed in the load profile function. Parameters ---------- how : string string which type to return ("geometric_series" or "mean") Notes ----- Equation for the mathematical series of the average tempaerature [1]_: .. math:: T=\frac{T_{D}+0.5\cdot T_{D-1}+0.25\cdot T_{D-2}+ 0.125\cdot T_{D-3}}{1+0.5+0.25+0.125} with :math:`T_D` = Average temperature on the present day :math:`T_{D-i}` = Average temperature on the day - i References ---------- .. [1] `BDEW <https://www.avacon.de/cps/rde/xbcr/avacon/15-06-30_Leitfaden_Abwicklung_SLP_Gas.pdf>`_, BDEW Documentation for heat profiles.
def emptyPrinter(self, expr):
    """Fallback printer"""
    indent_str = " " * (self._print_level - 1)
    lines = []
    if isinstance(expr.__class__, Singleton):
        # We exploit that Singletons override __repr__ to directly return
        # their name
        return indent_str + repr(expr)
    if isinstance(expr, Expression):
        args = expr.args
        keys = expr.minimal_kwargs.keys()
        lines.append(indent_str + expr.__class__.__name__ + "(")
        for arg in args:
            lines.append(self.doprint(arg) + ",")
        for key in keys:
            arg = expr.kwargs[key]
            lines.append(
                (" " * self._print_level) + key + '=' +
                self.doprint(arg).lstrip() + ",")
        if len(args) > 0 or len(keys) > 0:
            lines[-1] = lines[-1][:-1]  # drop trailing comma for last arg
        lines[-1] += ")"
    elif isinstance(expr, (tuple, list)):
        delims = ("(", ")") if isinstance(expr, tuple) else ("[", "]")
        if len(expr) == 1:
            delims = (delims[0], "," + delims[1])
        lines.append(
            indent_str + delims[0] +
            ", ".join([render_head_repr(v) for v in expr]) + delims[1])
    else:
        lines.append(indent_str + SympyReprPrinter().doprint(expr))
    return "\n".join(lines)
def _class_instance_from_name(class_name, *arg, **kwarg):
    """
    class_name is of the form modA.modB.modC.class
    module_path splits on "." and the import_path is then ['modA','modB','modC']
    the __import__ call is really annoying but essentially it reads like:
        import class from modA.modB.modC
        - Then the module variable points to modC
        - Then you get the class from the module.
    """
    # we first look in tc.extensions for the class name
    module_path = class_name.split('.')
    import_path = module_path[0:-1]
    module = __import__('.'.join(import_path), fromlist=[module_path[-1]])
    class_ = getattr(module, module_path[-1])
    instance = class_(*arg, **kwarg)
    return instance
def get_jobs(self, id=None, params=None):
    """
    `<>`_

    :arg id: The ID of the job(s) to fetch. Accepts glob patterns, or left
        blank for all jobs
    """
    return self.transport.perform_request(
        "GET", _make_path("_rollup", "job", id), params=params
    )
def decompress_messages(self, partitions_offmsgs):
    """ Decompress pre-defined compressed fields for each message. """
    for pomsg in partitions_offmsgs:
        if pomsg['message']:
            pomsg['message'] = self.decompress_fun(pomsg['message'])
        yield pomsg
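# Because this is a generator, messages are decompressed lazily as a
# consumer iterates; a standalone sketch with zlib standing in for the
# configured decompress_fun (the message dicts are illustrative):
import zlib

def decompress_stream(msgs, decompress_fun=zlib.decompress):
    for pomsg in msgs:
        if pomsg['message']:
            pomsg['message'] = decompress_fun(pomsg['message'])
        yield pomsg

msgs = [{'offset': 12, 'message': zlib.compress(b'payload')},
        {'offset': 13, 'message': None}]  # empty messages pass through
assert [m['message'] for m in decompress_stream(msgs)] == [b'payload', None]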
def _get_partial_string_timestamp_match_key(self, key, labels):
    """Translate any partial string timestamp matches in key, returning the
    new key (GH 10331)"""
    if isinstance(labels, MultiIndex):
        if (isinstance(key, str) and labels.levels[0].is_all_dates):
            # Convert key '2016-01-01' to
            # ('2016-01-01'[, slice(None, None, None)]+)
            key = tuple([key] + [slice(None)] * (len(labels.levels) - 1))

        if isinstance(key, tuple):
            # Convert (..., '2016-01-01', ...) in tuple to
            # (..., slice('2016-01-01', '2016-01-01', None), ...)
            new_key = []
            for i, component in enumerate(key):
                if (isinstance(component, str) and
                        labels.levels[i].is_all_dates):
                    new_key.append(slice(component, component, None))
                else:
                    new_key.append(component)
            key = tuple(new_key)

    return key
def printed_out(self, name):
    """ Create a string representation of the action """
    opt = self.variables().optional_namestring()
    req = self.variables().required_namestring()

    out = ''
    out += '| |\n'
    out += '| |---{}({}{})\n'.format(name, req, opt)
    if self.description:
        out += '| | {}\n'.format(self.description)

    return out
def add_gemini_query(self, name, query):
    """Add a user defined gemini query

    Args:
        name (str)
        query (str)
    """
    logger.info("Adding query {0} with text {1}".format(name, query))
    new_query = GeminiQuery(name=name, query=query)
    self.session.add(new_query)
    self.save()
    return new_query
def repository(self):
    """Repository."""
    m = re.match(r"(.+)(_\d{4}_\d{2}_\d{2}_)(.+)", self.__module__)
    if m:
        return m.group(1)
    m = re.match(r"(.+)(_release_)(.+)", self.__module__)
    if m:
        return m.group(1)
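# With hypothetical module names, the regexes strip a date or release
# marker to recover the repository prefix:
import re

for name in ('pipelines_2019_04_01_ingest', 'pipelines_release_ingest'):
    m = (re.match(r"(.+)(_\d{4}_\d{2}_\d{2}_)(.+)", name) or
         re.match(r"(.+)(_release_)(.+)", name))
    print(m.group(1))  # pipelines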
def all_table_names_in_schema(self, schema, cache=False,
                              cache_timeout=None, force=False):
    """Parameters need to be passed as keyword arguments.

    For unused parameters, they are referenced in
    cache_util.memoized_func decorator.

    :param schema: schema name
    :type schema: str
    :param cache: whether cache is enabled for the function
    :type cache: bool
    :param cache_timeout: timeout in seconds for the cache
    :type cache_timeout: int
    :param force: whether to force refresh the cache
    :type force: bool
    :return: table list
    :rtype: list
    """
    tables = []
    try:
        tables = self.db_engine_spec.get_table_names(
            inspector=self.inspector, schema=schema)
    except Exception as e:
        logging.exception(e)
    return tables
def _request(self, url, params={}):
    """Makes a request using the currently open session.

    :param url: A url fragment to use in the creation of the master url
    """
    r = self._session.get(url=url, params=params, headers=DEFAULT_ORIGIN)
    return r
def _is_valid_endpoint(endpoint):
    """helper for interval_range to check if start/end are valid types"""
    return any([is_number(endpoint),
                isinstance(endpoint, Timestamp),
                isinstance(endpoint, Timedelta),
                endpoint is None])
def layout(mtf_graph, mesh_shape, mtf_outputs=()):
    """Compute layout rules based on a computational graph and mesh shape.

    Args:
        mtf_graph: a mtf.Graph.
        mesh_shape: an mtf.Shape, str, or listlike of mtf.Dimension.
        mtf_outputs: an optional iterable of mtf.Tensor, representing the
            outputs of the computation.

    Returns:
        a mtf.LayoutRules
    """
    mesh_shape = mtf.convert_to_shape(mesh_shape)
    estimator = memory_estimator.MemoryEstimator(mtf_graph, mesh_shape,
                                                 mtf_outputs)
    optimizer = layout_optimizer.LayoutOptimizer(estimator)
    return mtf.convert_to_layout_rules(optimizer.solve())
def sync_state(self):
    """Called to synchronize state (e.g. when parameters have changed).
    """
    oradius = self.radius + self.width
    if oradius < self.radius:
        raise ValueError('Outer boundary < inner boundary')
    d = dict(points=self.points, radius=self.radius,
             color=self.color, linewidth=self.linewidth,
             linestyle=self.linestyle, alpha=self.alpha)
    # update inner object
    self.objects[0].__dict__.update(d)
    # update outer object
    d['radius'] = oradius
    self.objects[1].__dict__.update(d)
def breeding_wean(request, breeding_id):
    """This view is used to generate a form by which to wean pups which
    belong to a particular breeding set.

    This view typically is used to wean existing pups. This includes the
    MouseID, Cage, Markings, Gender and Wean Date fields. For other fields
    use the breeding-change page.

    It takes a request in the form /breeding/(breeding_id)/wean/ and
    returns a form specific to the breeding set defined in breeding_id.
    breeding_id is the background identification number of the breeding
    set and does not refer to the barcode of any breeding cage.

    This view returns a formset in which one row represents one animal.
    To add extra animals to a breeding set use
    /breeding/(breeding_id)/pups/.

    This view is restricted to those with the permission
    animal.change_animal.
    """
    breeding = Breeding.objects.get(id=breeding_id)
    strain = breeding.Strain
    PupsFormSet = inlineformset_factory(
        Breeding, Animal, extra=0,
        exclude=('Alive', 'Father', 'Mother', 'Breeding', 'Notes', 'Rack',
                 'Rack_Position', 'Strain', 'Background', 'Genotype',
                 'Death', 'Cause_of_Death', 'Backcross', 'Generation'))
    if request.method == "POST":
        formset = PupsFormSet(
            request.POST, instance=breeding,
            queryset=Animal.objects.filter(Alive=True, Weaned__isnull=True))
        if formset.is_valid():
            formset.save()
            return HttpResponseRedirect(breeding.get_absolute_url())
    else:
        formset = PupsFormSet(
            instance=breeding,
            queryset=Animal.objects.filter(Alive=True, Weaned__isnull=True))
    return render(request, "breeding_wean.html",
                  {"formset": formset, 'breeding': breeding})
def remove_group(self, groupname):
    """Delete a group from the JIRA instance.

    :param groupname: The group to be deleted from the JIRA instance.
    :type groupname: str
    :return: Boolean. Returns True on success.
    :rtype: bool
    """
    # implementation based on
    # https://docs.atlassian.com/jira/REST/ondemand/#d2e5173
    url = self._options['server'] + '/rest/api/latest/group'
    x = {'groupname': groupname}
    self._session.delete(url, params=x)
    return True
def counts_map(self):
    """Return 3-D counts map for this component as a Map object.

    Returns
    -------
    map : `~fermipy.skymap.MapBase`
    """
    try:
        if isinstance(self.like, gtutils.SummedLikelihood):
            cmap = self.like.components[0].logLike.countsMap()
            p_method = cmap.projection().method()
        else:
            cmap = self.like.logLike.countsMap()
            p_method = cmap.projection().method()
    except Exception:
        p_method = 0

    if p_method == 0:  # WCS
        z = cmap.data()
        z = np.array(z).reshape(self.enumbins, self.npix, self.npix)
        return WcsNDMap(copy.deepcopy(self.geom), z)
    elif p_method == 1:  # HPX
        z = cmap.data()
        z = np.array(z).reshape(self.enumbins, np.max(self.geom.npix))
        return HpxNDMap(copy.deepcopy(self.geom), z)
    else:
        self.logger.error('Did not recognize CountsMap type %i' % p_method,
                          exc_info=True)
    return None
def convolve(data, h, res_g=None, sub_blocks=None):
    """
    convolves 1d-3d data with kernel h

    data and h can either be numpy arrays or gpu buffer objects
    (OCLArray, which must be float32 then)

    boundary conditions are clamping to zero at edge.
    """
    if not len(data.shape) in [1, 2, 3]:
        raise ValueError("dim = %s not supported" % (len(data.shape)))

    if len(data.shape) != len(h.shape):
        raise ValueError("dimension of data (%s) and h (%s) are different" %
                         (len(data.shape), len(h.shape)))

    if isinstance(data, OCLArray) and isinstance(h, OCLArray):
        return _convolve_buf(data, h, res_g)
    elif isinstance(data, np.ndarray) and isinstance(h, np.ndarray):
        if sub_blocks == (1,) * len(data.shape) or sub_blocks is None:
            return _convolve_np(data, h)
        else:
            # cut the image into tiles and operate on every one of them
            N_sub = [int(np.ceil(1. * n / s)) for n, s in zip(data.shape, sub_blocks)]
            Npads = [int(s / 2) for s in h.shape]
            res = np.empty(data.shape, np.float32)
            for data_tile, data_s_src, data_s_dest \
                    in tile_iterator(data, blocksize=N_sub,
                                     padsize=Npads,
                                     mode="constant"):
                res_tile = _convolve_np(data_tile.copy(), h)
                res[data_s_src] = res_tile[data_s_dest]
            return res
    else:
        raise TypeError("unknown types (%s, %s)" % (type(data), type(h)))
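# Plausible call pattern (a sketch, assuming the module around convolve()
# provides OCLArray and the numpy helpers it dispatches to):
import numpy as np

data = np.random.rand(128, 128).astype(np.float32)
kernel = np.ones((5, 5), np.float32) / 25.  # simple box blur

blurred = convolve(data, kernel)                           # one pass
blurred_tiled = convolve(data, kernel, sub_blocks=(2, 2))  # 2x2 tiles, same result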
def start(sc, timedelta_formatter=_pretty_time_delta, bar_width=20, sleep_time=0.5):
    """Creates a :class:`ProgressPrinter` that polls the SparkContext for
    information about active stage progress and prints that information to
    stderr. The printer runs in a thread and is useful for showing text-based
    progress bars in interactive environments (e.g., REPLs, Jupyter Notebooks).

    This function creates a singleton printer instance and returns that
    instance no matter what arguments are passed to this function again until
    :func:`stop` is called to shutdown the singleton. If you want more control
    over the printer lifecycle, create an instance of :class:`ProgressPrinter`
    directly and use its methods.

    Parameters
    ----------
    sc: :class:`pyspark.context.SparkContext`, optional
        SparkContext to use to create a new thread
    timedelta_formatter : callable, optional
        Converts a timedelta to a string.
    bar_width : int, optional
        Width of the progressbar to print out.
    sleep_time : float, optional
        Frequency in seconds with which to poll Apache Spark for task stage
        information.

    Returns
    -------
    :class:`ProgressPrinter`
    """
    global _printer_singleton
    if _printer_singleton is None:
        _printer_singleton = ProgressPrinter(sc, timedelta_formatter,
                                             bar_width, sleep_time)
        _printer_singleton.start()
    return _printer_singleton
def atan(x):
    """
    atan(x)

    Trigonometric arc tan function.
    """
    _math = infer_math(x)
    if _math is math:
        return _math.atan(x)
    else:
        return _math.arctan(x)
def _get_3d_plot(self, label_stable=True):
    """
    Shows the plot using pylab. Usually I won't do imports in methods,
    but since plotting is a fairly expensive library to load and not all
    machines have matplotlib installed, I have done it this way.
    """
    import matplotlib.pyplot as plt
    import mpl_toolkits.mplot3d.axes3d as p3
    from matplotlib.font_manager import FontProperties
    fig = plt.figure()
    ax = p3.Axes3D(fig)
    font = FontProperties()
    font.set_weight("bold")
    font.set_size(20)
    (lines, labels, unstable) = self.pd_plot_data
    count = 1
    newlabels = list()
    for x, y, z in lines:
        ax.plot(x, y, z, "bo-", linewidth=3, markeredgecolor="b",
                markerfacecolor="r", markersize=10)
    for coords in sorted(labels.keys()):
        entry = labels[coords]
        label = entry.name
        if label_stable:
            if len(entry.composition.elements) == 1:
                ax.text(coords[0], coords[1], coords[2], label)
            else:
                ax.text(coords[0], coords[1], coords[2], str(count))
                newlabels.append("{} : {}".format(count, latexify(label)))
                count += 1
    plt.figtext(0.01, 0.01, "\n".join(newlabels))
    ax.axis("off")
    return plt
def upsert(self, doc, namespace, timestamp):
    """Update or insert a document into Solr

    This method should call whatever add/insert/update method exists for
    the backend engine and add the document in there. The input will
    always be one mongo document, represented as a Python dictionary.
    """
    if self.auto_commit_interval is not None:
        self.solr.add([self._clean_doc(doc, namespace, timestamp)],
                      commit=(self.auto_commit_interval == 0),
                      commitWithin=u(self.auto_commit_interval))
    else:
        self.solr.add([self._clean_doc(doc, namespace, timestamp)],
                      commit=False)
def increment(self, member, amount=1):
    """Increment the score of ``member`` by ``amount``."""
    self._dict[member] += amount
    return self._dict[member]
def _bson_to_dict(data, opts):
    """Decode a BSON string to document_class."""
    try:
        if _raw_document_class(opts.document_class):
            return opts.document_class(data, opts)
        _, end = _get_object_size(data, 0, len(data))
        return _elements_to_dict(data, 4, end, opts)
    except InvalidBSON:
        raise
    except Exception:
        # Change exception type to InvalidBSON but preserve traceback.
        _, exc_value, exc_tb = sys.exc_info()
        reraise(InvalidBSON, exc_value, exc_tb)
def get_permalink_ids_iter(self):
    '''
    Method to get permalink ids from content. To be bound to the class
    last thing.
    '''
    permalink_id_key = self.settings['PERMALINK_ID_METADATA_KEY']
    permalink_ids = self.metadata.get(permalink_id_key, '')
    for permalink_id in permalink_ids.split(','):
        if permalink_id:
            yield permalink_id.strip()
def linear_copy(self, deep=False):
    """
    Returns a copy of the input unstructured grid containing only
    linear cells.  Converts the following cell types to their
    linear equivalents.

    - VTK_QUADRATIC_TETRA      --> VTK_TETRA
    - VTK_QUADRATIC_PYRAMID    --> VTK_PYRAMID
    - VTK_QUADRATIC_WEDGE      --> VTK_WEDGE
    - VTK_QUADRATIC_HEXAHEDRON --> VTK_HEXAHEDRON

    Parameters
    ----------
    deep : bool
        When True, makes a copy of the points array.  Default False.
        Cells and cell types are always copied.

    Returns
    -------
    grid : vtki.UnstructuredGrid
        UnstructuredGrid containing only linear cells.
    """
    lgrid = self.copy(deep)

    # grab the vtk object
    vtk_cell_type = numpy_to_vtk(self.GetCellTypesArray(), deep=True)
    celltype = vtk_to_numpy(vtk_cell_type)
    celltype[celltype == VTK_QUADRATIC_TETRA] = VTK_TETRA
    celltype[celltype == VTK_QUADRATIC_PYRAMID] = VTK_PYRAMID
    celltype[celltype == VTK_QUADRATIC_WEDGE] = VTK_WEDGE
    celltype[celltype == VTK_QUADRATIC_HEXAHEDRON] = VTK_HEXAHEDRON

    # track quad mask for later
    quad_quad_mask = celltype == VTK_QUADRATIC_QUAD
    celltype[quad_quad_mask] = VTK_QUAD

    quad_tri_mask = celltype == VTK_QUADRATIC_TRIANGLE
    celltype[quad_tri_mask] = VTK_TRIANGLE

    vtk_offset = self.GetCellLocationsArray()
    cells = vtk.vtkCellArray()
    cells.DeepCopy(self.GetCells())
    lgrid.SetCells(vtk_cell_type, vtk_offset, cells)

    # fixing bug with display of quad cells
    if np.any(quad_quad_mask):
        quad_offset = lgrid.offset[quad_quad_mask]
        base_point = lgrid.cells[quad_offset + 1]
        lgrid.cells[quad_offset + 5] = base_point
        lgrid.cells[quad_offset + 6] = base_point
        lgrid.cells[quad_offset + 7] = base_point
        lgrid.cells[quad_offset + 8] = base_point

    if np.any(quad_tri_mask):
        tri_offset = lgrid.offset[quad_tri_mask]
        base_point = lgrid.cells[tri_offset + 1]
        lgrid.cells[tri_offset + 4] = base_point
        lgrid.cells[tri_offset + 5] = base_point
        lgrid.cells[tri_offset + 6] = base_point

    return lgrid
def _build_master(cls):
    """
    Prepare the master working set.
    """
    ws = cls()
    try:
        from __main__ import __requires__
    except ImportError:
        # The main program does not list any requirements
        return ws

    # ensure the requirements are met
    try:
        ws.require(__requires__)
    except VersionConflict:
        return cls._build_from_requirements(__requires__)

    return ws
def match_objective_id(self, objective_id, match):
    """Sets the objective ``Id`` for this query.

    arg:    objective_id (osid.id.Id): an objective ``Id``
    arg:    match (boolean): ``true`` for a positive match, ``false``
            for a negative match
    raise:  NullArgument - ``objective_id`` is ``null``
    *compliance: mandatory -- This method must be implemented.*

    """
    if not isinstance(objective_id, Id):
        raise errors.InvalidArgument()
    self._add_match('objectiveId', str(objective_id), match)
def replace_namespaced_replica_set_scale(self, name, namespace, body, **kwargs):
    """
    replace scale of the specified ReplicaSet

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_namespaced_replica_set_scale(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the Scale (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1Scale body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :return: V1Scale
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.replace_namespaced_replica_set_scale_with_http_info(name, namespace, body, **kwargs)
    else:
        (data) = self.replace_namespaced_replica_set_scale_with_http_info(name, namespace, body, **kwargs)
        return data
replace scale of the specified ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_replica_set_scale(name, namespace, body, async_req=True)
>>> result = thread.get()

:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Scale body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Scale
         If the method is called asynchronously,
         returns the request thread.
def drop_table(self, tablename, silent=False):
        """
        Drop a table

        :Parameters:

        - tablename: string
        - silent: boolean. If False and the table doesn't exist, an exception
          will be raised; otherwise the missing table is ignored

        :Return: Nothing
        """
        if not silent and not self.is_table_existed(tablename):
            raise MonSQLException('TABLE %s DOES NOT EXIST' % tablename)

        self.__cursor.execute('DROP TABLE IF EXISTS %s' % tablename)
        self.__db.commit()
Drop a table

:Parameters:

- tablename: string
- silent: boolean. If False and the table doesn't exist, an exception
  will be raised; otherwise the missing table is ignored

:Return: Nothing
def get_classname(o):
    """
    Returns the classname of an object or a class

    :param o: an object, class, or callable
    :return: the qualified name of the class (or callable)
    """
    # classes and callables are used directly; instances fall back to
    # their __class__
    if inspect.isclass(o) or callable(o):
        target = o
    else:
        target = o.__class__

    try:
        return target.__qualname__
    except AttributeError:  # pragma: no cover
        return target.__name__
Returns the classname of an object or a class

:param o: an object, class, or callable
:return: the qualified name of the class (or callable)
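A quick usage sketch of the lookup above (plain Python, no assumptions beyond the function itself):

# Usage sketch: classes and functions are used directly, instances fall
# back to their __class__.
class Foo:
    pass

print(get_classname(Foo))            # 'Foo'
print(get_classname(Foo()))          # 'Foo'
print(get_classname(get_classname))  # 'get_classname'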
def igphyml(input_file=None, tree_file=None, root=None, verbose=False):
    '''
    Computes a phylogenetic tree using IgPhyML.

    .. note::

        IgPhyML must be installed. It can be downloaded from
        https://github.com/kbhoehn/IgPhyML.

    Args:

        input_file (str): Path to a Phylip-formatted multiple sequence alignment. Required.

        tree_file (str): Path to the output tree file.

        root (str): Name of the root sequence. Required.

        verbose (bool): If `True`, prints the standard output and standard error for
            each IgPhyML run. Default is `False`.
    '''
    if shutil.which('igphyml') is None:
        raise RuntimeError('It appears that IgPhyML is not installed.\nPlease install and try again.')
    # first, the tree topology is estimated with the M0/GY94 model
    igphyml_cmd1 = 'igphyml -i {} -m GY -w M0 -t e --run_id gy94'.format(input_file)
    p1 = sp.Popen(igphyml_cmd1.split(), stdout=sp.PIPE, stderr=sp.PIPE)
    stdout1, stderr1 = p1.communicate()
    if verbose:
        print(stdout1.decode() + '\n')
        print(stderr1.decode() + '\n\n')
    intermediate = input_file + '_igphyml_tree.txt_gy94'
    # now we fit the HLP17 model with the tree topology fixed;
    # '-u' supplies the GY94 tree as the starting topology
    igphyml_cmd2 = 'igphyml -i {0} -m HLP17 --root {1} -o lr -u {2} -o {3}'.format(
        input_file, root, intermediate, tree_file)
    p2 = sp.Popen(igphyml_cmd2.split(), stdout=sp.PIPE, stderr=sp.PIPE)
    stdout2, stderr2 = p2.communicate()
    if verbose:
        print(stdout2.decode() + '\n')
        print(stderr2.decode() + '\n')
    return tree_file + '_igphyml_tree.txt'
Computes a phylogenetic tree using IgPhyML. .. note:: IgPhyML must be installed. It can be downloaded from https://github.com/kbhoehn/IgPhyML. Args: input_file (str): Path to a Phylip-formatted multiple sequence alignment. Required. tree_file (str): Path to the output tree file. root (str): Name of the root sequence. Required. verbose (bool): If `True`, prints the standard output and standard error for each IgPhyML run. Default is `False`.
def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bin/sh'): ''' run a command on the remote host ''' vvv("EXEC COMMAND %s" % cmd) if self.runner.sudo and sudoable: raise errors.AnsibleError("fireball does not use sudo, but runs as whoever it was initiated as. (That itself is where to use sudo).") data = dict( mode='command', cmd=cmd, tmp_path=tmp_path, executable=executable, ) data = utils.jsonify(data) data = utils.encrypt(self.key, data) self.socket.send(data) response = self.socket.recv() response = utils.decrypt(self.key, response) response = utils.parse_json(response) return (response.get('rc',None), '', response.get('stdout',''), response.get('stderr',''))
run a command on the remote host
def _try_dump(cnf, outpath, otype, fmsg, extra_opts=None): """ :param cnf: Configuration object to print out :param outpath: Output file path or None :param otype: Output type or None :param fmsg: message if it cannot detect otype by 'inpath' :param extra_opts: Map object will be given to API.dump as extra options """ if extra_opts is None: extra_opts = {} try: API.dump(cnf, outpath, otype, **extra_opts) except API.UnknownFileTypeError: _exit_with_output(fmsg % outpath, 1) except API.UnknownProcessorTypeError: _exit_with_output("Invalid output type '%s'" % otype, 1)
:param cnf: Configuration object to print out :param outpath: Output file path or None :param otype: Output type or None :param fmsg: message if it cannot detect otype by 'inpath' :param extra_opts: Map object will be given to API.dump as extra options
def render_export_form(self, request, context, form_url=''):
        """
        Render the form submission export form.
        """
        context.update({
            'has_change_permission': self.has_change_permission(request),
            'form_url': mark_safe(form_url),
            'opts': self.opts,
            'add': True,
            'save_on_top': self.save_on_top,
        })

        return TemplateResponse(request, self.export_form_template, context)
Render the form submission export form.
def feet(kilometers=0, meters=0, miles=0, nautical=0):
    """
    Converts a distance to feet. Any combination of kilometers, meters,
    miles and nautical miles may be given; the distances are summed.
    """
    ret = 0.
    if nautical:
        kilometers += nautical / nm(1.)
    if meters:
        kilometers += meters / 1000.
    if kilometers:
        miles += mi(kilometers=kilometers)
    ret += miles * 5280
    return ret
Converts a distance to feet. Any combination of kilometers, meters,
miles and nautical miles may be given; the distances are summed.
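A standalone sketch of the same conversion path with the helper conversions inlined; the constants are standard unit definitions, while `nm`/`mi` in the original are assumed to convert kilometers to nautical and statute miles:

# Standalone sketch: same summing logic as feet(), helpers inlined.
KM_PER_NAUTICAL_MILE = 1.852       # exact, by definition
KM_PER_STATUTE_MILE = 1.609344     # exact, by definition

def feet_standalone(kilometers=0, meters=0, miles=0, nautical=0):
    kilometers += nautical * KM_PER_NAUTICAL_MILE
    kilometers += meters / 1000.0
    miles += kilometers / KM_PER_STATUTE_MILE
    return miles * 5280

print(feet_standalone(miles=1))               # 5280.0
print(round(feet_standalone(meters=100), 1))  # 328.1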
def get_bootdev(self):
        """Get current boot device override information.

        Provides the current requested boot device.  Be aware that not all
        IPMI devices support this.  Even in BMCs that claim to, occasionally
        the BIOS or UEFI fail to honor it. This is usually only applicable to
        the next reboot.

        :raises: IpmiException on an error.
        :returns: dict -- the current boot device settings
        """
        response = self.raw_command(netfn=0, command=9, data=(5, 0, 0))
        # interpret response per 'get system boot options'
        if 'error' in response:
            raise exc.IpmiException(response['error'])
        # this should only be invoked for a 'get system boot options'
        # response complying with the IPMI spec and targeting the
        # 'boot flags' parameter
        assert (response['command'] == 9 and
                response['netfn'] == 1 and
                response['data'][0] == 1 and
                (response['data'][1] & 0b1111111) == 5)
        if (response['data'][1] & 0b10000000 or
                not response['data'][2] & 0b10000000):
            return {'bootdev': 'default', 'persistent': True}
        else:  # will consult data2 of the boot flags parameter for the data
            persistent = False
            uefimode = False
            if response['data'][2] & 0b1000000:
                persistent = True
            if response['data'][2] & 0b100000:
                uefimode = True
            bootnum = (response['data'][3] & 0b111100) >> 2
            bootdev = boot_devices.get(bootnum)
            if bootdev:
                return {'bootdev': bootdev,
                        'persistent': persistent,
                        'uefimode': uefimode}
            else:
                return {'bootdev': bootnum,
                        'persistent': persistent,
                        'uefimode': uefimode}
Get current boot device override information.

Provides the current requested boot device.  Be aware that not all
IPMI devices support this.  Even in BMCs that claim to, occasionally
the BIOS or UEFI fail to honor it. This is usually only applicable to
the next reboot.

:raises: IpmiException on an error.
:returns: dict -- the current boot device settings
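The bit masks above can be hard to read inline; a standalone sketch decoding the same three bytes, with the field positions taken from the checks in the method rather than re-derived from the IPMI spec:

# Decode the boot-flags bytes the same way get_bootdev does.
def decode_boot_flags(param_byte, flags_byte, dev_byte):
    # bit 7 of param_byte: parameter invalid; bit 7 of flags_byte: flags valid
    if param_byte & 0b10000000 or not flags_byte & 0b10000000:
        return {'bootdev': 'default', 'persistent': True}
    return {
        'bootnum': (dev_byte & 0b111100) >> 2,
        'persistent': bool(flags_byte & 0b1000000),
        'uefimode': bool(flags_byte & 0b100000),
    }

# flags valid + persistent + UEFI; device selector bits request device 1
print(decode_boot_flags(0b0000101, 0b11100000, 0b000100))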
def random_rollout_subsequences(rollouts, num_subsequences, subsequence_length): """Chooses a random frame sequence of given length from a set of rollouts.""" def choose_subsequence(): # TODO(koz4k): Weigh rollouts by their lengths so sampling is uniform over # frames and not rollouts. rollout = random.choice(rollouts) try: from_index = random.randrange(len(rollout) - subsequence_length + 1) except ValueError: # Rollout too short; repeat. return choose_subsequence() return rollout[from_index:(from_index + subsequence_length)] return [choose_subsequence() for _ in range(num_subsequences)]
Chooses a random frame sequence of given length from a set of rollouts.
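A toy usage sketch with rollouts as plain lists (the real caller passes frame sequences; the length-4 rollout below exercises the retry path):

# Toy usage: sample fixed-length windows from ragged rollouts.
rollouts = [list(range(10)), list(range(4)), list(range(7))]
subseqs = random_rollout_subsequences(rollouts, num_subsequences=3,
                                      subsequence_length=5)
assert all(len(s) == 5 for s in subseqs)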
def _add_flow_v1_2(self, src, port, timeout, datapath):
        """Enter a flow entry for the packet from the slave i/f
        with idle_timeout. For OpenFlow ver1.2 and ver1.3."""
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        match = parser.OFPMatch(
            in_port=port, eth_src=src, eth_type=ether.ETH_TYPE_SLOW)
        actions = [parser.OFPActionOutput(
            ofproto.OFPP_CONTROLLER, ofproto.OFPCML_MAX)]
        inst = [parser.OFPInstructionActions(
            ofproto.OFPIT_APPLY_ACTIONS, actions)]
        mod = parser.OFPFlowMod(
            datapath=datapath, command=ofproto.OFPFC_ADD,
            idle_timeout=timeout, priority=65535,
            flags=ofproto.OFPFF_SEND_FLOW_REM, match=match, instructions=inst)
        datapath.send_msg(mod)
Enter a flow entry for the packet from the slave i/f
with idle_timeout. For OpenFlow ver1.2 and ver1.3.
def keyring_remove(key, yes, **kwargs): """ Removes a public key from the keyring. Does nothing if a key is already not in the keyring. If none is specified - clears the keyring. To force the cocaine-runtime to refresh its keyring, call `refresh` method. """ if key is None: if not yes: click.confirm('Are you sure you want to remove all keys?', abort=True) ctx = Context(**kwargs) ctx.execute_action('keyring:remove', **{ 'key': key, 'storage': ctx.repo.create_secure_service('storage'), })
Removes a public key from the keyring. Does nothing if a key is already not in the keyring. If none is specified - clears the keyring. To force the cocaine-runtime to refresh its keyring, call `refresh` method.
def build_to_target_size_from_token_counts(cls, target_size, token_counts, min_val, max_val, num_iterations=4): """Builds a SubwordTextTokenizer that has `vocab_size` near `target_size`. Uses simple recursive binary search to find a minimum token count that most closely matches the `target_size`. Args: target_size: Desired vocab_size to approximate. token_counts: A dictionary of token counts, mapping string to int. min_val: An integer; lower bound for the minimum token count. max_val: An integer; upper bound for the minimum token count. num_iterations: An integer; how many iterations of refinement. Returns: A SubwordTextTokenizer instance. Raises: ValueError: If `min_val` is greater than `max_val`. """ if min_val > max_val: raise ValueError("Lower bound for the minimum token count " "is greater than the upper bound.") def bisect(min_val, max_val): """Bisection to find the right size.""" present_count = (max_val + min_val) // 2 logger.info("Trying min_count %d" % present_count) subtokenizer = cls() subtokenizer.build_from_token_counts(token_counts, present_count, num_iterations) logger.info("min_count %d attained a %d vocab_size", present_count, subtokenizer.vocab_size) # If min_val == max_val, we can't do any better than this. if subtokenizer.vocab_size == target_size or min_val >= max_val: return subtokenizer if subtokenizer.vocab_size > target_size: other_subtokenizer = bisect(present_count + 1, max_val) else: other_subtokenizer = bisect(min_val, present_count - 1) if other_subtokenizer is None: return subtokenizer if (abs(other_subtokenizer.vocab_size - target_size) < abs(subtokenizer.vocab_size - target_size)): return other_subtokenizer return subtokenizer return bisect(min_val, max_val)
Builds a SubwordTextTokenizer that has `vocab_size` near `target_size`. Uses simple recursive binary search to find a minimum token count that most closely matches the `target_size`. Args: target_size: Desired vocab_size to approximate. token_counts: A dictionary of token counts, mapping string to int. min_val: An integer; lower bound for the minimum token count. max_val: An integer; upper bound for the minimum token count. num_iterations: An integer; how many iterations of refinement. Returns: A SubwordTextTokenizer instance. Raises: ValueError: If `min_val` is greater than `max_val`.
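The recursion is easier to see against a toy monotone vocab-size curve; a standalone sketch of the same search (the divisor curve is a stand-in for actually building a tokenizer):

# Standalone sketch of the recursive bisection over min_count.
def vocab_size(min_count):
    return 1000 // min_count  # toy stand-in: larger min_count -> smaller vocab

def bisect_min_count(min_val, max_val, target):
    present = (min_val + max_val) // 2
    size = vocab_size(present)
    if size == target or min_val >= max_val:
        return present, size
    if size > target:
        other = bisect_min_count(present + 1, max_val, target)
    else:
        other = bisect_min_count(min_val, present - 1, target)
    # keep whichever candidate lands closer to the target
    return other if abs(other[1] - target) < abs(size - target) else (present, size)

print(bisect_min_count(1, 100, target=40))  # -> (25, 40)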
def to_identifier(s):
    """
    Convert a snake_case string to CamelCase. Strings without underscores
    are returned as-is; a leading 'GPS' prefix is normalized to 'Gps' so
    it capitalizes cleanly.
    """
    if s.startswith('GPS'):
        s = 'Gps' + s[3:]
    return ''.join([i.capitalize() for i in s.split('_')]) if '_' in s else s
Convert a snake_case string to CamelCase. Strings without underscores
are returned as-is; a leading 'GPS' prefix is normalized to 'Gps' so
it capitalizes cleanly.
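Expected behavior, spelled out:

print(to_identifier('speed_limit'))  # 'SpeedLimit'
print(to_identifier('GPS_STATUS'))   # 'GpsStatus' (leading GPS is normalized first)
print(to_identifier('plain'))        # 'plain' (no underscore: returned as-is)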
def define_objective_with_I(I, *args): """Define a polynomial using measurements and an I matrix describing a Bell inequality. :param I: The I matrix of a Bell inequality in the Collins-Gisin notation. :type I: list of list of int. :param args: Either the measurements of Alice and Bob or a `Probability` class describing their measurement operators. :type A: tuple of list of list of :class:`sympy.physics.quantum.operator.HermitianOperator` or :class:`ncpol2sdpa.Probability` :returns: :class:`sympy.core.expr.Expr` -- the objective function to be solved as a minimization problem to find the maximum quantum violation. Note that the sign is flipped compared to the Bell inequality. """ objective = I[0][0] if len(args) > 2 or len(args) == 0: raise Exception("Wrong number of arguments!") elif len(args) == 1: A = args[0].parties[0] B = args[0].parties[1] else: A = args[0] B = args[1] i, j = 0, 1 # Row and column index in I for m_Bj in B: # Define first row for Bj in m_Bj: objective += I[i][j] * Bj j += 1 i += 1 for m_Ai in A: for Ai in m_Ai: objective += I[i][0] * Ai j = 1 for m_Bj in B: for Bj in m_Bj: objective += I[i][j] * Ai * Bj j += 1 i += 1 return -objective
Define a polynomial using measurements and an I matrix describing a Bell inequality. :param I: The I matrix of a Bell inequality in the Collins-Gisin notation. :type I: list of list of int. :param args: Either the measurements of Alice and Bob or a `Probability` class describing their measurement operators. :type A: tuple of list of list of :class:`sympy.physics.quantum.operator.HermitianOperator` or :class:`ncpol2sdpa.Probability` :returns: :class:`sympy.core.expr.Expr` -- the objective function to be solved as a minimization problem to find the maximum quantum violation. Note that the sign is flipped compared to the Bell inequality.
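For example, the CHSH inequality in Collins-Gisin form; a usage sketch that assumes ncpol2sdpa's `Probability` helper (two parties, two binary measurements each):

# Usage sketch: maximum quantum violation of CHSH (Collins-Gisin form).
from ncpol2sdpa import Probability

P = Probability([2, 2], [2, 2])   # two 2-outcome measurements per party
I = [[ 0, -1,  0],
     [-1,  1,  1],
     [ 0,  1, -1]]
objective = define_objective_with_I(I, P)  # minimize -> maximal violation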
def _init_individual(subjs, voxels, TRs): """Initializes the individual components `S_i` to empty (all zeros). Parameters ---------- subjs : int The number of subjects. voxels : list of int A list with the number of voxels per subject. TRs : int The number of timepoints in the data. Returns ------- S : list of 2D array, element i has shape=[voxels_i, timepoints] The individual component :math:`S_i` for each subject initialized to zero. """ return [np.zeros((voxels[i], TRs)) for i in range(subjs)]
Initializes the individual components `S_i` to empty (all zeros). Parameters ---------- subjs : int The number of subjects. voxels : list of int A list with the number of voxels per subject. TRs : int The number of timepoints in the data. Returns ------- S : list of 2D array, element i has shape=[voxels_i, timepoints] The individual component :math:`S_i` for each subject initialized to zero.
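The ragged per-subject shapes are the point here; a quick check (assumes numpy is imported as `np`, as in the module):

S = _init_individual(subjs=2, voxels=[100, 120], TRs=50)
print([s.shape for s in S])  # [(100, 50), (120, 50)]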
def post_copy_notes(self, post_id, other_post_id):
        """Function to copy notes (requires login).

        Parameters:
            post_id (int): The id of the post to copy notes from.
            other_post_id (int): The id of the post to copy notes to.
        """
        return self._get('posts/{0}/copy_notes.json'.format(post_id),
                         {'other_post_id': other_post_id}, 'PUT', auth=True)
Function to copy notes (requires login).

Parameters:
    post_id (int): The id of the post to copy notes from.
    other_post_id (int): The id of the post to copy notes to.
def Stop(self):
        """Signals the worker threads to shut down and waits until they exit."""
        self._shutdown = True
        self._new_updates.set()  # Wake up the transmission thread.

        if self._main_thread is not None:
            self._main_thread.join()
            self._main_thread = None

        if self._transmission_thread is not None:
            self._transmission_thread.join()
            self._transmission_thread = None
Signals the worker threads to shut down and waits until they exit.
def _TypecheckDecorator(subject=None, **kwargs): """Dispatches type checks based on what the subject is. Functions or methods are annotated directly. If this method is called with keyword arguments only, return a decorator. """ if subject is None: return _TypecheckDecoratorFactory(kwargs) elif inspect.isfunction(subject) or inspect.ismethod(subject): return _TypecheckFunction(subject, {}, 2, None) else: raise TypeError()
Dispatches type checks based on what the subject is. Functions or methods are annotated directly. If this method is called with keyword arguments only, return a decorator.
def _parse_row(rowvalues, rowtypes): """ Scan a single row from an Excel file, and return the list of ranges corresponding to each consecutive span of non-empty cells in this row. If all cells are empty, return an empty list. Each "range" in the list is a tuple of the form `(startcol, endcol)`. For example, if the row is the following: [ ][ 1.0 ][ 23 ][ "foo" ][ ][ "hello" ][ ] then the returned list of ranges will be: [(1, 4), (5, 6)] This algorithm considers a cell to be empty if its type is 0 (XL_EMPTY), or 6 (XL_BLANK), or if it's a text cell containing empty string, or a whitespace-only string. Numeric `0` is not considered empty. """ n = len(rowvalues) assert n == len(rowtypes) if not n: return [] range_start = None ranges = [] for i in range(n): ctype = rowtypes[i] cval = rowvalues[i] # Check whether the cell is empty or not. If it is empty, and there is # an active range being tracked - terminate it. On the other hand, if # the cell is not empty and there isn't an active range, then start it. if ctype == 0 or ctype == 6 or (ctype == 1 and (cval == "" or cval.isspace())): if range_start is not None: ranges.append((range_start, i)) range_start = None else: if range_start is None: range_start = i if range_start is not None: ranges.append((range_start, n)) return ranges
Scan a single row from an Excel file, and return the list of ranges corresponding to each consecutive span of non-empty cells in this row. If all cells are empty, return an empty list. Each "range" in the list is a tuple of the form `(startcol, endcol)`. For example, if the row is the following: [ ][ 1.0 ][ 23 ][ "foo" ][ ][ "hello" ][ ] then the returned list of ranges will be: [(1, 4), (5, 6)] This algorithm considers a cell to be empty if its type is 0 (XL_EMPTY), or 6 (XL_BLANK), or if it's a text cell containing empty string, or a whitespace-only string. Numeric `0` is not considered empty.
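The docstring's example, spelled out with xlrd's cell-type codes (0=EMPTY, 1=TEXT, 2=NUMBER, 6=BLANK):

values = ["", 1.0, 23, "foo", "", "hello", ""]
types  = [0,   2,   2,  1,     6,  1,       0]
print(_parse_row(values, types))  # [(1, 4), (5, 6)]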
def variable_names(self): """ Returns the names of all environment variables. :return: the names of the variables :rtype: list """ result = [] names = javabridge.call(self.jobject, "getVariableNames", "()Ljava/util/Set;") for name in javabridge.iterate_collection(names): result.append(javabridge.to_string(name)) return result
Returns the names of all environment variables. :return: the names of the variables :rtype: list
def dot(self, serie, r_max): """Draw a dot line""" serie_node = self.svg.serie(serie) view_values = list(map(self.view, serie.points)) for i, value in safe_enumerate(serie.values): x, y = view_values[i] if self.logarithmic: log10min = log10(self._min) - 1 log10max = log10(self._max or 1) if value != 0: size = r_max * ((log10(abs(value)) - log10min) / (log10max - log10min)) else: size = 0 else: size = r_max * (abs(value) / (self._max or 1)) metadata = serie.metadata.get(i) dots = decorate( self.svg, self.svg.node(serie_node['plot'], class_="dots"), metadata ) alter( self.svg.node( dots, 'circle', cx=x, cy=y, r=size, class_='dot reactive tooltip-trigger' + (' negative' if value < 0 else '') ), metadata ) val = self._format(serie, i) self._tooltip_data( dots, val, x, y, 'centered', self._get_x_label(i) ) self._static_value(serie_node, val, x, y, metadata)
Draw a dot line
def create_entity(self):
        """Create entity if `flow_collection` is defined in process.

        The following rules apply when adding a `Data` object to an
        `Entity`:

        * Only add a `Data` object to an `Entity` if the process has the
          `flow_collection` field defined
        * Add the object to an existing `Entity` if all of its parents
          that belong to an `Entity` (not necessarily all parents)
          belong to the same one
        * If the parents belong to different `Entities` or to no
          `Entity` at all, create a new `Entity`

        """
        entity_type = self.process.entity_type  # pylint: disable=no-member
        entity_descriptor_schema = self.process.entity_descriptor_schema  # pylint: disable=no-member
        entity_input = self.process.entity_input  # pylint: disable=no-member

        if entity_type:
            data_filter = {}
            if entity_input:
                input_id = dict_dot(self.input, entity_input, default=lambda: None)
                if input_id is None:
                    logger.warning("Skipping creation of entity due to missing input.")
                    return
                if isinstance(input_id, int):
                    data_filter['data__pk'] = input_id
                elif isinstance(input_id, list):
                    data_filter['data__pk__in'] = input_id
                else:
                    raise ValueError(
                        "Cannot create entity due to invalid value of field {}.".format(entity_input)
                    )
            else:
                data_filter['data__in'] = self.parents.all()  # pylint: disable=no-member

            entity_query = Entity.objects.filter(type=entity_type, **data_filter).distinct()
            entity_count = entity_query.count()

            if entity_count == 0:
                descriptor_schema = DescriptorSchema.objects.filter(
                    slug=entity_descriptor_schema
                ).latest()
                entity = Entity.objects.create(
                    contributor=self.contributor,
                    descriptor_schema=descriptor_schema,
                    type=entity_type,
                    name=self.name,
                    tags=self.tags,
                )
                assign_contributor_permissions(entity)

            elif entity_count == 1:
                entity = entity_query.first()
                copy_permissions(entity, self)

            else:
                logger.info("Skipping creation of entity due to multiple entities found.")
                entity = None

            if entity:
                entity.data.add(self)

                # Inherit collections from entity.
                for collection in entity.collections.all():
                    collection.data.add(self)
Create entity if `flow_collection` is defined in process.

The following rules apply when adding a `Data` object to an `Entity`:

* Only add a `Data` object to an `Entity` if the process has the
  `flow_collection` field defined
* Add the object to an existing `Entity` if all of its parents that
  belong to an `Entity` (not necessarily all parents) belong to the
  same one
* If the parents belong to different `Entities` or to no `Entity` at
  all, create a new `Entity`
def message_to_objects(
        message: str, sender: str, sender_key_fetcher: Callable[[str], str] = None,
        user: UserType = None,
) -> List:
    """Takes in a message extracted by a protocol and maps it to entities.

    :param message: XML payload
    :type message: str
    :param sender: Payload sender id
    :type sender: str
    :param sender_key_fetcher: Function to fetch sender public key. If not given, key will always be
        fetched over network. The function should take sender handle as the only parameter.
    :param user: Optional receiving user object. If given, should have a `handle`.
    :returns: list of entities
    """
    doc = etree.fromstring(message)
    if doc.tag in TAGS:
        return element_to_objects(doc, sender, sender_key_fetcher, user)
    return []
Takes in a message extracted by a protocol and maps it to entities.

:param message: XML payload
:type message: str
:param sender: Payload sender id
:type sender: str
:param sender_key_fetcher: Function to fetch sender public key. If not given, key will always be
    fetched over network. The function should take sender handle as the only parameter.
:param user: Optional receiving user object. If given, should have a `handle`.
:returns: list of entities
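A minimal usage sketch (the payload and handle are hypothetical; a non-empty result also requires the root tag to be in the module-level TAGS set):

xml = "<status_message><text>hello</text></status_message>"
entities = message_to_objects(xml, sender="alice@example.com")
print(entities)  # a (possibly empty) list of mapped entities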
def get_image(verbose=False): """Get the image as a TensorFlow variable. Returns: A `tf.Variable`, which must be initialized prior to use: invoke `sess.run(result.initializer)`.""" base_data = tf.constant(image_data(verbose=verbose)) base_image = tf.image.decode_image(base_data, channels=3) base_image.set_shape((IMAGE_HEIGHT, IMAGE_WIDTH, 3)) parsed_image = tf.Variable(base_image, name='image', dtype=tf.uint8) return parsed_image
Get the image as a TensorFlow variable. Returns: A `tf.Variable`, which must be initialized prior to use: invoke `sess.run(result.initializer)`.
def list_instances(self, hourly=True, monthly=True, tags=None, cpus=None, memory=None, hostname=None, domain=None, local_disk=None, datacenter=None, nic_speed=None, public_ip=None, private_ip=None, **kwargs): """Retrieve a list of all virtual servers on the account. Example:: # Print out a list of hourly instances in the DAL05 data center. for vsi in mgr.list_instances(hourly=True, datacenter='dal05'): print vsi['fullyQualifiedDomainName'], vsi['primaryIpAddress'] # Using a custom object-mask. Will get ONLY what is specified object_mask = "mask[hostname,monitoringRobot[robotStatus]]" for vsi in mgr.list_instances(mask=object_mask,hourly=True): print vsi :param boolean hourly: include hourly instances :param boolean monthly: include monthly instances :param list tags: filter based on list of tags :param integer cpus: filter based on number of CPUS :param integer memory: filter based on amount of memory :param string hostname: filter based on hostname :param string domain: filter based on domain :param string local_disk: filter based on local_disk :param string datacenter: filter based on datacenter :param integer nic_speed: filter based on network speed (in MBPS) :param string public_ip: filter based on public ip address :param string private_ip: filter based on private ip address :param dict \\*\\*kwargs: response-level options (mask, limit, etc.) :returns: Returns a list of dictionaries representing the matching virtual servers """ if 'mask' not in kwargs: items = [ 'id', 'globalIdentifier', 'hostname', 'domain', 'fullyQualifiedDomainName', 'primaryBackendIpAddress', 'primaryIpAddress', 'lastKnownPowerState.name', 'powerState', 'maxCpu', 'maxMemory', 'datacenter', 'activeTransaction.transactionStatus[friendlyName,name]', 'status', ] kwargs['mask'] = "mask[%s]" % ','.join(items) call = 'getVirtualGuests' if not all([hourly, monthly]): if hourly: call = 'getHourlyVirtualGuests' elif monthly: call = 'getMonthlyVirtualGuests' _filter = utils.NestedDict(kwargs.get('filter') or {}) if tags: _filter['virtualGuests']['tagReferences']['tag']['name'] = { 'operation': 'in', 'options': [{'name': 'data', 'value': tags}], } if cpus: _filter['virtualGuests']['maxCpu'] = utils.query_filter(cpus) if memory: _filter['virtualGuests']['maxMemory'] = utils.query_filter(memory) if hostname: _filter['virtualGuests']['hostname'] = utils.query_filter(hostname) if domain: _filter['virtualGuests']['domain'] = utils.query_filter(domain) if local_disk is not None: _filter['virtualGuests']['localDiskFlag'] = ( utils.query_filter(bool(local_disk))) if datacenter: _filter['virtualGuests']['datacenter']['name'] = ( utils.query_filter(datacenter)) if nic_speed: _filter['virtualGuests']['networkComponents']['maxSpeed'] = ( utils.query_filter(nic_speed)) if public_ip: _filter['virtualGuests']['primaryIpAddress'] = ( utils.query_filter(public_ip)) if private_ip: _filter['virtualGuests']['primaryBackendIpAddress'] = ( utils.query_filter(private_ip)) kwargs['filter'] = _filter.to_dict() kwargs['iter'] = True return self.client.call('Account', call, **kwargs)
Retrieve a list of all virtual servers on the account. Example:: # Print out a list of hourly instances in the DAL05 data center. for vsi in mgr.list_instances(hourly=True, datacenter='dal05'): print vsi['fullyQualifiedDomainName'], vsi['primaryIpAddress'] # Using a custom object-mask. Will get ONLY what is specified object_mask = "mask[hostname,monitoringRobot[robotStatus]]" for vsi in mgr.list_instances(mask=object_mask,hourly=True): print vsi :param boolean hourly: include hourly instances :param boolean monthly: include monthly instances :param list tags: filter based on list of tags :param integer cpus: filter based on number of CPUS :param integer memory: filter based on amount of memory :param string hostname: filter based on hostname :param string domain: filter based on domain :param string local_disk: filter based on local_disk :param string datacenter: filter based on datacenter :param integer nic_speed: filter based on network speed (in MBPS) :param string public_ip: filter based on public ip address :param string private_ip: filter based on private ip address :param dict \\*\\*kwargs: response-level options (mask, limit, etc.) :returns: Returns a list of dictionaries representing the matching virtual servers
def tab(topics, complete):
    """Utility sub-command for tab completion

    This command is meant to be called by a tab completion
    function and is given the currently entered topics, along
    with a boolean indicating whether or not the last entered
    argument is complete.

    """

    # Discard `be tab`
    topics = list(topics)[2:]

    # When given an incomplete argument,
    # the argument is *sometimes* returned twice (?)
    # .. note:: Seen in Git Bash on Windows
    # $ be in giant [TAB]
    # -> ['giant']
    # $ be in gi[TAB]
    # -> ['gi', 'gi']
    if len(topics) > 1 and topics[-1] == topics[-2]:
        topics.pop()

    # Suggest projects
    if len(topics) == 0:
        projects = lib.list_projects(root=_extern.cwd())
        sys.stdout.write(" ".join(projects))

    elif len(topics) == 1:
        project = topics[0]
        projects = lib.list_projects(root=_extern.cwd())

        # Complete project
        if not complete:
            projects = [i for i in projects if i.startswith(project)]
            sys.stdout.write(" ".join(projects))

        else:
            # Suggest items from inventory
            inventory = _extern.load_inventory(project)
            inventory = lib.list_inventory(inventory)
            items = [i for i, b in inventory]
            sys.stdout.write(" ".join(items))

    else:
        project, item = topics[:2]

        # Complete inventory item
        if len(topics) == 2 and not complete:
            inventory = _extern.load_inventory(project)
            inventory = lib.list_inventory(inventory)
            items = [i for i, b in inventory]
            items = [i for i in items if i.startswith(item)]
            sys.stdout.write(" ".join(items))

        # Suggest items from template
        else:
            try:
                be = _extern.load_be(project)
                templates = _extern.load_templates(project)
                inventory = _extern.load_inventory(project)
                item = topics[-1]

                items = lib.list_template(root=_extern.cwd(),
                                          topics=topics,
                                          templates=templates,
                                          inventory=inventory,
                                          be=be)

                if not complete:
                    items = lib.list_template(root=_extern.cwd(),
                                              topics=topics[:-1],
                                              templates=templates,
                                              inventory=inventory,
                                              be=be)
                    items = [i for i in items if i.startswith(item)]
                    sys.stdout.write(" ".join(items) + " ")
                else:
                    sys.stdout.write(" ".join(items) + " ")

            except IndexError:
                sys.exit(lib.NORMAL)
Utility sub-command for tab completion

This command is meant to be called by a tab completion
function and is given the currently entered topics, along
with a boolean indicating whether or not the last entered
argument is complete.
def user_can_edit_news(user): """ Check if the user has permission to edit any of the registered NewsItem types. """ newsitem_models = [model.get_newsitem_model() for model in NEWSINDEX_MODEL_CLASSES] if user.is_active and user.is_superuser: # admin can edit news iff any news types exist return bool(newsitem_models) for NewsItem in newsitem_models: for perm in format_perms(NewsItem, ['add', 'change', 'delete']): if user.has_perm(perm): return True return False
Check if the user has permission to edit any of the registered NewsItem types.
def do_handshake(self, timeout):
        'perform a SSL/TLS handshake'
        tout = _timeout(timeout)
        if not self._blocking:
            return self._sslobj.do_handshake()
        while 1:
            try:
                return self._sslobj.do_handshake()
            except ssl.SSLError, exc:
                if exc.args[0] == ssl.SSL_ERROR_WANT_READ:
                    self._wait_event(tout.now)
                    continue
                elif exc.args[0] == ssl.SSL_ERROR_WANT_WRITE:
                    self._wait_event(tout.now, write=True)
                    continue
                raise
perform a SSL/TLS handshake
def recover_hashmap_model_from_data(model_class, original_data, modified_data, deleted_data, field_type):
    """
    Function to reconstruct a model from DirtyModel basic information:
    the original data plus the modified and deleted fields.

    Necessary for pickling an object.
    """
    model = model_class(field_type=field_type[0](**field_type[1]))
    return set_model_internal_data(model, original_data, modified_data, deleted_data)
Function to reconstruct a model from DirtyModel basic information:
the original data plus the modified and deleted fields.

Necessary for pickling an object.
def ls(args): """ List S3 buckets. See also "aws s3 ls". Use "aws s3 ls NAME" to list bucket contents. """ table = [] for bucket in filter_collection(resources.s3.buckets, args): bucket.LocationConstraint = clients.s3.get_bucket_location(Bucket=bucket.name)["LocationConstraint"] cloudwatch = resources.cloudwatch bucket_region = bucket.LocationConstraint or "us-east-1" if bucket_region != cloudwatch.meta.client.meta.region_name: cloudwatch = boto3.Session(region_name=bucket_region).resource("cloudwatch") data = get_cloudwatch_metric_stats("AWS/S3", "NumberOfObjects", start_time=datetime.utcnow() - timedelta(days=2), end_time=datetime.utcnow(), period=3600, BucketName=bucket.name, StorageType="AllStorageTypes", resource=cloudwatch) bucket.NumberOfObjects = int(data["Datapoints"][-1]["Average"]) if data["Datapoints"] else None data = get_cloudwatch_metric_stats("AWS/S3", "BucketSizeBytes", start_time=datetime.utcnow() - timedelta(days=2), end_time=datetime.utcnow(), period=3600, BucketName=bucket.name, StorageType="StandardStorage", resource=cloudwatch) bucket.BucketSizeBytes = format_number(data["Datapoints"][-1]["Average"]) if data["Datapoints"] else None table.append(bucket) page_output(tabulate(table, args))
List S3 buckets. See also "aws s3 ls". Use "aws s3 ls NAME" to list bucket contents.
def getAllowedMethods(self): """Returns the allowed methods for this analysis, either if the method was assigned directly (by using "Allows manual entry of results") or indirectly via Instrument ("Allows instrument entry of results") in Analysis Service Edit View. :return: A list with the methods allowed for this analysis :rtype: list of Methods """ service = self.getAnalysisService() if not service: return [] methods = [] if self.getManualEntryOfResults(): methods = service.getMethods() if self.getInstrumentEntryOfResults(): for instrument in service.getInstruments(): methods.extend(instrument.getMethods()) return list(set(methods))
Returns the allowed methods for this analysis, either if the method was assigned directly (by using "Allows manual entry of results") or indirectly via Instrument ("Allows instrument entry of results") in Analysis Service Edit View. :return: A list with the methods allowed for this analysis :rtype: list of Methods
def log(self, format_, args, level=logging.INFO): """ This function is called for anything that needs to get logged. It logs to the logger of this listener. It is not defined in the standard handler class; our version has an additional `level` argument that allows to control the logging level in the standard Python logging support. Another difference is that the variable arguments are passed in as a tuple. """ self.server.listener.logger.log(level, format_, *args)
This function is called for anything that needs to get logged. It logs to the logger of this listener. It is not defined in the standard handler class; our version has an additional `level` argument that allows to control the logging level in the standard Python logging support. Another difference is that the variable arguments are passed in as a tuple.
def PlaceCall(self, *Targets):
        """Places a call to a single user or creates a conference call.

        :Parameters:
          Targets : str
            One or more call targets. If multiple targets are specified, a conference call is
            created. The call target can be a Skypename, phone number, or speed dial code.

        :return: A call object.
        :rtype: `call.Call`
        """
        calls = self.ActiveCalls
        reply = self._DoCommand('CALL %s' % ', '.join(Targets))
        # Skype for Windows returns the call status, which gives us the call Id.
        if reply.startswith('CALL '):
            return Call(self, chop(reply, 2)[1])
        # On Linux we get 'OK' as the reply, so we search for the new call on
        # the list of active calls.
        for c in self.ActiveCalls:
            if c not in calls:
                return c
        raise SkypeError(0, 'Placing call failed')
Places a call to a single user or creates a conference call. :Parameters: Targets : str One or more call targets. If multiple targets are specified, a conference call is created. The call target can be a Skypename, phone number, or speed dial code. :return: A call object. :rtype: `call.Call`
def scale(self, s=None): """Set/get actor's scaling factor. :param s: scaling factor(s). :type s: float, list .. note:: if `s==(sx,sy,sz)` scale differently in the three coordinates.""" if s is None: return np.array(self.GetScale()) self.SetScale(s) return self
Set/get actor's scaling factor. :param s: scaling factor(s). :type s: float, list .. note:: if `s==(sx,sy,sz)` scale differently in the three coordinates.
def one(ctx, interactive, enable_phantomjs, enable_puppeteer, scripts):
    """
    One mode not only means all-in-one; it runs everything in one process
    over tornado.ioloop, for debugging purposes.
    """
    ctx.obj['debug'] = False
    g = ctx.obj
    g['testing_mode'] = True

    if scripts:
        from pyspider.database.local.projectdb import ProjectDB
        g['projectdb'] = ProjectDB(scripts)
        if g.get('is_taskdb_default'):
            g['taskdb'] = connect_database('sqlite+taskdb://')
        if g.get('is_resultdb_default'):
            g['resultdb'] = None

    if enable_phantomjs:
        phantomjs_config = g.config.get('phantomjs', {})
        phantomjs_obj = ctx.invoke(phantomjs, **phantomjs_config)
        if phantomjs_obj:
            g.setdefault('phantomjs_proxy', '127.0.0.1:%s' % phantomjs_obj.port)
    else:
        phantomjs_obj = None

    if enable_puppeteer:
        puppeteer_config = g.config.get('puppeteer', {})
        puppeteer_obj = ctx.invoke(puppeteer, **puppeteer_config)
        if puppeteer_obj:
            g.setdefault('puppeteer_proxy', '127.0.0.1:%s' % puppeteer_obj.port)
    else:
        puppeteer_obj = None

    result_worker_config = g.config.get('result_worker', {})
    if g.resultdb is None:
        result_worker_config.setdefault('result_cls',
                                        'pyspider.result.OneResultWorker')
    result_worker_obj = ctx.invoke(result_worker, **result_worker_config)

    processor_config = g.config.get('processor', {})
    processor_config.setdefault('enable_stdout_capture', False)
    processor_obj = ctx.invoke(processor, **processor_config)

    fetcher_config = g.config.get('fetcher', {})
    fetcher_config.setdefault('xmlrpc', False)
    fetcher_obj = ctx.invoke(fetcher, **fetcher_config)

    scheduler_config = g.config.get('scheduler', {})
    scheduler_config.setdefault('xmlrpc', False)
    scheduler_config.setdefault('scheduler_cls',
                                'pyspider.scheduler.OneScheduler')
    scheduler_obj = ctx.invoke(scheduler, **scheduler_config)

    scheduler_obj.init_one(ioloop=fetcher_obj.ioloop,
                           fetcher=fetcher_obj,
                           processor=processor_obj,
                           result_worker=result_worker_obj,
                           interactive=interactive)
    if scripts:
        for project in g.projectdb.projects:
            scheduler_obj.trigger_on_start(project)

    try:
        scheduler_obj.run()
    finally:
        scheduler_obj.quit()
        if phantomjs_obj:
            phantomjs_obj.quit()
        if puppeteer_obj:
            puppeteer_obj.quit()
One mode not only means all-in-one; it runs everything in one process
over tornado.ioloop, for debugging purposes.
def place_analysis_summary_report(feature, parent): """Retrieve an HTML place analysis table report from a multi exposure analysis. """ _ = feature, parent # NOQA analysis_dir = get_analysis_dir(exposure_place['key']) if analysis_dir: return get_impact_report_as_string(analysis_dir) return None
Retrieve an HTML place analysis table report from a multi exposure analysis.
def _map_relations(relations, p, language='any'):
    '''
    :param: :class:`list` relations: Relations to be mapped. These are
        concept or collection id's.
    :param: :class:`skosprovider.providers.VocabularyProvider` p: Provider
        to look up id's.
    :param string language: Language to render the relations' labels in
    :rtype: :class:`list`
    '''
    ret = []
    for r in relations:
        c = p.get_by_id(r)
        if c:
            ret.append(_map_relation(c, language))
        else:
            log.warning(
                'A relation references a concept or collection %s in provider %s that can not be found. Please check the integrity of your data.' %
                (r, p.get_vocabulary_id())
            )
    return ret
:param: :class:`list` relations: Relations to be mapped. These are concept or collection id's. :param: :class:`skosprovider.providers.VocabularyProvider` p: Provider to look up id's. :param string language: Language to render the relations' labels in :rtype: :class:`list`
def truncate_graph_bbox(G, north, south, east, west, truncate_by_edge=False, retain_all=False): """ Remove every node in graph that falls outside a bounding box. Needed because overpass returns entire ways that also include nodes outside the bbox if the way (that is, a way with a single OSM ID) has a node inside the bbox at some point. Parameters ---------- G : networkx multidigraph north : float northern latitude of bounding box south : float southern latitude of bounding box east : float eastern longitude of bounding box west : float western longitude of bounding box truncate_by_edge : bool if True retain node if it's outside bbox but at least one of node's neighbors are within bbox retain_all : bool if True, return the entire graph even if it is not connected Returns ------- networkx multidigraph """ start_time = time.time() G = G.copy() nodes_outside_bbox = [] for node, data in G.nodes(data=True): if data['y'] > north or data['y'] < south or data['x'] > east or data['x'] < west: # this node is outside the bounding box if not truncate_by_edge: # if we're not truncating by edge, add node to list of nodes # outside the bounding box nodes_outside_bbox.append(node) else: # if we're truncating by edge, see if any of node's neighbors # are within bounding box any_neighbors_in_bbox = False neighbors = list(G.successors(node)) + list(G.predecessors(node)) for neighbor in neighbors: x = G.nodes[neighbor]['x'] y = G.nodes[neighbor]['y'] if y < north and y > south and x < east and x > west: any_neighbors_in_bbox = True break # if none of its neighbors are within the bounding box, add node # to list of nodes outside the bounding box if not any_neighbors_in_bbox: nodes_outside_bbox.append(node) G.remove_nodes_from(nodes_outside_bbox) log('Truncated graph by bounding box in {:,.2f} seconds'.format(time.time()-start_time)) # remove any isolated nodes and retain only the largest component (if # retain_all is True) if not retain_all: G = remove_isolated_nodes(G) G = get_largest_component(G) return G
Remove every node in graph that falls outside a bounding box. Needed because overpass returns entire ways that also include nodes outside the bbox if the way (that is, a way with a single OSM ID) has a node inside the bbox at some point. Parameters ---------- G : networkx multidigraph north : float northern latitude of bounding box south : float southern latitude of bounding box east : float eastern longitude of bounding box west : float western longitude of bounding box truncate_by_edge : bool if True retain node if it's outside bbox but at least one of node's neighbors are within bbox retain_all : bool if True, return the entire graph even if it is not connected Returns ------- networkx multidigraph
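A toy sketch of the truncate-by-edge behavior (assumes the surrounding osmnx module context for `log` and friends; node coordinates live in the `x`/`y` attributes):

import networkx as nx

G = nx.MultiDiGraph()
G.add_node(1, x=0.0, y=0.0)   # inside the bbox below
G.add_node(2, x=2.0, y=2.0)   # outside the bbox
G.add_edge(1, 2)
G2 = truncate_graph_bbox(G, north=1, south=-1, east=1, west=-1,
                         truncate_by_edge=True, retain_all=True)
# node 2 survives: its neighbor (node 1) lies inside the bounding box
print(sorted(G2.nodes))  # [1, 2]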