positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def setViewMode(self, state=True):
    """Toggle scene-panning view mode on or off.

    When enabled, the main view switches to hand-drag scrolling for
    moving around the scene; when disabled it reverts to rubber-band
    selection.  Emits the view-mode-changed signal only when the state
    actually changes.
    """
    if self._viewMode == state:
        return
    self._viewMode = state
    drag_mode = (self._mainView.ScrollHandDrag if state
                 else self._mainView.RubberBandDrag)
    self._mainView.setDragMode(drag_mode)
    self.emitViewModeChanged()
Starts the view mode for moving around the scene.
def manifest_download(self):
    '''download manifest files'''
    # Non-blocking acquire: if another caller is already managing the
    # downloaders, report failure instead of waiting on the lock.
    if self.downloaders_lock.acquire(False):
        if len(self.downloaders):
            # there already exist downloader threads
            self.downloaders_lock.release()
            return

        for url in ['http://firmware.ardupilot.org/manifest.json']:
            filename = self.make_safe_filename_from_url(url)
            path = mp_util.dot_mavproxy("manifest-%s" % filename)
            # One background thread per manifest URL; self.download_url
            # presumably clears the entry when finished -- confirm.
            # NOTE(review): plain-HTTP URL; check whether https is available.
            self.downloaders[url] = threading.Thread(target=self.download_url, args=(url, path))
            self.downloaders[url].start()
        self.downloaders_lock.release()
    else:
        print("fw: Failed to acquire download lock")
download manifest files
def json2elem(json_data, factory=ET.Element):
    """Deserialize a JSON string into an Element tree.

    By default whatever Element implementation was importable at module
    load time is used; pass a different Element class via ``factory``
    to override that choice.
    """
    parsed = json.loads(json_data)
    return internal_to_elem(parsed, factory)
Convert a JSON string into an Element. Whatever Element implementation we could import will be used by default; if you want to use something else, pass the Element class as the factory parameter.
def passthrough_repl(self, inputstring, **kwargs):
    """Add back passthroughs."""
    out = []
    # `index` is None while scanning normally; once a backslash is seen
    # it becomes a (possibly empty) string of digits naming a stored
    # passthrough reference.
    index = None
    # A trailing None sentinel guarantees any pending reference is flushed.
    for c in append_it(inputstring, None):
        try:
            if index is not None:
                if c is not None and c in nums:
                    # Still accumulating the reference number.
                    index += c
                elif c == unwrapper and index:
                    # Complete reference: substitute the stored passthrough.
                    ref = self.get_ref("passthrough", index)
                    out.append(ref)
                    index = None
                elif c != "\\" or index:
                    # Not a reference after all: emit the literal backslash
                    # plus whatever digits were consumed, then the char.
                    out.append("\\" + index)
                    if c is not None:
                        out.append(c)
                    index = None
            elif c is not None:
                if c == "\\":
                    # Possible start of a passthrough reference.
                    index = ""
                else:
                    out.append(c)
        except CoconutInternalException as err:
            # On an internal error, fall back to emitting the raw text.
            complain(err)
            # NOTE(review): nesting of this recovery block reconstructed
            # from flattened source -- confirm against upstream Coconut.
            if index is not None:
                out.append(index)
                index = None
            out.append(c)
    return "".join(out)
Add back passthroughs.
def case_study_social_link_linkedin(value):
    """
    Confirms that the social media url is pointed at the correct domain.

    Args:
        value (string): The url to check.

    Raises:
        django.forms.ValidationError

    """
    parsed = parse.urlparse(value.lower())
    host = parsed.netloc
    # Exact-host or dot-prefixed suffix match: a bare
    # endswith('linkedin.com') would also accept look-alike hosts
    # such as 'notlinkedin.com' or 'evil-linkedin.com'.
    if not (host == 'linkedin.com' or host.endswith('.linkedin.com')):
        raise ValidationError(MESSAGE_NOT_LINKEDIN)
Confirms that the social media url is pointed at the correct domain. Args: value (string): The url to check. Raises: django.forms.ValidationError
def from_id(cls, id):
    """Load a `cls` entity and instantiate the Context it stores."""
    from furious.context import Context

    # TODO: Handle exceptions and retries here.
    entity = cls.get_by_id(id)
    if not entity:
        raise FuriousContextNotFoundError(
            "Context entity not found for: {}".format(id))

    return Context.from_dict(entity.context)
Load a `cls` entity and instantiate the Context it stores.
def getimage(app): """Get image file.""" # append source directory to TEMPLATE_PATH so template is found srcdir = os.path.abspath(os.path.dirname(__file__)) TEMPLATE_PATH.append(srcdir) staticbase = '_static' buildpath = os.path.join(app.outdir, staticbase) try: os.makedirs(buildpath) except OSError: if not os.path.isdir(buildpath): raise if app.config.sphinxmark_image == 'default': imagefile = 'watermark-draft.png' imagepath = os.path.join(srcdir, imagefile) copy(imagepath, buildpath) LOG.debug('[sphinxmark] Using default image: ' + imagefile) elif app.config.sphinxmark_image == 'text': imagefile = createimage(app, srcdir, buildpath) LOG.debug('[sphinxmark] Image: ' + imagefile) else: imagefile = app.config.sphinxmark_image if app.config.html_static_path: staticpath = app.config.html_static_path[0] else: staticpath = '_static' LOG.debug('[sphinxmark] static path: ' + staticpath) imagepath = os.path.join(app.confdir, staticpath, imagefile) LOG.debug('[sphinxmark] Imagepath: ' + imagepath) try: copy(imagepath, buildpath) except Exception: message = ("Cannot find '%s'. Put watermark images in the " "'_static' directory or specify the location using " "'html_static_path'." % imagefile) LOG.warning(message) LOG.warning('Failed to add watermark.') return return(buildpath, imagefile)
Get image file.
def get_val(source, extract=None, transform=None):
    """Pull a value out of *source*, optionally post-processing it.

    ``extract`` (if given) maps the source to a raw value; ``transform``
    (if given) is applied to that raw value before it is returned.
    """
    raw = source if extract is None else extract(source)
    return raw if transform is None else transform(raw)
Extract a value from a source, transform and return it.
def count(self, *columns):
    """
    Retrieve the "count" result of the query

    :param columns: The columns to get
    :type columns: tuple

    :return: The count
    :rtype: int
    """
    cols = columns
    if not cols and self.distinct_:
        cols = self.columns
    if not cols:
        cols = ["*"]

    return int(self.aggregate("count", *cols))
Retrieve the "count" result of the query :param columns: The columns to get :type columns: tuple :return: The count :rtype: int
def get_networkid(vm_):
    '''
    Return the networkid to use, only valid for Advanced Zone
    '''
    networkid = config.get_cloud_config_value('networkid', vm_, __opts__)
    return False if networkid is None else networkid
Return the networkid to use, only valid for Advanced Zone
def pl2nvc(plane):
    """
    Return a unit normal vector and constant that define a specified plane.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pl2nvc_c.html

    :param plane: A SPICE plane.
    :type plane: supporttypes.Plane
    :return: A normal vector and constant defining the geometric plane represented by plane.
    :rtype: tuple
    """
    assert isinstance(plane, stypes.Plane)
    # Output buffers for the CSPICE call.
    out_normal = stypes.emptyDoubleVector(3)
    out_constant = ctypes.c_double()
    libspice.pl2nvc_c(ctypes.byref(plane), out_normal, ctypes.byref(out_constant))
    return stypes.cVectorToPython(out_normal), out_constant.value
Return a unit normal vector and constant that define a specified plane. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pl2nvc_c.html :param plane: A SPICE plane. :type plane: supporttypes.Plane :return: A normal vector and constant defining the geometric plane represented by plane. :rtype: tuple
def p_generate_if(self, p):
    # NOTE: the string below is the PLY grammar production for this
    # parser rule -- PLY reads it from __doc__ at runtime, so it must
    # not be edited or reworded.
    'generate_if : IF LPAREN cond RPAREN gif_true_item ELSE gif_false_item'
    # p[3] = condition, p[5] = then-branch, p[7] = else-branch.
    p[0] = IfStatement(p[3], p[5], p[7], lineno=p.lineno(1))
    # Propagate the IF token's line number onto the production result.
    p.set_lineno(0, p.lineno(1))
generate_if : IF LPAREN cond RPAREN gif_true_item ELSE gif_false_item
def get_instance(self, payload):
    """
    Build an instance of EnvironmentInstance

    :param dict payload: Payload response from the API

    :returns: twilio.rest.serverless.v1.service.environment.EnvironmentInstance
    :rtype: twilio.rest.serverless.v1.service.environment.EnvironmentInstance
    """
    sid = self._solution['service_sid']
    return EnvironmentInstance(self._version, payload, service_sid=sid)
Build an instance of EnvironmentInstance :param dict payload: Payload response from the API :returns: twilio.rest.serverless.v1.service.environment.EnvironmentInstance :rtype: twilio.rest.serverless.v1.service.environment.EnvironmentInstance
def site_is_of_motif_type(struct, n, approach="min_dist", delta=0.1, \
        cutoff=10.0, thresh=None):
    """
    Returns the motif type of the site with index n in structure struct;
    currently featuring "tetrahedral", "octahedral", "bcc", and "cp"
    (close-packed: fcc and hcp) as well as "square pyramidal" and
    "trigonal bipyramidal". If the site is not recognized,
    "unrecognized" is returned. If a site should be assigned to two
    different motifs, "multiple assignments" is returned.

    Args:
        struct (Structure): input structure.
        n (int): index of site in Structure object for which motif type
            is to be determined.
        approach (str): type of neighbor-finding approach, where
            "min_dist" will use the MinimumDistanceNN class,
            "voronoi" the VoronoiNN class, "min_OKeeffe" the
            MinimumOKeeffe class, and "min_VIRE" the MinimumVIRENN class.
        delta (float): tolerance involved in neighbor finding.
        cutoff (float): (large) radius to find tentative neighbors.
        thresh (dict): thresholds for motif criteria (currently, required
            keys and their default values are "qtet": 0.5,
            "qoct": 0.5, "qbcc": 0.5, "q6": 0.4).

    Returns: motif type (str).
    """
    if thresh is None:
        thresh = {
            "qtet": 0.5, "qoct": 0.5, "qbcc": 0.5, "q6": 0.4,
            "qtribipyr": 0.8, "qsqpyr": 0.8}

    # Order parameters for the central site: opvals[0] is the
    # coordination number, the rest are shape measures.
    ops = LocalStructOrderParams([
        "cn", "tet", "oct", "bcc", "q6", "sq_pyr", "tri_bipyr"])

    neighs_cent = get_neighbors_of_site_with_index(
        struct, n, approach=approach, delta=delta, cutoff=cutoff)
    # The central site is appended last so it can serve as the
    # reference index for get_order_parameters.
    neighs_cent.append(struct.sites[n])
    opvals = ops.get_order_parameters(
        neighs_cent, len(neighs_cent) - 1,
        indices_neighs=[i for i in range(len(neighs_cent) - 1)])
    # Round the coordination number to the nearest integer.
    cn = int(opvals[0] + 0.5)
    motif_type = "unrecognized"
    # Counts how many motif criteria matched; >1 means ambiguity.
    nmotif = 0

    if cn == 4 and opvals[1] > thresh["qtet"]:
        motif_type = "tetrahedral"
        nmotif += 1
    if cn == 5 and opvals[5] > thresh["qsqpyr"]:
        motif_type = "square pyramidal"
        nmotif += 1
    if cn == 5 and opvals[6] > thresh["qtribipyr"]:
        motif_type = "trigonal bipyramidal"
        nmotif += 1
    if cn == 6 and opvals[2] > thresh["qoct"]:
        motif_type = "octahedral"
        nmotif += 1
    if cn == 8 and (opvals[3] > thresh["qbcc"] and opvals[1] < thresh["qtet"]):
        motif_type = "bcc"
        nmotif += 1
    if cn == 12 and (opvals[4] > thresh["q6"] and opvals[1] < thresh["q6"] and
                     opvals[2] < thresh["q6"] and opvals[3] < thresh["q6"]):
        motif_type = "cp"
        nmotif += 1

    if nmotif > 1:
        motif_type = "multiple assignments"

    return motif_type
Returns the motif type of the site with index n in structure struct; currently featuring "tetrahedral", "octahedral", "bcc", and "cp" (close-packed: fcc and hcp) as well as "square pyramidal" and "trigonal bipyramidal". If the site is not recognized, "unrecognized" is returned. If a site should be assigned to two different motifs, "multiple assignments" is returned. Args: struct (Structure): input structure. n (int): index of site in Structure object for which motif type is to be determined. approach (str): type of neighbor-finding approach, where "min_dist" will use the MinimumDistanceNN class, "voronoi" the VoronoiNN class, "min_OKeeffe" the MinimumOKeeffe class, and "min_VIRE" the MinimumVIRENN class. delta (float): tolerance involved in neighbor finding. cutoff (float): (large) radius to find tentative neighbors. thresh (dict): thresholds for motif criteria (currently, required keys and their default values are "qtet": 0.5, "qoct": 0.5, "qbcc": 0.5, "q6": 0.4). Returns: motif type (str).
def invert(self, src=None):
    """Calculate the inverted matrix. Return 0 if successful and
    replace current one. Else return 1 and do nothing.
    """
    source = self if src is None else src
    result = TOOLS._invert_matrix(source)
    if result[0] == 1:
        # Singular matrix: leave this matrix untouched.
        return 1
    self.a, self.b, self.c, self.d, self.e, self.f = result[1]
    return 0
Calculate the inverted matrix. Return 0 if successful and replace current one. Else return 1 and do nothing.
def merge_tasks(core_collections, sandbox_collections, id_prefix,
                new_tasks, batch_size=100, wipe=False):
    """Merge core and sandbox collections into a temporary collection
    in the sandbox.

    :param core_collections: Core collection info
    :type core_collections: Collections
    :param sandbox_collections: Sandbox collection info
    :type sandbox_collections: Collections
    :param id_prefix: Prefix applied to sandbox ``task_id`` values so
        they cannot collide with core ids in the merged collection.
    :param new_tasks: Name of the target collection in the sandbox db
    :param batch_size: Number of documents per bulk insert
    :param wipe: If True, clear the target (and counter) collections first
    """
    merged = copy.copy(sandbox_collections)
    # create/clear target collection
    target = merged.database[new_tasks]
    if wipe:
        _log.debug("merge_tasks.wipe.begin")
        target.remove()
        merged.database['counter'].remove()
        _log.debug("merge_tasks.wipe.end")

    # Core tasks are copied verbatim.
    _insert_in_batches(target, core_collections.tasks.find(), batch_size)

    def _prefixed(docs):
        # Rewrite sandbox task ids with the namespace prefix.
        for doc in docs:
            doc['task_id'] = id_prefix + '-' + str(doc['task_id'])
            yield doc

    _insert_in_batches(target, _prefixed(sandbox_collections.tasks.find()),
                       batch_size)


def _insert_in_batches(target, docs, batch_size):
    """Bulk-insert `docs` into `target` in chunks of `batch_size`."""
    batch = []
    for doc in docs:
        batch.append(doc)
        if len(batch) == batch_size:
            target.insert(batch)
            batch = []
    if batch:
        target.insert(batch)
Merge core and sandbox collections into a temporary collection in the sandbox. :param core_collections: Core collection info :type core_collections: Collections :param sandbox_collections: Sandbox collection info :type sandbox_collections: Collections
def rm_files(path, extension):
    """
    Remove all files in the given directory with the given extension

    :param str path: Directory
    :param str extension: File type to remove
    :return none:
    """
    for filename in list_files(extension, path):
        # Defensive re-check of the suffix before deleting anything.
        if filename.endswith(extension):
            os.remove(os.path.join(path, filename))
    return
Remove all files in the given directory with the given extension :param str path: Directory :param str extension: File type to remove :return none:
def get_vm_by_name(content, name, regex=False):
    '''
    Look up a virtual machine by name, optionally treating the name as
    a regular expression.
    '''
    vm_type = vim.VirtualMachine
    return get_object_by_name(content, vm_type, name, regex)
Get a VM by its name
def connect_amqp_by_unit(self, sentry_unit, ssl=False,
                         port=None, fatal=True,
                         username="testuser1", password="changeme"):
    """Establish and return a pika amqp connection to the rabbitmq service
    running on a rmq juju unit.

    :param sentry_unit: sentry unit pointer
    :param ssl: boolean, default to False
    :param port: amqp port, use defaults if None
    :param fatal: boolean, default to True (raises on connect error)
    :param username: amqp user name, default to testuser1
    :param password: amqp user password
    :returns: pika amqp connection pointer or None if failed and non-fatal
    """
    host = sentry_unit.info['public-address']
    unit_name = sentry_unit.info['unit_name']

    # Default port logic if port is not specified:
    # 5671 is the conventional amqps port, 5672 plain amqp.
    if ssl and not port:
        port = 5671
    elif not ssl and not port:
        port = 5672

    self.log.debug('Connecting to amqp on {}:{} ({}) as '
                   '{}...'.format(host, port, unit_name, username))

    try:
        credentials = pika.PlainCredentials(username, password)
        parameters = pika.ConnectionParameters(host=host, port=port,
                                               credentials=credentials,
                                               ssl=ssl,
                                               connection_attempts=3,
                                               retry_delay=5,
                                               socket_timeout=1)
        connection = pika.BlockingConnection(parameters)
        # Sanity-check the connection state before handing it back.
        assert connection.is_open is True
        assert connection.is_closing is False
        self.log.debug('Connect OK')
        return connection
    except Exception as e:
        msg = ('amqp connection failed to {}:{} as '
               '{} ({})'.format(host, port, username, str(e)))
        if fatal:
            # Aborts the amulet test run with FAIL status.
            amulet.raise_status(amulet.FAIL, msg)
        else:
            self.log.warn(msg)
            return None
Establish and return a pika amqp connection to the rabbitmq service running on a rmq juju unit. :param sentry_unit: sentry unit pointer :param ssl: boolean, default to False :param port: amqp port, use defaults if None :param fatal: boolean, default to True (raises on connect error) :param username: amqp user name, default to testuser1 :param password: amqp user password :returns: pika amqp connection pointer or None if failed and non-fatal
def __remove_duplicates(self, _other):
    """Remove from other items already in list.

    Accepts a single item, a plain list, or an instance of this class;
    returns the items of `_other` that are not already present in this
    list, de-duplicated, preserving their original order.
    """
    # Bug fix: the old check `isinstance(_other, type(list))` compared
    # against `type` itself, not `list`.  Lists and same-type instances
    # are iterated; anything else is treated as a single item.
    if isinstance(_other, (type(self), list)):
        other = list(_other)
    else:
        other = [_other]

    # Keep the first occurrence of each item not already in self.  The
    # previous implementation popped from the end while de-duplicating,
    # which scrambled the result order.
    result = []
    for item in other:
        if not list.__contains__(self, item) and item not in result:
            result.append(item)
    return result
Remove from other items already in list.
def generate_item_instances(cls, items, mediawiki_api_url='https://www.wikidata.org/w/api.php',
                            login=None, user_agent=config['USER_AGENT_DEFAULT']):
    """
    A method which allows for retrieval of a list of Wikidata items or properties. The method generates a list of
    tuples where the first value in the tuple is the QID or property ID, whereas the second is the new instance of
    WDItemEngine containing all the data of the item. This is most useful for mass retrieval of WD items.
    :param items: A list of QIDs or property IDs
    :type items: list
    :param mediawiki_api_url: The MediaWiki url which should be used
    :type mediawiki_api_url: str
    :param login: An object of type WDLogin, which holds the credentials/session cookies required for >50 item
        bulk retrieval of items.
    :type login: wdi_login.WDLogin
    :return: A list of tuples, first value in the tuple is the QID or property ID string, second value is the
        instance of WDItemEngine with the corresponding item data.
    """
    assert type(items) == list

    url = mediawiki_api_url
    params = {
        'action': 'wbgetentities',
        'ids': '|'.join(items),
        'format': 'json'
    }
    headers = {
        'User-Agent': user_agent
    }

    if login:
        reply = login.get_session().get(url, params=params, headers=headers)
    else:
        # Bug fix: the unauthenticated branch previously dropped the
        # headers, so the configured User-Agent was never sent.
        reply = requests.get(url, params=params, headers=headers)

    item_instances = []
    for qid, v in reply.json()['entities'].items():
        ii = cls(wd_item_id=qid, item_data=v)
        ii.mediawiki_api_url = mediawiki_api_url
        item_instances.append((qid, ii))

    return item_instances
A method which allows for retrieval of a list of Wikidata items or properties. The method generates a list of tuples where the first value in the tuple is the QID or property ID, whereas the second is the new instance of WDItemEngine containing all the data of the item. This is most useful for mass retrieval of WD items. :param items: A list of QIDs or property IDs :type items: list :param mediawiki_api_url: The MediaWiki url which should be used :type mediawiki_api_url: str :param login: An object of type WDLogin, which holds the credentials/session cookies required for >50 item bulk retrieval of items. :type login: wdi_login.WDLogin :return: A list of tuples, first value in the tuple is the QID or property ID string, second value is the instance of WDItemEngine with the corresponding item data.
def content_sha1(context):
    """
    Used by the FileContent model to automatically compute the sha1
    hash of content before storing it to the database.
    """
    try:
        payload = context.current_parameters['content']
    except AttributeError:
        # A raw value was passed instead of an SQLAlchemy-style
        # execution context; hash it directly.
        payload = context
    return hashlib.sha1(encodeutils.to_utf8(payload)).hexdigest()
Used by the FileContent model to automatically compute the sha1 hash of content before storing it to the database.
def install_service(instance, dbhost, dbname, port):
    """Install systemd service configuration"""
    # Must run as root to write unit files and talk to systemd.
    _check_root()
    log("Installing systemd service")
    # The launcher script is assumed to live next to this manage
    # script, with 'manage' replaced by 'launcher' in the path.
    launcher = os.path.realpath(__file__).replace('manage', 'launcher')
    executable = sys.executable + " " + launcher
    executable += " --instance " + instance
    executable += " --dbname " + dbname + " --dbhost " + dbhost
    executable += " --port " + port
    executable += " --dolog --logfile /var/log/hfos-" + instance + ".log"
    executable += " --logfileverbosity 30 -q"

    definitions = {
        'instance': instance,
        'executable': executable
    }

    service_name = 'hfos-' + instance + '.service'

    # Render the unit file from the template into systemd's unit dir.
    write_template_file(os.path.join('dev/templates', service_template),
                        os.path.join('/etc/systemd/system/', service_name),
                        definitions)

    # NOTE(review): systemctl exit codes are not checked; failures to
    # enable/start the unit go unnoticed here.
    Popen([
        'systemctl',
        'enable',
        service_name
    ])

    log('Launching service')

    Popen([
        'systemctl',
        'start',
        service_name
    ])

    log("Done: Install Service")
Install systemd service configuration
def addBrokerList(self, aBrokerInfoList):
    """Add a broker to the broker cluster available list.

    Connects to the added broker if needed.
    """
    self.clusterAvailable.update(set(aBrokerInfoList))

    # If we need another connection to a fellow broker
    # TODO: only connect to a given number
    for brokerInfo in aBrokerInfoList:
        endpoint = "tcp://{hostname}:{port}".format(
            hostname=brokerInfo.hostname,
            port=brokerInfo.task_port,
        )
        self.clusterSocket.connect(endpoint)
        self.cluster.append(brokerInfo)
Add a broker to the broker cluster available list. Connects to the added broker if needed.
def create_session(self):
    """ Request a new session id """
    endpoint = self.build_url(self._endpoints.get('create_session'))
    response = self.con.post(endpoint, data={'persistChanges': self.persist})
    if not response:
        raise RuntimeError('Could not create session as requested by the user.')

    self.session_id = response.json().get('id')
    return True
Request a new session id
def RGB_to_XYZ(cobj, target_illuminant=None, *args, **kwargs): """ RGB to XYZ conversion. Expects 0-255 RGB values. Based off of: http://www.brucelindbloom.com/index.html?Eqn_RGB_to_XYZ.html """ # Will contain linearized RGB channels (removed the gamma func). linear_channels = {} if isinstance(cobj, sRGBColor): for channel in ['r', 'g', 'b']: V = getattr(cobj, 'rgb_' + channel) if V <= 0.04045: linear_channels[channel] = V / 12.92 else: linear_channels[channel] = math.pow((V + 0.055) / 1.055, 2.4) elif isinstance(cobj, BT2020Color): if kwargs.get('is_12_bits_system'): a, b, c = 1.0993, 0.0181, 0.081697877417347 else: a, b, c = 1.099, 0.018, 0.08124794403514049 for channel in ['r', 'g', 'b']: V = getattr(cobj, 'rgb_' + channel) if V <= c: linear_channels[channel] = V / 4.5 else: linear_channels[channel] = math.pow((V + (a - 1)) / a, 1 / 0.45) else: # If it's not sRGB... gamma = cobj.rgb_gamma for channel in ['r', 'g', 'b']: V = getattr(cobj, 'rgb_' + channel) linear_channels[channel] = math.pow(V, gamma) # Apply an RGB working space matrix to the XYZ values (matrix mul). xyz_x, xyz_y, xyz_z = apply_RGB_matrix( linear_channels['r'], linear_channels['g'], linear_channels['b'], rgb_type=cobj, convtype="rgb_to_xyz") if target_illuminant is None: target_illuminant = cobj.native_illuminant # The illuminant of the original RGB object. This will always match # the RGB colorspace's native illuminant. illuminant = cobj.native_illuminant xyzcolor = XYZColor(xyz_x, xyz_y, xyz_z, illuminant=illuminant) # This will take care of any illuminant changes for us (if source # illuminant != target illuminant). xyzcolor.apply_adaptation(target_illuminant) return xyzcolor
RGB to XYZ conversion. Expects 0-255 RGB values. Based off of: http://www.brucelindbloom.com/index.html?Eqn_RGB_to_XYZ.html
def guessoffset(args):
    """
    %prog guessoffset fastqfile

    Guess the quality offset of the fastqfile, whether 33 or 64.
    See encoding schemes: <http://en.wikipedia.org/wiki/FASTQ_format>

    SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS...............................
    ..........................XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    ...............................IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII
    .................................JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ
    LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL...............................
    !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefgh
    |                         |    |        |                              |
    33                        59   64       73                            104
    S - Sanger        Phred+33,  raw reads typically (0, 40)
    X - Solexa        Solexa+64, raw reads typically (-5, 40)
    I - Illumina 1.3+ Phred+64,  raw reads typically (0, 40)
    J - Illumina 1.5+ Phred+64,  raw reads typically (3, 40)
    L - Illumina 1.8+ Phred+33,  raw reads typically (0, 40)
        with 0=unused, 1=unused, 2=Read Segment Quality Control Indicator (bold)
    """
    p = OptionParser(guessoffset.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fastqfile, = args
    ai = iter_fastq(fastqfile)
    rec = next(ai)
    # Assume offset 64 until evidence of offset 33 is found.
    offset = 64
    while rec:
        quality = rec.quality
        # Quality codes below chr(59) can only occur with offset 33;
        # codes above chr(74) only with offset 64 (see chart above).
        lowcounts = len([x for x in quality if x < 59])
        highcounts = len([x for x in quality if x > 74])
        diff = highcounts - lowcounts
        # Stop once the evidence is decisive either way.
        if diff > 10:
            break
        elif diff < -10:
            offset = 33
            break
        rec = next(ai)

    if offset == 33:
        print("Sanger encoding (offset=33)", file=sys.stderr)
    elif offset == 64:
        print("Illumina encoding (offset=64)", file=sys.stderr)

    return offset
%prog guessoffset fastqfile Guess the quality offset of the fastqfile, whether 33 or 64. See encoding schemes: <http://en.wikipedia.org/wiki/FASTQ_format> SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS............................... ..........................XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX ...............................IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII .................................JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL............................... !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefgh | | | | | 33 59 64 73 104 S - Sanger Phred+33, raw reads typically (0, 40) X - Solexa Solexa+64, raw reads typically (-5, 40) I - Illumina 1.3+ Phred+64, raw reads typically (0, 40) J - Illumina 1.5+ Phred+64, raw reads typically (3, 40) L - Illumina 1.8+ Phred+33, raw reads typically (0, 40) with 0=unused, 1=unused, 2=Read Segment Quality Control Indicator (bold)
def message_about_scripts_not_on_PATH(scripts):
    # type: (Sequence[str]) -> Optional[str]
    """Determine if any scripts are not on PATH and format a warning.
    Returns a warning message if one or more scripts are not on PATH,
    otherwise None.
    """
    if not scripts:
        return None

    # Group scripts by the path they were installed in
    grouped_by_dir = collections.defaultdict(set)  # type: Dict[str, set]
    for destfile in scripts:
        grouped_by_dir[os.path.dirname(destfile)].add(
            os.path.basename(destfile))

    # We don't want to warn for directories that are on PATH.
    not_warn_dirs = [
        os.path.normcase(entry).rstrip(os.sep)
        for entry in os.environ.get("PATH", "").split(os.pathsep)
    ]
    # If an executable sits with sys.executable, we don't warn for it.
    # This covers the case of venv invocations without activating the venv.
    executable_loc = os.environ.get("PIP_PYTHON_PATH", sys.executable)
    not_warn_dirs.append(os.path.normcase(os.path.dirname(executable_loc)))

    warn_for = {
        parent_dir: dir_scripts
        for parent_dir, dir_scripts in grouped_by_dir.items()
        if os.path.normcase(parent_dir) not in not_warn_dirs
    }
    if not warn_for:
        return None

    # Format a message
    msg_lines = []
    for parent_dir, dir_scripts in warn_for.items():
        sorted_scripts = sorted(dir_scripts)
        if len(sorted_scripts) == 1:
            start_text = "script {} is".format(sorted_scripts[0])
        else:
            start_text = "scripts {} are".format(
                ", ".join(sorted_scripts[:-1]) + " and " + sorted_scripts[-1]
            )
        msg_lines.append(
            "The {} installed in '{}' which is not on PATH."
            .format(start_text, parent_dir)
        )

    last_line_fmt = (
        "Consider adding {} to PATH or, if you prefer "
        "to suppress this warning, use --no-warn-script-location."
    )
    target = "this directory" if len(msg_lines) == 1 else "these directories"
    msg_lines.append(last_line_fmt.format(target))

    # Returns the formatted multiline message
    return "\n".join(msg_lines)
Determine if any scripts are not on PATH and format a warning. Returns a warning message if one or more scripts are not on PATH, otherwise None.
def get_notification(self, id):
    """
    Return a Notification object.

    :param id: The id of the notification object to return.
    """
    request_url = "{0}/3/notification/{1}".format(self._base_url, id)
    resp = self._send_request(request_url)
    return Notification(resp, self)
Return a Notification object. :param id: The id of the notification object to return.
def has_header_line(self, key, id_):
    """Return whether there is a header line with the given ID of the
    type given by ``key``

    :param key: The VCF header key/line type.
    :param id_: The ID value to compare fore

    :return: ``True`` if there is a header line starting with
        ``##${key}=`` in the VCF file having the mapping entry ``ID``
        set to ``id_``.
    """
    if key in self._indices:
        return id_ in self._indices[key]
    return False
Return whether there is a header line with the given ID of the type given by ``key`` :param key: The VCF header key/line type. :param id_: The ID value to compare fore :return: ``True`` if there is a header line starting with ``##${key}=`` in the VCF file having the mapping entry ``ID`` set to ``id_``.
def move_pos(line=1, column=1, file=sys.stdout):
    """ Move the cursor to a new position. Values are 1-based, and
    default to 1.

    Esc[<line>;<column>H
    or
    Esc[<line>;<column>f
    """
    escape_seq = move.pos(line=line, col=column)
    escape_seq.write(file=file)
Move the cursor to a new position. Values are 1-based, and default to 1. Esc[<line>;<column>H or Esc[<line>;<column>f
def dbRestore(self, db_value, context=None):
    """
    Extracts the db_value provided back from the database.

    :param db_value: <variant>
    :param context: <orb.Context>

    :return: <variant>
    """
    # Stringified dicts (stored in text columns) are eval'd back to
    # dict form before being turned into model instances.
    if isinstance(db_value, (str, unicode)) and db_value.startswith('{'):
        try:
            db_value = projex.text.safe_eval(db_value)
        except StandardError:
            log.exception('Invalid reference found')
            raise orb.errors.OrbError('Invalid reference found.')

    if isinstance(db_value, dict):
        cls = self.referenceModel()
        if not cls:
            raise orb.errors.ModelNotFound(schema=self.reference())
        else:
            load_event = orb.events.LoadEvent(data=db_value)

            # update the expansion information to not propagate to references
            if context:
                # Work on a copy so the caller's context is not mutated.
                context = context.copy()
                expand = context.expandtree(cls)
                sub_expand = expand.pop(self.name(), {})
                context.expand = context.raw_values['expand'] = sub_expand

            db_value = cls(loadEvent=load_event, context=context)

    return super(ReferenceColumn, self).dbRestore(db_value, context=context)
Extracts the db_value provided back from the database. :param db_value: <variant> :param context: <orb.Context> :return: <variant>
def verify_edge_segments(edge_infos): """Verify that the edge segments in an intersection are valid. .. note:: This is a helper used only by :func:`generic_intersect`. Args: edge_infos (Optional[list]): List of "edge info" lists. Each list represents a curved polygon and contains 3-tuples of edge index, start and end (see the output of :func:`ends_to_curve`). Raises: ValueError: If two consecutive edge segments lie on the same edge index. ValueError: If the start and end parameter are "invalid" (they should be between 0 and 1 and start should be strictly less than end). """ if edge_infos is None: return for edge_info in edge_infos: num_segments = len(edge_info) for index in six.moves.xrange(-1, num_segments - 1): index1, start1, end1 = edge_info[index] # First, verify the start and end parameters for the current # segment. if not 0.0 <= start1 < end1 <= 1.0: raise ValueError(BAD_SEGMENT_PARAMS, edge_info[index]) # Then, verify that the indices are not the same. index2, _, _ = edge_info[index + 1] if index1 == index2: raise ValueError( SEGMENTS_SAME_EDGE, edge_info[index], edge_info[index + 1] )
Verify that the edge segments in an intersection are valid. .. note:: This is a helper used only by :func:`generic_intersect`. Args: edge_infos (Optional[list]): List of "edge info" lists. Each list represents a curved polygon and contains 3-tuples of edge index, start and end (see the output of :func:`ends_to_curve`). Raises: ValueError: If two consecutive edge segments lie on the same edge index. ValueError: If the start and end parameter are "invalid" (they should be between 0 and 1 and start should be strictly less than end).
def _EntryToEvent(entry, handlers, transformers):
    """Converts an APIAuditEntry to a legacy AuditEvent."""
    event = rdf_events.AuditEvent(
        timestamp=entry.timestamp,
        user=entry.username,
        action=handlers[entry.router_method_name])

    # Each transformer mutates the event in place.
    for transform in transformers:
        transform(entry, event)

    return event
Converts an APIAuditEntry to a legacy AuditEvent.
def remove_from_sonos_playlist(self, sonos_playlist, track, update_id=0):
    """Remove a track from a Sonos Playlist.
    This is a convenience method for :py:meth:`reorder_sonos_playlist`.

    Example::

        device.remove_from_sonos_playlist(sonos_playlist, track=0)

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
            Sonos playlist object or the item_id (str) of the Sonos
            playlist.
        track (int): *0**-based position of the track to move. The first
            track is track 0, just like indexing into a Python list.
        update_id (int): Optional update counter for the object. If left
            at the default of 0, it will be looked up.

    Returns:
        dict: See :py:meth:`reorder_sonos_playlist`

    Raises:
        SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
    """
    track_index = int(track)
    # Passing None as the destination deletes the track.
    return self.reorder_sonos_playlist(sonos_playlist, track_index, None,
                                       update_id)
Remove a track from a Sonos Playlist. This is a convenience method for :py:meth:`reorder_sonos_playlist`. Example:: device.remove_from_sonos_playlist(sonos_playlist, track=0) Args: sonos_playlist (:py:class:`~.soco.data_structures.DidlPlaylistContainer`): Sonos playlist object or the item_id (str) of the Sonos playlist. track (int): *0**-based position of the track to move. The first track is track 0, just like indexing into a Python list. update_id (int): Optional update counter for the object. If left at the default of 0, it will be looked up. Returns: dict: See :py:meth:`reorder_sonos_playlist` Raises: SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
def lookup_fg_color(self, fg_color):
    """
    Return the color for use in the
    `windll.kernel32.SetConsoleTextAttribute` API call.

    :param fg_color: Foreground as text. E.g. 'ffffff' or 'red'
    """
    # Named ANSI colors resolve directly; anything else (e.g. hex values)
    # goes through the nearest-color index computation.
    try:
        return FG_ANSI_COLORS[fg_color]
    except KeyError:
        return self._color_indexes(fg_color)[0]
Return the color for use in the `windll.kernel32.SetConsoleTextAttribute` API call. :param fg_color: Foreground as text. E.g. 'ffffff' or 'red'
def get_filename(request, geometry):
    """ Returns filename

    Returns the filename's location on disk where data is or is going to be
    stored. The files are stored in the folder specified by the user when
    initialising OGC-type of request. The name of the file has the following
    structure:

    {service_type}_{layer}_{geometry}_{crs}_{start_time}_{end_time}_{resolution}_{bins}_{histogram_type}_
    *{custom_url_params}.json

    :param request: FIS request
    :type request: FisRequest
    :param geometry: geometry object
    :type: BBox or Geometry
    :return: filename for this request
    :rtype: str
    """
    date_interval = parse_time_interval(request.time)
    # Geometry objects carry a WKT representation; bounding boxes fall back
    # to their str() form.
    geometry_string = geometry.wkt if isinstance(geometry, Geometry) else str(geometry)

    # Optional components (bins, histogram type) collapse to '' so the
    # join still produces a deterministic name.
    filename = '_'.join([
        str(request.service_type.value),
        request.layer,
        geometry_string,
        CRS.ogc_string(geometry.crs),
        '{}_{}'.format(date_interval[0], date_interval[1]),
        request.resolution,
        str(request.bins) if request.bins else '',
        request.histogram_type.value if request.histogram_type else ''
    ])

    # NOTE(review): filename_add_custom_url_params presumably appends the
    # request's custom URL parameters -- confirm against OgcImageService.
    filename = OgcImageService.filename_add_custom_url_params(filename, request)

    return OgcImageService.finalize_filename(filename, MimeType.JSON)
Returns filename Returns the filename's location on disk where data is or is going to be stored. The files are stored in the folder specified by the user when initialising OGC-type of request. The name of the file has the following structure: {service_type}_{layer}_{geometry}_{crs}_{start_time}_{end_time}_{resolution}_{bins}_{histogram_type}_ *{custom_url_params}.json :param request: FIS request :type request: FisRequest :param geometry: geometry object :type: BBox or Geometry :return: filename for this request :rtype: str
def load_progress(self, resume_step):
    """ load_progress: loads progress from restoration file

    Args: resume_step (str): name of a Status member; step at which to
        resume the session
    Returns: manager with progress from step (a RestoreManager restored
        from disk, or ``self`` when restoration is not possible)
    """
    resume_step = Status[resume_step]
    progress_path = self.get_restore_path(resume_step)

    # If progress is corrupted, revert to step before
    while not self.check_for_session(resume_step):
        config.LOGGER.error("Ricecooker has not reached {0} status. Reverting to earlier step...".format(resume_step.name))
        # All files are corrupted or absent, restart process
        if resume_step.value - 1 < 0:
            self.init_session()
            return self
        resume_step = Status(resume_step.value - 1)
        progress_path = self.get_restore_path(resume_step)
    config.LOGGER.error("Starting from status {0}".format(resume_step.name))

    # Load manager
    # NOTE(review): pickle.load executes arbitrary code if the progress
    # file can be tampered with -- acceptable only for trusted local files.
    with open(progress_path, 'rb') as handle:
        manager = pickle.load(handle)
        # Only adopt the restored object when it really is a
        # RestoreManager; otherwise keep the current (fresh) manager.
        if isinstance(manager, RestoreManager):
            return manager
        else:
            return self
load_progress: loads progress from restoration file Args: resume_step (str): step at which to resume session Returns: manager with progress from step
def _get_solr_type(self, field):
    """Return the Solr type of the specified field name.

    Assumes the convention of dynamic fields using an underscore + type
    character code for the field name (e.g. ``price_f`` -> float).

    :param field: full field name to resolve
    :return: Solr type name; 'string' when no mapping is known
    """
    # Exact match first; this also acts as the cache of prior lookups.
    try:
        return FIELD_TYPE_CONVERSION_MAP[field]
    except KeyError:
        pass

    field_type = 'string'
    if '_' in field:
        # Dynamic-field convention: the suffix after the last underscore
        # is the type code.
        suffix = field.rsplit('_', 1)[1]
        try:
            field_type = FIELD_TYPE_CONVERSION_MAP[suffix]
        except KeyError:
            pass
        else:
            # Cache the resolved type so the exact-match path hits next time.
            FIELD_TYPE_CONVERSION_MAP[field] = field_type
    return field_type
Returns the Solr type of the specified field name. Assumes the convention of dynamic fields using an underscore + type character code for the field name.
def search(self, song_title, limit=1):
    """
    根据歌曲名搜索歌曲 (search NetEase Cloud Music by song title)

    :param song_title: song name to search for
    :param limit: maximum number of results requested
    :return: dict describing the first matching song, or None when the
        request fails or no song is found
    """
    url = "http://music.163.com/api/search/pc"
    headers = {'Cookie': 'appver=1.5.2', 'Referer': 'http://music.163.com'}
    payload = {'s': song_title, 'limit': limit, 'type': 1}
    r = requests.post(url, params=payload, headers=headers)
    data = json.loads(r.text)
    if data['code'] != 200:
        return None
    # A successful reply can still carry an empty result set; guard
    # against IndexError on songs[0].
    songs = data.get('result', {}).get('songs') or []
    return songs[0] if songs else None
根据歌曲名搜索歌曲 : params : song_title: 歌曲名 limit: 搜索数量
def eq(self, value):
    """Construct an equal to (``=``) filter.

    :param value: Filter value
    :return: :class:`filters.Field <filters.Field>` object
    :rtype: filters.Field
    """
    self.op, self.negate_op = '=', '!='
    self.value = self._value(value)
    return self
Construct an equal to (``=``) filter. :param value: Filter value :return: :class:`filters.Field <filters.Field>` object :rtype: filters.Field
def set_vm_status(self, device='FLOPPY', boot_option='BOOT_ONCE', write_protect='YES'): """Sets the Virtual Media drive status It sets the boot option for virtual media device. Note: boot option can be set only for CD device. :param device: virual media device :param boot_option: boot option to set on the virtual media device :param write_protect: set the write protect flag on the vmedia device Note: It's ignored. In Redfish it is read-only. :raises: IloError, on an error from iLO. :raises: IloInvalidInputError, if the device is not valid. """ # CONNECT is a RIBCL call. There is no such property to set in Redfish. if boot_option == 'CONNECT': return self._validate_virtual_media(device) if boot_option not in BOOT_OPTION_MAP: msg = (self._("Virtual media boot option '%s' is invalid.") % boot_option) LOG.debug(msg) raise exception.IloInvalidInputError(msg) manager = self._get_sushy_manager(PROLIANT_MANAGER_ID) try: vmedia_device = ( manager.virtual_media.get_member_device( VIRTUAL_MEDIA_MAP[device])) vmedia_device.set_vm_status(BOOT_OPTION_MAP[boot_option]) except sushy.exceptions.SushyError as e: msg = (self._("The Redfish controller failed to set the virtual " "media status for '%(device)s'. Error %(error)s") % {'device': device, 'error': str(e)}) LOG.debug(msg) raise exception.IloError(msg)
Sets the Virtual Media drive status It sets the boot option for virtual media device. Note: boot option can be set only for CD device. :param device: virtual media device :param boot_option: boot option to set on the virtual media device :param write_protect: set the write protect flag on the vmedia device Note: It's ignored. In Redfish it is read-only. :raises: IloError, on an error from iLO. :raises: IloInvalidInputError, if the device is not valid.
def get_color_scheme(name):
    """Return the syntax color scheme *name* as a dict keyed by scheme key."""
    return {
        key: CONF.get("appearance", "%s/%s" % (name, key))
        for key in sh.COLOR_SCHEME_KEYS
    }
Get syntax color scheme
def get_composite_keywords(ckw_db, fulltext, skw_spans):
    """Return a list of composite keywords bound with number of occurrences.

    :param ckw_db: list of KeywordToken objects
        (they are supposed to be composite ones)
    :param fulltext: string to search in
    :param skw_spans: dictionary of already identified single keywords

    :return : dictionary of matches in a format {
            <keyword object>, [[position, position...], [info_about_matches] ],
            ..
            }
    """
    # NOTE(review): time.clock() was removed in Python 3.8;
    # time.perf_counter() is the modern equivalent -- confirm target runtime.
    timer_start = time.clock()

    # Build the list of composite candidates
    ckw_out = {}
    skw_as_components = []
    for composite_keyword in ckw_db.values():
        # Counters for the composite keyword. First count is for the
        # number of occurrences in the whole document and second count
        # is for the human defined keywords.
        ckw_count = 0
        matched_spans = []

        # First search in the fulltext using the regex pattern of the whole
        # composite keyword (including the alternative labels)
        for regex in composite_keyword.regex:
            for match in regex.finditer(fulltext):
                # Store spans as inclusive (start, end) tuples.
                span = list(match.span())
                span[1] -= 1
                span = tuple(span)
                if span not in matched_spans:
                    ckw_count += 1
                    matched_spans.append(span)

        # Get the single keywords locations.
        try:
            components = composite_keyword.compositeof
        except AttributeError:
            current_app.logger.error(
                "Cached ontology is corrupted. Please "
                "remove the cached ontology in your temporary file."
            )
            raise OntologyError('Cached ontology is corrupted.')

        spans = []
        try:
            spans = [skw_spans[component][0] for component in components]
        except KeyError:
            # Some of the keyword components are not to be found in the text.
            # Therefore we cannot continue because the match is incomplete.
            # NOTE(review): this leaves `spans` empty, so the pairing loop
            # below simply does nothing for this keyword; a `continue` would
            # be equivalent but more explicit -- confirm before changing.
            pass

        # Pair up component spans position-by-position; only spans that
        # combine into a valid composite span (per _get_ckw_span) survive.
        ckw_spans = []
        for index in range(len(spans) - 1):
            len_ckw = len(ckw_spans)
            if ckw_spans:
                # cause ckw_spans include the previous
                previous_spans = ckw_spans
            else:
                previous_spans = spans[index]

            for new_span in [(span0, colmd1) for span0 in previous_spans
                             for colmd1 in spans[index + 1]]:
                span = _get_ckw_span(fulltext, new_span)
                if span is not None:
                    ckw_spans.append(span)

            # the spans must be overlapping to be included
            if index > 0 and ckw_spans:
                _ckw_spans = []
                for _span in ckw_spans[len_ckw:]:  # new spans
                    for _colmd2 in ckw_spans[:len_ckw]:
                        s = _span_overlapping(_span, _colmd2)
                        if s:
                            _ckw_spans.append(s)
                ckw_spans = _ckw_spans

        for matched_span in [mspan for mspan in ckw_spans
                             if mspan not in matched_spans]:
            ckw_count += 1
            matched_spans.append(matched_span)

        if ckw_count:
            # Gather the component counts.
            component_counts = []
            for component in components:
                skw_as_components.append(component)
                # Get the single keyword count.
                try:
                    component_counts.append(len(skw_spans[component][0]))
                except KeyError:
                    component_counts.append(0)

            # Store the composite keyword
            ckw_out[composite_keyword] = [matched_spans, component_counts]

    # Remove the single keywords that appear as components from the list
    # of single keywords.
    for skw in skw_as_components:
        try:
            del skw_spans[skw]
        except KeyError:
            pass

    # Remove the composite keywords that are fully present in
    # longer composite keywords
    _ckw_base = filter(lambda x: len(x.compositeof) == 2, ckw_out.keys())
    _ckw_extended = sorted(
        filter(lambda x: len(x.compositeof) > 2, ckw_out.keys()),
        key=lambda x: len(x.compositeof))
    if _ckw_extended:
        candidates = []
        # Pair every 2-component keyword with any longer keyword whose
        # component set contains it.
        for kw1 in _ckw_base:
            s1 = set(kw1.compositeof)
            for kw2 in _ckw_extended:
                s2 = set(kw2.compositeof)
                if s1.issubset(s2):
                    candidates.append((kw1, kw2))
                    # break  # don't stop because this keyword may be
                    # partly contained by kw_x and kw_y
        # Likewise pair longer keywords among themselves (shorter first,
        # thanks to the sort above).
        for i in range(len(_ckw_extended)):
            kw1 = _ckw_extended[i]
            s1 = set(kw1.compositeof)
            for ii in range(i + 1, len(_ckw_extended)):
                kw2 = _ckw_extended[ii]
                s2 = set(kw2.compositeof)
                if s1.issubset(s2):
                    candidates.append((kw1, kw2))
                    break
        if candidates:
            for kw1, kw2 in candidates:
                try:
                    match1 = ckw_out[kw1]  # subset of the kw2
                    match2 = ckw_out[kw2]
                except KeyError:
                    continue
                positions1 = match1[0]
                for pos1 in positions1:
                    for pos2 in match2[0]:
                        if _span_overlapping(pos1, pos2):
                            del positions1[positions1.index(pos1)]
                            # if we removed all the matches also
                            # delete the keyword
                            if len(positions1) == 0:
                                del ckw_out[kw1]
                            break

    current_app.logger.info(
        "Matching composite keywords... %d keywords found "
        "in %.1f sec." % (len(ckw_out), time.clock() - timer_start),
    )

    return ckw_out
Return a list of composite keywords bound with number of occurrences. :param ckw_db: list of KeywordToken objects (they are supposed to be composite ones) :param fulltext: string to search in :param skw_spans: dictionary of already identified single keywords :return : dictionary of matches in a format { <keyword object>, [[position, position...], [info_about_matches] ], .. }
def visit_update(self, update_stmt, **kw):
    """
    used to compile <sql.expression.Update> expressions
    Parts are taken from the SQLCompiler base class.
    """
    # Plain updates with no CrateDB-specific features are handled by the
    # stock SQLAlchemy compiler.
    if not update_stmt.parameters and \
            not hasattr(update_stmt, '_crate_specific'):
        return super(CrateCompiler, self).visit_update(update_stmt, **kw)

    self.isupdate = True
    extra_froms = update_stmt._extra_froms

    text = 'UPDATE '
    if update_stmt._prefixes:
        text += self._generate_prefixes(update_stmt,
                                        update_stmt._prefixes, **kw)

    table_text = self.update_tables_clause(update_stmt, update_stmt.table,
                                           extra_froms, **kw)

    dialect_hints = None
    if update_stmt._hints:
        dialect_hints, table_text = self._setup_crud_hints(
            update_stmt, table_text
        )

    crud_params = self._get_crud_params(update_stmt, **kw)

    text += table_text
    text += ' SET '

    include_table = extra_froms and \
        self.render_table_with_column_in_update_from

    set_clauses = []

    # Regular column assignments produced by SQLAlchemy's CRUD machinery.
    for k, v in crud_params:
        clause = k._compiler_dispatch(self,
                                      include_table=include_table) + \
            ' = ' + v
        set_clauses.append(clause)

    # CrateDB extension: string keys containing '[' address elements of
    # object/array columns and are bound as explicit parameters.
    for k, v in update_stmt.parameters.items():
        if isinstance(k, str) and '[' in k:
            bindparam = sa.sql.bindparam(k, v)
            set_clauses.append(k + ' = ' + self.process(bindparam))

    text += ', '.join(set_clauses)

    if self.returning or update_stmt._returning:
        if not self.returning:
            self.returning = update_stmt._returning
        if self.returning_precedes_values:
            text += " " + self.returning_clause(
                update_stmt, self.returning)

    if extra_froms:
        extra_from_text = self.update_from_clause(
            update_stmt,
            update_stmt.table,
            extra_froms,
            dialect_hints, **kw)
        if extra_from_text:
            text += " " + extra_from_text

    if update_stmt._whereclause is not None:
        t = self.process(update_stmt._whereclause)
        if t:
            text += " WHERE " + t

    limit_clause = self.update_limit_clause(update_stmt)
    if limit_clause:
        text += " " + limit_clause

    if self.returning and not self.returning_precedes_values:
        text += " " + self.returning_clause(
            update_stmt, self.returning)

    return text
used to compile <sql.expression.Update> expressions Parts are taken from the SQLCompiler base class.
async def confirmbalance(self, *args, **kwargs):
    """ Confirm balance after trading

    Accepts:
        - message (signed dictionary):
            - "txid" - str
            - "coinid" - str
            - "amount" - int

    Returns:
        - "address" - str
        - "coinid" - str
        - "amount" - int
        - "uid" - int
        - "unconfirmed" - int (0 by default)
        - "deposit" - int (0 by default)

    Verified: True
    """
    # Get data from request
    if kwargs.get("message"):
        kwargs = json.loads(kwargs.get("message", "{}"))
    txid = kwargs.get("txid")
    coinid = kwargs.get("coinid")
    buyer_address = kwargs.get("buyer_address")
    cid = kwargs.get("cid")
    address = kwargs.get("buyer_address")
    # Strip the TEST prefix used on test networks; ignore non-str coinid.
    try:
        coinid = coinid.replace("TEST", "")
    except:
        pass
    # Check if required fields exists
    if not all([coinid, cid, buyer_address, txid]):
        return {"error":400, "reason": "Confirm balance. Missed required fields"}
    if not coinid in settings.bridges.keys():
        return await self.error_400("Confirm balance. Invalid coinid: %s" % coinid)
    # Get offers price
    self.account.blockchain.setendpoint(settings.bridges[coinid])
    offer = await self.account.blockchain.getoffer(cid=cid,
                                                   buyer_address=buyer_address)
    # Get offers price for updating balance
    amount = int(offer["price"])
    # Balances are tracked in PUT regardless of the traded coin.
    coinid = "PUT"
    # Get sellers account
    history_database = self.client[settings.HISTORY]
    history_collection = history_database[coinid]
    history = await history_collection.find_one({"txid":txid})
    try:
        account = await self.account.getaccountdata(public_key=history["public_key"])
    except:
        return await self.error_404("Confirm balance. Not found current deal.")
    # Connect to balance database
    database = self.client[self.collection]
    balance_collection = database[coinid]
    # Try to update balance if exists
    balance = await balance_collection.find_one({"uid":account["id"]})
    # Decrement unconfirmed
    submitted = int(balance["amount_frozen"]) - int(amount)
    if submitted < 0:
        return await self.error_400("Not enough frozen amount.")
    # Move `amount` from the frozen bucket to the active bucket.
    decremented = await balance_collection.find_one_and_update(
                        {"uid":account["id"]},
                        {"$set":{"amount_frozen": str(submitted)}})
    difference = int(balance["amount_active"]) + int(amount)
    updated = await balance_collection.find_one_and_update(
                    {"uid":account["id"]},
                    {"$set":{"amount_active":str(difference)}})
    if not updated:
        return {"error":404,
                "reason":"Confirm balance. Not found current transaction id"}
    # Delete transaction id field
    await history_collection.find_one_and_update({"txid":txid},
                                                 {"$unset":{"txid":1}})
    # Promote the account one level once a deal completes at level 2.
    if int(account["level"]) == 2:
        await self.account.updatelevel(**{"id":account["id"], "level":3})
    return {i:updated[i] for i in updated if i != "_id" and i != "txid"}
Confirm balance after trading Accepts: - message (signed dictionary): - "txid" - str - "coinid" - str - "amount" - int Returns: - "address" - str - "coinid" - str - "amount" - int - "uid" - int - "unconfirmed" - int (0 by default) - "deposit" - int (0 by default) Verified: True
def occurrence_view(
    request,
    event_pk,
    pk,
    template='swingtime/occurrence_detail.html',
    form_class=forms.SingleOccurrenceForm
):
    '''
    View a specific occurrence and optionally handle any updates.

    Context parameters:

    ``occurrence``
        the occurrence object keyed by ``pk``

    ``form``
        a form object for updating the occurrence
    '''
    occurrence = get_object_or_404(Occurrence, pk=pk, event__pk=event_pk)
    if request.method != 'POST':
        form = form_class(instance=occurrence)
    else:
        form = form_class(request.POST, instance=occurrence)
        if form.is_valid():
            form.save()
            return http.HttpResponseRedirect(request.path)
    # An invalid POST falls through with the bound form so errors render.
    context = {'occurrence': occurrence, 'form': form}
    return render(request, template, context)
View a specific occurrence and optionally handle any updates. Context parameters: ``occurrence`` the occurrence object keyed by ``pk`` ``form`` a form object for updating the occurrence
def _set_protected_vlans(self, v, load=False):
    """
    Setter method for protected_vlans, mapped from YANG variable
    /interface/ethernet/openflow/protected_vlans (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_protected_vlans is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_protected_vlans() directly.
    """
    # NOTE(review): this looks like pyangbind-generated code -- prefer
    # regenerating from the YANG model over hand-editing.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=protected_vlans.protected_vlans, is_container='container', presence=False, yang_name="protected-vlans", rest_name="protected-vlans", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'protected vlan ', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the generated help payload describing the expected type.
        raise ValueError({
            'error-string': """protected_vlans must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=protected_vlans.protected_vlans, is_container='container', presence=False, yang_name="protected-vlans", rest_name="protected-vlans", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'protected vlan ', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='container', is_config=True)""",
        })

    self.__protected_vlans = t
    if hasattr(self, '_set'):
        self._set()
Setter method for protected_vlans, mapped from YANG variable /interface/ethernet/openflow/protected_vlans (container) If this variable is read-only (config: false) in the source YANG file, then _set_protected_vlans is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_protected_vlans() directly.
def generate_timing_stats(file_list, var_list):
    """
    Parse all of the timing files, and generate some statistics
    about the run.

    Args:
        file_list: A list of timing files to parse
        var_list: A list of variables to look for in the timing file

    Returns:
        A dict mapping each variable name to a dict with keys
        'mean', 'max', 'min' and 'std' computed over all files in
        which the variable was found.  Variables found in no file
        are omitted.
    """
    # Parse every timing file once up front.
    timing_result = {path: functions.parse_gptl(path, var_list)
                     for path in file_list}

    timing_summary = {}
    for var in var_list:
        # Collect the variable's value from every file that reports it;
        # files that lack the variable are simply skipped.
        var_time = [data[var] for data in timing_result.values()
                    if var in data]
        if var_time:
            timing_summary[var] = {'mean': np.mean(var_time),
                                   'max': np.max(var_time),
                                   'min': np.min(var_time),
                                   'std': np.std(var_time)}
    return timing_summary
Parse all of the timing files, and generate some statistics about the run. Args: file_list: A list of timing files to parse var_list: A list of variables to look for in the timing file Returns: A dict containing values that have the form: [mean, min, max, mean, standard deviation]
def commit_output(cls, shard_ctx, iterator):
    """Saves output references when a shard finishes.

    Inside end_shard(), an output writer can optionally use this method
    to persist some references to the outputs from this shard
    (e.g a list of filenames)

    Args:
      shard_ctx: map_job_context.ShardContext for this shard.
      iterator: an iterator that yields json serializable
        references to the outputs from this shard.
        Contents from the iterator can be accessible later via
        map_job.Job.get_outputs.
    """
    # Materialize eagerly: the iterator may be lazy, and a tuple keeps the
    # stored references immutable.
    shard_ctx._state.writer_state["outs"] = tuple(iterator)
Saves output references when a shard finishes. Inside end_shard(), an output writer can optionally use this method to persist some references to the outputs from this shard (e.g a list of filenames) Args: shard_ctx: map_job_context.ShardContext for this shard. iterator: an iterator that yields json serializable references to the outputs from this shard. Contents from the iterator can be accessible later via map_job.Job.get_outputs.
def get_job(self, job_resource_name: str) -> Dict:
    """Returns metadata about a previously created job.

    See get_job_result if you want the results of the job and not just
    metadata about the job.

    Params:
        job_resource_name: A string of the form
            `projects/project_id/programs/program_id/jobs/job_id`.

    Returns:
        A dictionary containing the metadata.
    """
    jobs = self.service.projects().programs().jobs()
    request = jobs.get(name=job_resource_name)
    return request.execute()
Returns metadata about a previously created job. See get_job_result if you want the results of the job and not just metadata about the job. Params: job_resource_name: A string of the form `projects/project_id/programs/program_id/jobs/job_id`. Returns: A dictionary containing the metadata.
def _grouped(input_type, output_type, base_class, output_type_method):
    """Define a user-defined function that is applied per group.

    Parameters
    ----------
    input_type : List[ibis.expr.datatypes.DataType]
        A list of the types found in :mod:`~ibis.expr.datatypes`. The
        length of this list must match the number of arguments to the
        function. Variadic arguments are not yet supported.
    output_type : ibis.expr.datatypes.DataType
        The return type of the function.
    base_class : Type[T]
        The base class of the generated Node
    output_type_method : Callable
        A callable that determines the method to call to get the
        expression type of the UDF

    See Also
    --------
    ibis.pandas.udf.reduction
    ibis.pandas.udf.analytic
    """
    def wrapper(func):
        funcsig = valid_function_signature(input_type, func)

        # Dynamically create the Node subclass carrying the UDF's type
        # signature and output type.
        UDAFNode = type(
            func.__name__,
            (base_class,),
            {
                'signature': sig.TypeSignature.from_dtypes(input_type),
                'output_type': output_type_method(output_type),
            },
        )

        # An execution rule for a simple aggregate node
        @execute_node.register(
            UDAFNode, *udf_signature(input_type, pin=None, klass=pd.Series)
        )
        def execute_udaf_node(op, *args, **kwargs):
            args, kwargs = arguments_from_signature(
                funcsig, *args, **kwargs
            )
            return func(*args, **kwargs)

        # An execution rule for a grouped aggregation node. This
        # includes aggregates applied over a window.
        nargs = len(input_type)
        # One signature per choice of which argument is the pinned
        # SeriesGroupBy; every combination gets registered below.
        group_by_signatures = [
            udf_signature(input_type, pin=pin, klass=SeriesGroupBy)
            for pin in range(nargs)
        ]

        @toolz.compose(
            *(
                execute_node.register(UDAFNode, *types)
                for types in group_by_signatures
            )
        )
        def execute_udaf_node_groupby(op, *args, **kwargs):
            # construct a generator that yields the next group of data
            # for every argument excluding the first (pandas performs
            # the iteration for the first argument) for each argument
            # that is a SeriesGroupBy.
            #
            # If the argument is not a SeriesGroupBy then keep
            # repeating it until all groups are exhausted.
            aggcontext = kwargs.pop('aggcontext', None)
            assert aggcontext is not None, 'aggcontext is None'

            iters = (
                (data for _, data in arg)
                if isinstance(arg, SeriesGroupBy)
                else itertools.repeat(arg)
                for arg in args[1:]
            )
            funcsig = signature(func)

            def aggregator(first, *rest, **kwargs):
                # map(next, *rest) gets the inputs for the next group
                # TODO: might be inefficient to do this on every call
                args, kwargs = arguments_from_signature(
                    funcsig, first, *map(next, rest), **kwargs
                )
                return func(*args, **kwargs)

            result = aggcontext.agg(args[0], aggregator, *iters, **kwargs)
            return result

        @functools.wraps(func)
        def wrapped(*args):
            return UDAFNode(*args).to_expr()

        return wrapped

    return wrapper
Define a user-defined function that is applied per group. Parameters ---------- input_type : List[ibis.expr.datatypes.DataType] A list of the types found in :mod:`~ibis.expr.datatypes`. The length of this list must match the number of arguments to the function. Variadic arguments are not yet supported. output_type : ibis.expr.datatypes.DataType The return type of the function. base_class : Type[T] The base class of the generated Node output_type_method : Callable A callable that determines the method to call to get the expression type of the UDF See Also -------- ibis.pandas.udf.reduction ibis.pandas.udf.analytic
def set_thread(self, thread = None):
    """
    Manually set the thread process. Use with care!

    @type  thread: L{Thread}
    @param thread: (Optional) Thread object. Use C{None} to autodetect.
    """
    # None clears the cached thread so it will be autodetected later.
    if thread is None:
        self.__thread = None
        return
    self.__load_Thread_class()
    if not isinstance(thread, Thread):
        raise TypeError(
            "Parent thread must be a Thread instance, "
            "got %s instead" % type(thread))
    self.dwThreadId = thread.get_tid()
    self.__thread = thread
Manually set the thread process. Use with care! @type thread: L{Thread} @param thread: (Optional) Thread object. Use C{None} to autodetect.
def _decode_surrogatepass(data, codec):
    """Like data.decode(codec, 'surrogatepass') but makes utf-16-le/be
    work on Python < 3.4 + Windows

    https://bugs.python.org/issue27971

    Raises UnicodeDecodeError, LookupError
    """
    try:
        return data.decode(codec, _surrogatepass)
    except UnicodeDecodeError:
        if not _codec_can_decode_with_surrogatepass(codec):
            # Reduce the big-endian case to little-endian by swapping
            # byte pairs, then decode via the Windows wide-string API.
            if _normalize_codec(codec) == "utf-16-be":
                data = _swap_bytes(data)
                codec = "utf-16-le"
            if _normalize_codec(codec) == "utf-16-le":
                # Null-terminate and let ctypes interpret the buffer as a
                # UTF-16 wide string (each code unit is 2 bytes).
                buffer_ = ctypes.create_string_buffer(data + b"\x00\x00")
                value = ctypes.wstring_at(buffer_, len(data) // 2)
                # Round-trip to verify the decode was lossless; otherwise
                # re-raise the original UnicodeDecodeError.
                if value.encode("utf-16-le", _surrogatepass) != data:
                    raise
                return value
            else:
                raise
        else:
            raise
Like data.decode(codec, 'surrogatepass') but makes utf-16-le/be work on Python < 3.4 + Windows https://bugs.python.org/issue27971 Raises UnicodeDecodeError, LookupError
def do_loglevel(self, args, arguments):
    """
    ::

      Usage:
             loglevel
             loglevel critical
             loglevel error
             loglevel warning
             loglevel info
             loglevel debug

      Shows current log level or changes it.

      loglevel - shows current log level
      critical - shows log message in critical level
      error    - shows log message in error level including critical
      warning  - shows log message in warning level including error
      info     - shows log message in info level including warning
      debug    - shows log message in debug level including info
    """
    # Map each docopt flag to its logging level name.
    for flag, level in (('debug', 'DEBUG'),
                        ('error', 'ERROR'),
                        ('warning', 'WARNING'),
                        ('info', 'INFO'),
                        ('critical', 'CRITICAL')):
        if arguments[flag]:
            self.loglevel = level
            break
    else:
        # No level requested: just report the current one.
        Console.ok("Log level: {0}".format(self.loglevel))
        return
    Console.ok("Log level: {0} is set".format(self.loglevel))
    # Persist the chosen level to the cmd3 configuration file.
    filename = path_expand("~/.cloudmesh/cmd3.yaml")
    config = ConfigDict(filename=filename)
    config["cmd3"]["properties"]["loglevel"] = self.loglevel
    config.write(filename=filename, output="yaml", attribute_indent=" ")
:: Usage: loglevel loglevel critical loglevel error loglevel warning loglevel info loglevel debug Shows current log level or changes it. loglevel - shows current log level critical - shows log message in critical level error - shows log message in error level including critical warning - shows log message in warning level including error info - shows log message in info level including warning debug - shows log message in debug level including info
def image_vacuum(name):
    '''
    Delete images not in use or installed via image_present

    name : string
        state name (lowercased; not otherwise used)

    .. warning::

        Only image_present states that are included via the
        top file will be detected.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # list of images to keep
    images = []

    # retrieve image_present state data for host
    for state in __salt__['state.show_lowstate']():
        # don't throw exceptions when not highstate run
        if 'state' not in state:
            continue

        # skip if not from this state module
        if state['state'] != __virtualname__:
            continue
        # skip if not image_present
        if state['fun'] not in ['image_present']:
            continue
        # keep images installed via image_present
        if 'name' in state:
            if _is_uuid(state['name']):
                images.append(state['name'])
            elif _is_docker_uuid(state['name']):
                # resolve docker-style ids to their image uuid
                state['name'] = __salt__['imgadm.docker_to_uuid'](state['name'])
                if not state['name']:
                    continue
                images.append(state['name'])

    # retrieve images in use by vms
    for image_uuid in __salt__['vmadm.list'](order='image_uuid'):
        if image_uuid not in images:
            images.append(image_uuid)

    # purge unused images
    ret['result'] = True
    for image_uuid in __salt__['imgadm.list']():
        if image_uuid in images:
            continue
        image = __salt__['imgadm.get'](image_uuid)
        if image['manifest']['name'] == 'docker-layer':
            # NOTE: docker images are made of multiple layers, loop over them
            while image:
                image_uuid = image['manifest']['uuid']
                if image_uuid in __salt__['imgadm.delete'](image_uuid):
                    ret['changes'][image_uuid] = None
                else:
                    ret['result'] = False
                    ret['comment'] = 'failed to delete images'
                if 'origin' in image['manifest']:
                    # walk down to the parent layer
                    image = __salt__['imgadm.get'](image['manifest']['origin'])
                else:
                    image = None
        else:
            # NOTE: normal images can just be delete
            if image_uuid in __salt__['imgadm.delete'](image_uuid):
                ret['changes'][image_uuid] = None
            else:
                ret['result'] = False
                ret['comment'] = 'failed to delete images'

    if ret['result'] and not ret['changes']:
        ret['comment'] = 'no images deleted'
    elif ret['result'] and ret['changes']:
        ret['comment'] = 'images deleted'

    return ret
Delete images not in use or installed via image_present .. warning:: Only image_present states that are included via the top file will be detected.
def list(self):
    """List available reports from the server by returning a dictionary
    with reports classified by data model:

    .. doctest::
        :options: +SKIP

        >>> odoo.report.list()['account.invoice']
        [{'name': u'Duplicates',
          'report_name': u'account.account_invoice_report_duplicate_main',
          'report_type': u'qweb-pdf'},
         {'name': 'Invoices',
          'report_type': 'qweb-pdf',
          'report_name': 'account.report_invoice'}]

    .. doctest::
        :hide:

        >>> from pprint import pprint as pp
        >>> any(data['report_name'] == 'account.report_invoice'
        ...     for data in odoo.report.list()['account.invoice'])
        True

    *Python 2:*

    :return: `list` of dictionaries
    :raise: `urllib2.URLError` (connection error)

    *Python 3:*

    :return: `list` of dictionaries
    :raise: `urllib.error.URLError` (connection error)
    """
    # The report model was renamed in Odoo 11.
    if v(self._odoo.version)[0] < 11:
        report_model = 'ir.actions.report.xml'
    else:
        report_model = 'ir.actions.report'
    IrReport = self._odoo.env[report_model]
    fields = ['name', 'model', 'report_name', 'report_type']
    records = IrReport.read(IrReport.search([]), fields)
    result = {}
    for record in records:
        model = record.pop('model')
        record.pop('id')
        result.setdefault(model, []).append(record)
    return result
List available reports from the server by returning a dictionary with reports classified by data model: .. doctest:: :options: +SKIP >>> odoo.report.list()['account.invoice'] [{'name': u'Duplicates', 'report_name': u'account.account_invoice_report_duplicate_main', 'report_type': u'qweb-pdf'}, {'name': 'Invoices', 'report_type': 'qweb-pdf', 'report_name': 'account.report_invoice'}] .. doctest:: :hide: >>> from pprint import pprint as pp >>> any(data['report_name'] == 'account.report_invoice' ... for data in odoo.report.list()['account.invoice']) True *Python 2:* :return: `list` of dictionaries :raise: `urllib2.URLError` (connection error) *Python 3:* :return: `list` of dictionaries :raise: `urllib.error.URLError` (connection error)
def electron_shells_are_subset(subset, superset, compare_meta=False, rel_tol=0.0):
    '''
    Determine if a list of electron shells is a subset of another

    Returns True when every shell in `subset` has an approximate match
    (exponents/coefficients within `rel_tol`) somewhere in `superset`.
    If compare_meta is True, the metadata is also compared for exact
    equality.
    '''
    # Each subset shell must match at least one superset shell
    for candidate in subset:
        if not any(compare_electron_shells(candidate, shell, compare_meta, rel_tol)
                   for shell in superset):
            return False
    return True
Determine if a list of electron shells is a subset of another If 'subset' is a subset of the 'superset', True is returned. The shells are compared approximately (exponents/coefficients are within a tolerance) If compare_meta is True, the metadata is also compared for exact equality.
def _patch_tcpserver():
    """
    Patch TCPServer.shutdown_request so that, after a request finishes,
    a blocking post-mortem interaction is opened if the handling thread
    recorded an exception.
    """
    original_shutdown = TCPServer.shutdown_request

    def patched_shutdown(*args, **kwargs):
        handler_thread = current_thread()
        original_shutdown(*args, **kwargs)
        # If this thread stashed an exception during the request, open
        # the debugger now that the request has been shut down.
        if handler_thread in _exc_cache:
            post_mortem_interaction(*_exc_cache.pop(handler_thread))

    TCPServer.shutdown_request = patched_shutdown
Patch shutdown_request to open blocking interaction after the end of the request
def expression(sceneid, tile_x, tile_y, tile_z, expr=None, **kwargs):
    """
    Apply a band-arithmetic expression to tile data.

    Attributes
    ----------
    sceneid : str
        Landsat id, Sentinel id, CBERS ids or file url.
        The prefix of the id ("L", "S2", "CBERS", anything else)
        selects which reader backend is used.
    tile_x : int
        Mercator tile X index.
    tile_y : int
        Mercator tile Y index.
    tile_z : int
        Mercator tile ZOOM level.
    expr : str, required
        Expression to apply (e.g '(B5+B4)/(B5-B4)')
        Band name should start with 'B'.  Multiple comma-separated
        sub-expressions produce one output band each.

    Returns
    -------
    out : ndarray
        Returns processed pixel value.

    """
    if not expr:
        raise Exception("Missing expression")

    # NOTE(review): band tokens are matched with a lowercase 'b' even though
    # the docstring example uses uppercase 'B' -- confirm the expected casing
    # of `expr` against callers.  `set()` de-duplicates tokens; the same tuple
    # is used both to fetch bands and to build the eval context below, so the
    # (arbitrary) ordering is internally consistent.
    bands_names = tuple(set(re.findall(r"b(?P<bands>[0-9A]{1,2})", expr)))
    # One output band per comma-separated sub-expression
    rgb = expr.split(",")

    if sceneid.startswith("L"):
        from rio_tiler.landsat8 import tile as l8_tile

        arr, mask = l8_tile(
            sceneid, tile_x, tile_y, tile_z, bands=bands_names, **kwargs
        )
    elif sceneid.startswith("S2"):
        from rio_tiler.sentinel2 import tile as s2_tile

        arr, mask = s2_tile(
            sceneid, tile_x, tile_y, tile_z, bands=bands_names, **kwargs
        )
    elif sceneid.startswith("CBERS"):
        from rio_tiler.cbers import tile as cbers_tile

        arr, mask = cbers_tile(
            sceneid, tile_x, tile_y, tile_z, bands=bands_names, **kwargs
        )
    else:
        # Generic rasters are addressed by integer band indexes
        from rio_tiler.main import tile as main_tile

        bands = tuple(map(int, bands_names))
        arr, mask = main_tile(sceneid, tile_x, tile_y, tile_z, indexes=bands, **kwargs)

    # Map each fetched band to its "b<name>" variable for numexpr
    ctx = {}
    for bdx, b in enumerate(bands_names):
        ctx["b{}".format(b)] = arr[bdx]

    # Evaluate each sub-expression with numexpr; NaN (e.g. from division
    # by zero) is coerced to 0 by nan_to_num
    return (
        np.array(
            [np.nan_to_num(ne.evaluate(bloc.strip(), local_dict=ctx)) for bloc in rgb]
        ),
        mask,
    )
Apply expression on data. Attributes ---------- sceneid : str Landsat id, Sentinel id, CBERS ids or file url. tile_x : int Mercator tile X index. tile_y : int Mercator tile Y index. tile_z : int Mercator tile ZOOM level. expr : str, required Expression to apply (e.g '(B5+B4)/(B5-B4)') Band name should start with 'B'. Returns ------- out : ndarray Returns processed pixel value.
def get_float(self, key: str) -> Optional[float]:
    """
    Returns an optional configuration value, as a float, by its key, or None
    if it doesn't exist. If the configuration value isn't a legal float, this
    function will throw an error.

    :param str key: The requested configuration key.
    :return: The configuration key's value, or None if one does not exist.
    :rtype: Optional[float]
    :raises ConfigTypeError: The configuration value existed but couldn't be
                             coerced to float.
    """
    v = self.get(key)
    if v is None:
        return None
    try:
        return float(v)
    except (TypeError, ValueError) as err:
        # Only coercion failures are converted; the previous bare `except:`
        # would also have swallowed KeyboardInterrupt/SystemExit.
        raise ConfigTypeError(self.full_key(key), v, 'float') from err
Returns an optional configuration value, as a float, by its key, or None if it doesn't exist. If the configuration value isn't a legal float, this function will throw an error. :param str key: The requested configuration key. :return: The configuration key's value, or None if one does not exist. :rtype: Optional[float] :raises ConfigTypeError: The configuration value existed but couldn't be coerced to float.
def ls(quiet, verbose, uri):
    """List datasets / items in a dataset.

    When the URI points at a dataset, its items are listed (items of a
    proto dataset cannot be listed).  When the URI is a location holding
    datasets, the datasets themselves are listed, with proto datasets
    highlighted in red.
    """
    if dtoolcore._is_dataset(uri, CONFIG_PATH):
        _list_dataset_items(uri, quiet, verbose)
        return
    _list_datasets(uri, quiet, verbose)
List datasets / items in a dataset. If the URI is a dataset the items in the dataset will be listed. It is not possible to list the items in a proto dataset. If the URI is a location containing datasets the datasets will be listed. Proto datasets are highlighted in red.
def copy_resources(src_container, src_resources, storage_dir, dst_directories=None, apply_chown=None,
                   apply_chmod=None):
    """
    Copies files and directories from a Docker container. Multiple resources can be copied and additional options are
    available than in :func:`copy_resource`. Unlike in :func:`copy_resource`, Resources are copied as they are and not
    compressed to a tarball, and they are left on the remote machine.

    :param src_container: Container name or id.
    :type src_container: unicode
    :param src_resources: Resources, as (file or directory) names to copy.
    :type src_resources: iterable
    :param storage_dir: Remote directory to store the copied objects in.
    :type storage_dir: unicode
    :param dst_directories: Optional dictionary of destination directories, in the format ``resource: destination``. If
      not set, resources will be in the same relative structure to one another as inside the container. For setting a
      common default, use ``*`` as the resource key.
    :type dst_directories: dict
    :param apply_chown: Owner to set for the copied resources. Can be a user name or id, group name or id, both in the
      notation ``user:group``, or as a tuple ``(user, group)``.
    :type apply_chown: unicode or tuple
    :param apply_chmod: File system permissions to set for the copied resources. Can be any notation as accepted by
      `chmod`.
    :type apply_chmod: unicode
    """
    def _copy_resource(resource):
        # Destination defaults to the '*' wildcard mapping if present,
        # otherwise mirrors the resource's own container path.
        default_dest_path = generic_path if generic_path is not None else resource
        dest_path = directories.get(resource, default_dest_path).strip(posixpath.sep)
        head, tail = posixpath.split(dest_path)
        rel_path = posixpath.join(storage_dir, head)
        # Ensure the destination directory exists, then copy in place
        # (no tarball; resource stays on the remote machine).
        run(mkdir(rel_path, check_if_exists=True))
        run('docker cp {0}:{1} {2}'.format(src_container, resource, rel_path), shell=False)

    directories = dst_directories or {}
    # '*' acts as the common default destination for all resources
    generic_path = directories.get('*')
    for res in src_resources:
        _copy_resource(res)
    # Permissions/ownership are applied recursively to the whole storage
    # dir afterwards; chown requires elevated privileges (sudo).
    if apply_chmod:
        run(chmod(apply_chmod, storage_dir))
    if apply_chown:
        sudo(chown(apply_chown, storage_dir))
Copies files and directories from a Docker container. Multiple resources can be copied and additional options are available than in :func:`copy_resource`. Unlike in :func:`copy_resource`, Resources are copied as they are and not compressed to a tarball, and they are left on the remote machine. :param src_container: Container name or id. :type src_container: unicode :param src_resources: Resources, as (file or directory) names to copy. :type src_resources: iterable :param storage_dir: Remote directory to store the copied objects in. :type storage_dir: unicode :param dst_directories: Optional dictionary of destination directories, in the format ``resource: destination``. If not set, resources will be in the same relative structure to one another as inside the container. For setting a common default, use ``*`` as the resource key. :type dst_directories: dict :param apply_chown: Owner to set for the copied resources. Can be a user name or id, group name or id, both in the notation ``user:group``, or as a tuple ``(user, group)``. :type apply_chown: unicode or tuple :param apply_chmod: File system permissions to set for the copied resources. Can be any notation as accepted by `chmod`. :type apply_chmod: unicode
def keys(self):
    "Returns a list of ConfigMap keys from all four typed maps."
    all_keys = []
    for typed_map in (self._pb.IntMap, self._pb.StringMap,
                      self._pb.FloatMap, self._pb.BoolMap):
        all_keys.extend(typed_map.keys())
    return all_keys
Returns a list of ConfigMap keys.
def tapered_gutenberg_richter_cdf(moment, moment_threshold, beta, corner_moment):
    '''
    Tapered Gutenberg Richter Cumulative Density Function

    :param float or numpy.ndarray moment:
        Moment for calculation of rate
    :param float or numpy.ndarray moment_threshold:
        Threshold Moment of the distribution (moment rate essentially!)
    :param float beta:
        Beta value (b * ln(10.)) of the Tapered Gutenberg-Richter Function
    :param float corner_moment:
        Corner moment of the Tapered Gutenberg-Richter Function
    :returns:
        Cumulative probability of moment release > moment
    '''
    # Power-law term tapered by an exponential roll-off at the corner moment
    taper = np.exp((moment_threshold - moment) / corner_moment)
    power_law = (moment / moment_threshold) ** (-beta)
    return power_law * taper
Tapered Gutenberg Richter Cumulative Density Function :param float or numpy.ndarray moment: Moment for calculation of rate :param float or numpy.ndarray moment_threshold: Threshold Moment of the distribution (moment rate essentially!) :param float beta: Beta value (b * ln(10.)) of the Tapered Gutenberg-Richter Function :param float corner_moment: Corner moment of the Tapered Gutenberg-Richter Function :returns: Cumulative probability of moment release > moment
def add_commit_branches(self, git_repo, enrich_backend):
    """Add the information about branches to the documents representing commits in the
    enriched index.

    Branches are obtained using the command `git ls-remote`, then for each branch,
    the list of commits is retrieved via the command `git rev-list branch-name` and
    used to update the corresponding items in the enriched index.

    :param git_repo: GitRepository object
    :param enrich_backend: the enrich backend
    """
    # Iterate remote refs; the commit hash a ref points at is unused
    # (renamed from `hash`, which shadowed the builtin).
    for _hash, refname in git_repo._discover_refs(remote=True):

        if not refname.startswith('refs/heads/'):
            # only branches are of interest (skip tags, HEAD, etc.)
            continue

        # Start each branch with a fresh batch.  Previously the batch list
        # was shared across branches and never cleared after flushing a
        # final partial batch, so commits from one branch leaked into the
        # next branch's updates.
        to_process = []
        commit_count = 0
        branch_name = refname.replace('refs/heads/', '')

        try:
            commits = git_repo.rev_list([branch_name])

            for commit in commits:
                to_process.append(commit)
                commit_count += 1

                if commit_count == MAX_BULK_UPDATE_SIZE:
                    # flush a full batch to the enriched index
                    self.__process_commits_in_branch(enrich_backend, branch_name, to_process)

                    # reset the counter
                    to_process = []
                    commit_count = 0

            if commit_count:
                # flush the remaining partial batch
                self.__process_commits_in_branch(enrich_backend, branch_name, to_process)
        except Exception as e:
            # Preserve original behavior: abort branch enrichment for this
            # repo on the first failure.
            logger.error("Skip adding branch info for repo %s due to %s", git_repo.uri, e)
            return
Add the information about branches to the documents representing commits in the enriched index. Branches are obtained using the command `git ls-remote`, then for each branch, the list of commits is retrieved via the command `git rev-list branch-name` and used to update the corresponding items in the enriched index. :param git_repo: GitRepository object :param enrich_backend: the enrich backend
def store(self, response):
    """Store response in cache, skipping if code is forbidden.

    :param requests.Response response: HTTP response
    """
    # Only responses with cacheable status codes are kept
    if response.status_code not in CACHE_CODES:
        return
    timestamp = datetime.datetime.now()
    self.data[response.url] = {
        'date': timestamp,
        'response': response,
    }
    logger.info('Stored response in cache')
    # Evict stale entries, then enforce the size limit
    self._reduce_age(timestamp)
    self._reduce_count()
Store response in cache, skipping if code is forbidden. :param requests.Response response: HTTP response
def pip_ins_req(
        ctx,
        python,
        req_path,
        venv_path=None,
        inputs=None,
        outputs=None,
        touch=None,
        check_import=False,
        check_import_module=None,
        pip_setup_file=None,
        pip_setup_touch=None,
        virtualenv_setup_touch=None,
        always=False,
):
    """
    Create task that uses given virtual environment's `pip` to set up
    packages listed in given requirements file.

    :param ctx: BuildContext object.

    :param python: Python program path used to set up `pip` and `virtualenv`.

    :param req_path: Requirements file relative path relative to top directory.

    :param venv_path: Virtual environment directory relative path relative to
        top directory.

        If given, will create the virtual environment and set up packages
        listed in given requirements file in the virtual environment.

        If not given, will set up packages listed in given requirements file
        in given Python program's environment.

    :param inputs: Input items list to add to created task.

        See :paramref:`create_cmd_task.inputs` for allowed item types.

    :param outputs: Output items list to add to created task.

        See :paramref:`create_cmd_task.outputs` for allowed item types.

    :param touch: Touch file path for dirty checking.

    :param check_import: Whether import module for dirty checking.

    :param check_import_module: Module name to import for dirty checking.

    :param pip_setup_file: `get-pip.py` file path for `pip_setup` task.

    :param pip_setup_touch: Touch file path for `pip_setup` task.

    :param virtualenv_setup_touch: Touch file path for `virtualenv_setup`
        task.

    :param always: Whether always run.

    :return: Created task.
    """
    # Ensure given context object is BuildContext object
    _ensure_build_context(ctx)

    # If virtual environment directory path is not given
    if venv_path is None:
        # Use given Python program path
        venv_python = python

    # If virtual environment directory path is given
    else:
        # Get Python program path in the virtual environment
        venv_python = get_python_path(venv_path)

        # Mark the path as input target
        venv_python = mark_input(venv_python)

    # If virtual environment directory path is not given,
    # it means not create virtual environment.
    if venv_path is None:
        # Create task that sets up `pip`
        pip_setup_task = pip_setup(
            # Context
            ctx=ctx,

            # Python program path
            python=python,

            # `get-pip.py` file path
            setup_file=pip_setup_file,

            # Touch file path
            touch=pip_setup_touch,

            # Whether always run
            always=always,
        )

        # Not create virtual environment
        venv_task = None

    # If virtual environment directory path is given
    else:
        # Not create task that sets up `pip` here because `create_venv`
        # function below will do
        pip_setup_task = None

        # Create task that sets up virtual environment
        venv_task = create_venv(
            # Context
            ctx=ctx,

            # Python program path
            python=python,

            # Virtual environment directory path
            venv_path=venv_path,

            # Output items list
            outputs=[
                # Add the virtual environment's `python` program path as output
                # target for dirty checking
                get_python_path(venv_path),

                # Add the virtual environment's `pip` program path as output
                # target for dirty checking
                get_pip_path(venv_path),
            ],

            # Whether always run
            always=always,

            # Task name
            task_name='Create venv `{}`'.format(venv_path),

            # `get-pip.py` file path for `pip_setup` task
            pip_setup_file=pip_setup_file,

            # Touch file path for `pip_setup` task
            pip_setup_touch=pip_setup_touch,

            # Touch file path for `virtualenv_setup` task
            virtualenv_setup_touch=virtualenv_setup_touch,
        )

    # If touch file path is not given
    if not touch:
        # Not update touch file
        touch_node = None

    # If touch file path is given
    else:
        # Update touch file.  `always` may be forced on by the dirty check.
        touch_node, always = update_touch_file(
            # Context
            ctx=ctx,

            # Touch file path
            path=touch,

            # Whether import module for dirty checking
            check_import=check_import,

            # Module name to import for dirty checking
            check_import_module=check_import_module,

            # Python program path for dirty checking
            check_import_python=venv_python,

            # Whether always run
            always=always,
        )

    # Create task that sets up packages
    task = create_cmd_task(
        # Context
        ctx=ctx,

        # Command parts
        parts=[
            # Python program path
            venv_python,

            # Run module
            '-m',

            # Module name
            'pip',

            # Install package
            'install',

            # Read package names from requirements file
            '-r',

            # Requirements file path. Mark as input target.
            mark_input(req_path),
        ],

        # Input items list
        inputs=inputs,

        # Output items list
        outputs=[
            # Use the touch node as output target for dirty checking
            touch_node,

            # Given output items list
            outputs,
        ],

        # Whether always run
        always=always,
    )

    # Chain these tasks to run one after another
    chain_tasks([
        pip_setup_task,
        venv_task,
        task,
    ])

    # Return the created task
    return task
Create task that uses given virtual environment's `pip` to set up packages listed in given requirements file. :param ctx: BuildContext object. :param python: Python program path used to set up `pip` and `virtualenv`. :param req_path: Requirements file relative path relative to top directory. :param venv_path: Virtual environment directory relative path relative to top directory. If given, will create the virtual environment and set up packages listed in given requirements file in the virtual environment. If not given, will set up packages listed in given requirements file in given Python program's environment. :param inputs: Input items list to add to created task. See :paramref:`create_cmd_task.inputs` for allowed item types. :param outputs: Output items list to add to created task. See :paramref:`create_cmd_task.outputs` for allowed item types. :param touch: Touch file path for dirty checking. :param check_import: Whether import module for dirty checking. :param check_import_module: Module name to import for dirty checking. :param pip_setup_file: `get-pip.py` file path for `pip_setup` task. :param pip_setup_touch: Touch file path for `pip_setup` task. :param virtualenv_setup_touch: Touch file path for `virtualenv_setup` task. :param always: Whether always run. :return: Created task.
def exchange_code_and_store_config(auth_client, auth_code):
    """
    Finishes auth flow after code is gotten from command line or local server.
    Exchanges code for tokens and gets user info from auth.
    Stores tokens and user info in config.
    """
    # exchange the code for tokens, keyed by resource server
    token_response = auth_client.oauth2_exchange_code_for_tokens(auth_code)
    token_response = token_response.by_resource_server

    transfer_tokens = token_response["transfer.api.globus.org"]
    auth_tokens = token_response["auth.globus.org"]

    # revoke any existing tokens before overwriting them
    for token_opt in (
        TRANSFER_RT_OPTNAME,
        TRANSFER_AT_OPTNAME,
        AUTH_RT_OPTNAME,
        AUTH_AT_OPTNAME,
    ):
        existing = lookup_option(token_opt)
        if existing:
            auth_client.oauth2_revoke_token(existing)

    # write new tokens to config
    write_option(TRANSFER_RT_OPTNAME, transfer_tokens["refresh_token"])
    write_option(TRANSFER_AT_OPTNAME, transfer_tokens["access_token"])
    write_option(TRANSFER_AT_EXPIRES_OPTNAME,
                 transfer_tokens["expires_at_seconds"])
    write_option(AUTH_RT_OPTNAME, auth_tokens["refresh_token"])
    write_option(AUTH_AT_OPTNAME, auth_tokens["access_token"])
    write_option(AUTH_AT_EXPIRES_OPTNAME, auth_tokens["expires_at_seconds"])
Finishes auth flow after code is gotten from command line or local server. Exchanges code for tokens and gets user info from auth. Stores tokens and user info in config.
def locus_read_generator(
        samfile,
        chromosome,
        base1_position_before_variant,
        base1_position_after_variant,
        use_duplicate_reads=USE_DUPLICATE_READS,
        use_secondary_alignments=USE_SECONDARY_ALIGNMENTS,
        min_mapping_quality=MIN_READ_MAPPING_QUALITY):
    """
    Generator that yields a sequence of ReadAtLocus records for reads which
    contain the positions before and after a variant. The actual work to figure
    out if what's between those positions matches a variant happens later in
    the `variant_reads` module.

    Parameters
    ----------
    samfile : pysam.AlignmentFile

    chromosome : str

    base1_position_before_variant : int
        Genomic position of reference nucleotide before a variant

    base1_position_after_variant : int
        Genomic position of reference nucleotide before a variant

    use_duplicate_reads : bool
        By default, we're ignoring any duplicate reads

    use_secondary_alignments : bool
        By default we are using secondary alignments, set this to False to
        only use primary alignments of reads.

    min_mapping_quality : int
        Drop reads below this mapping quality

    Yields ReadAtLocus objects
    """
    logger.debug(
        "Gathering reads at locus %s: %d-%d",
        chromosome,
        base1_position_before_variant,
        base1_position_after_variant)
    # Convert 1-based genomic coordinates to pysam's 0-based convention
    base0_position_before_variant = base1_position_before_variant - 1
    base0_position_after_variant = base1_position_after_variant - 1

    count = 0

    # We get a pileup at the base before the variant and then check to make sure
    # that reads also overlap the reference position after the variant.
    #
    # TODO: scan over a wider interval of pileups and collect reads that don't
    # overlap the bases before/after a variant due to splicing
    for pileup_element in pileup_reads_at_position(
            samfile=samfile,
            chromosome=chromosome,
            base0_position=base0_position_before_variant):
        # Returns None when the read fails the filters (duplicates,
        # secondary alignments, mapping quality, locus overlap)
        read = LocusRead.from_pysam_pileup_element(
            pileup_element,
            base0_position_before_variant=base0_position_before_variant,
            base0_position_after_variant=base0_position_after_variant,
            use_secondary_alignments=use_secondary_alignments,
            use_duplicate_reads=use_duplicate_reads,
            min_mapping_quality=min_mapping_quality)

        if read is not None:
            count += 1
            yield read

    logger.info(
        "Found %d reads overlapping locus %s: %d-%d",
        count,
        chromosome,
        base1_position_before_variant,
        base1_position_after_variant)
Generator that yields a sequence of ReadAtLocus records for reads which contain the positions before and after a variant. The actual work to figure out if what's between those positions matches a variant happens later in the `variant_reads` module. Parameters ---------- samfile : pysam.AlignmentFile chromosome : str base1_position_before_variant : int Genomic position of reference nucleotide before a variant base1_position_after_variant : int Genomic position of reference nucleotide before a variant use_duplicate_reads : bool By default, we're ignoring any duplicate reads use_secondary_alignments : bool By default we are using secondary alignments, set this to False to only use primary alignments of reads. min_mapping_quality : int Drop reads below this mapping quality Yields ReadAtLocus objects
def cg_prolongation_smoothing(A, T, B, BtBinv, Sparsity_Pattern, maxiter, tol,
                              weighting='local', Cpt_params=None):
    """Use CG to smooth T by solving A T = 0, subject to nullspace and sparsity
    constraints.

    Parameters
    ----------
    A : csr_matrix, bsr_matrix
        SPD sparse NxN matrix
    T : bsr_matrix
        Tentative prolongator, a NxM sparse matrix (M < N).
        This is initial guess for the equation A T = 0.
        Assumed that T B_c = B_f
    B : array
        Near-nullspace modes for coarse grid, i.e., B_c.
        Has shape (M,k) where k is the number of coarse candidate vectors.
    BtBinv : array
        3 dimensional array such that,
        BtBinv[i] = pinv(B_i.H Bi), and B_i is B restricted
        to the neighborhood (in the matrix graph) of dof of i.
    Sparsity_Pattern : csr_matrix, bsr_matrix
        Sparse NxM matrix
        This is the sparsity pattern constraint to enforce on the
        eventual prolongator
    maxiter : int
        maximum number of iterations
    tol : float
        residual tolerance for A T = 0
    weighting : string
        'block', 'diagonal' or 'local' construction of the diagonal
        preconditioning
    Cpt_params : tuple
        Tuple of the form (bool, dict).  If the Cpt_params[0] = False, then
        the standard SA prolongation smoothing is carried out.  If True, then
        dict must be a dictionary of parameters containing, (1) P_I: P_I.T is
        the injection matrix for the Cpts, (2) I_F: an identity matrix for
        only the F-points (i.e. I, but with zero rows and columns for
        C-points) and I_C: the C-point analogue to I_F.

    Returns
    -------
    T : bsr_matrix
        Smoothed prolongator using conjugate gradients to solve A T = 0,
        subject to the constraints, T B_c = B_f, and T has no nonzero
        outside of the sparsity pattern in Sparsity_Pattern.

    See Also
    --------
    The principal calling routine,
    pyamg.aggregation.smooth.energy_prolongation_smoother

    """
    # Preallocate the A*P product with the sparsity pattern's structure so
    # the in-place incomplete matmul below can reuse it every iteration
    AP = sparse.bsr_matrix((np.zeros(Sparsity_Pattern.data.shape,
                                     dtype=T.dtype),
                            Sparsity_Pattern.indices,
                            Sparsity_Pattern.indptr),
                           shape=(Sparsity_Pattern.shape))

    # CG will be run with diagonal preconditioning
    if weighting == 'diagonal':
        Dinv = get_diagonal(A, norm_eq=False, inv=True)
    elif weighting == 'block':
        Dinv = get_block_diag(A, blocksize=A.blocksize[0], inv_flag=True)
        Dinv = sparse.bsr_matrix((Dinv,
                                  np.arange(Dinv.shape[0]),
                                  np.arange(Dinv.shape[0]+1)),
                                 shape=A.shape)
    elif weighting == 'local':
        # Based on Gershgorin estimate
        D = np.abs(A)*np.ones((A.shape[0], 1), dtype=A.dtype)
        Dinv = np.zeros_like(D)
        Dinv[D != 0] = 1.0 / np.abs(D[D != 0])
    else:
        raise ValueError('weighting value is invalid')

    # Calculate initial residual
    #   Equivalent to R = -A*T;    R = R.multiply(Sparsity_Pattern)
    #   with the added constraint that R has an explicit 0 wherever
    #   R is 0 and Sparsity_Pattern is not
    uones = np.zeros(Sparsity_Pattern.data.shape, dtype=T.dtype)
    R = sparse.bsr_matrix((uones,
                           Sparsity_Pattern.indices,
                           Sparsity_Pattern.indptr),
                          shape=(Sparsity_Pattern.shape))
    pyamg.amg_core.incomplete_mat_mult_bsr(A.indptr, A.indices,
                                           np.ravel(A.data),
                                           T.indptr, T.indices,
                                           np.ravel(T.data),
                                           R.indptr, R.indices,
                                           np.ravel(R.data),
                                           int(T.shape[0]/T.blocksize[0]),
                                           int(T.shape[1]/T.blocksize[1]),
                                           A.blocksize[0], A.blocksize[1],
                                           T.blocksize[1])
    R.data *= -1.0

    # Enforce R*B = 0
    Satisfy_Constraints(R, B, BtBinv)

    if R.nnz == 0:
        print("Error in sa_energy_min(..). Initial R no nonzeros on a level. \
Returning tentative prolongator\n")
        return T

    # Calculate Frobenius norm of the residual
    # NOTE: nnz is used as a cheap proxy for the Frobenius norm (the exact
    # computation is left commented out below)
    resid = R.nnz  # np.sqrt((R.data.conjugate()*R.data).sum())
    # print "Energy Minimization of Prolongator \
    #        --- Iteration 0 --- r = " + str(resid)
    i = 0
    while i < maxiter and resid > tol:
        # Apply diagonal preconditioner
        if weighting == 'local' or weighting == 'diagonal':
            Z = scale_rows(R, Dinv)
        else:
            Z = Dinv*R

        # Frobenius inner-product of (R,Z) = sum( np.conjugate(rk).*zk)
        newsum = (R.conjugate().multiply(Z)).sum()
        if newsum < tol:
            # met tolerance, so halt
            break

        # P is the search direction, not the prolongator, which is T.
        if(i == 0):
            P = Z
            oldsum = newsum
        else:
            beta = newsum / oldsum
            P = Z + beta*P
            oldsum = newsum

        # Calculate new direction and enforce constraints
        #   Equivalent to:  AP = A*P;    AP = AP.multiply(Sparsity_Pattern)
        #   with the added constraint that explicit zeros are in AP wherever
        #   AP = 0 and Sparsity_Pattern does not  !!!!
        AP.data[:] = 0.0
        pyamg.amg_core.incomplete_mat_mult_bsr(A.indptr, A.indices,
                                               np.ravel(A.data),
                                               P.indptr, P.indices,
                                               np.ravel(P.data),
                                               AP.indptr, AP.indices,
                                               np.ravel(AP.data),
                                               int(T.shape[0]/T.blocksize[0]),
                                               int(T.shape[1]/T.blocksize[1]),
                                               A.blocksize[0], A.blocksize[1],
                                               P.blocksize[1])

        # Enforce AP*B = 0
        Satisfy_Constraints(AP, B, BtBinv)

        # Frobenius inner-product of (P, AP)
        alpha = newsum/(P.conjugate().multiply(AP)).sum()

        # Update the prolongator, T
        T = T + alpha*P

        # Ensure identity at C-pts
        if Cpt_params[0]:
            T = Cpt_params[1]['I_F']*T + Cpt_params[1]['P_I']

        # Update residual
        R = R - alpha*AP

        i += 1

        # Calculate Frobenius norm of the residual
        resid = R.nnz  # np.sqrt((R.data.conjugate()*R.data).sum())
        # print "Energy Minimization of Prolongator \
        #        --- Iteration " + str(i) + " --- r = " + str(resid)

    return T
Use CG to smooth T by solving A T = 0, subject to nullspace and sparsity constraints. Parameters ---------- A : csr_matrix, bsr_matrix SPD sparse NxN matrix T : bsr_matrix Tentative prolongator, a NxM sparse matrix (M < N). This is initial guess for the equation A T = 0. Assumed that T B_c = B_f B : array Near-nullspace modes for coarse grid, i.e., B_c. Has shape (M,k) where k is the number of coarse candidate vectors. BtBinv : array 3 dimensional array such that, BtBinv[i] = pinv(B_i.H Bi), and B_i is B restricted to the neighborhood (in the matrix graph) of dof of i. Sparsity_Pattern : csr_matrix, bsr_matrix Sparse NxM matrix This is the sparsity pattern constraint to enforce on the eventual prolongator maxiter : int maximum number of iterations tol : float residual tolerance for A T = 0 weighting : string 'block', 'diagonal' or 'local' construction of the diagonal preconditioning Cpt_params : tuple Tuple of the form (bool, dict). If the Cpt_params[0] = False, then the standard SA prolongation smoothing is carried out. If True, then dict must be a dictionary of parameters containing, (1) P_I: P_I.T is the injection matrix for the Cpts, (2) I_F: an identity matrix for only the F-points (i.e. I, but with zero rows and columns for C-points) and I_C: the C-point analogue to I_F. Returns ------- T : bsr_matrix Smoothed prolongator using conjugate gradients to solve A T = 0, subject to the constraints, T B_c = B_f, and T has no nonzero outside of the sparsity pattern in Sparsity_Pattern. See Also -------- The principal calling routine, pyamg.aggregation.smooth.energy_prolongation_smoother
def lat_from_pole(ref_loc_lon, ref_loc_lat, pole_plon, pole_plat):
    """
    Calculate paleolatitude for a reference location based on a
    paleomagnetic pole.

    Required Parameters
    ----------
    ref_loc_lon: longitude of reference location in degrees
    ref_loc_lat: latitude of reference location
    pole_plon: paleopole longitude in degrees
    pole_plat: paleopole latitude in degrees
    """
    site = (ref_loc_lon, ref_loc_lat)
    paleopole = (pole_plon, pole_plat)
    # Paleolatitude is the complement of the angular distance between
    # the pole and the site
    return float(90 - pmag.angle(paleopole, site))
Calculate paleolatitude for a reference location based on a paleomagnetic pole Required Parameters ---------- ref_loc_lon: longitude of reference location in degrees ref_loc_lat: latitude of reference location pole_plon: paleopole longitude in degrees pole_plat: paleopole latitude in degrees
def _getdata_by_idx(data, idx):
    """Shuffle the data by re-indexing each array with `idx`."""
    reordered = []
    for name, arr in data:
        # h5py datasets are left untouched (and h5py may not be installed)
        if h5py and isinstance(arr, h5py.Dataset):
            reordered.append((name, arr))
        elif isinstance(arr, CSRNDArray):
            reordered.append((name, sparse_array(arr.asscipy()[idx],
                                                 arr.context)))
        else:
            reordered.append((name, array(arr.asnumpy()[idx], arr.context)))
    return reordered
Shuffle the data.
def set_up_logging(log_file, console_log_level):
    """Configure logging settings and return a logger object."""
    root_logger = logging.getLogger()
    # Root logger passes everything; handlers filter individually
    root_logger.setLevel(logging.DEBUG)

    formatter = logging.Formatter(
        "{asctime} {levelname} ({name}): {message}", style='{'
    )

    file_handler = logging.FileHandler(str(log_file))
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)

    console_handler = logging.StreamHandler()
    console_handler.setLevel(console_log_level)
    console_handler.setFormatter(formatter)

    # Attach file handler first, then console, matching original order
    for handler in (file_handler, console_handler):
        root_logger.addHandler(handler)
    return root_logger
Configure logging settings and return a logger object.
def validate_character_for_story_element(sender, instance, action, reverse,
                                         pk_set, *args, **kwargs):
    '''
    Validates that character is from the same outline as the story node.
    '''
    # Only the pre_add signal is validated
    if action != 'pre_add':
        return
    if reverse:
        # `instance` is the character side; pk_set holds story node pks
        for node_pk in pk_set:
            story_node = StoryElementNode.objects.get(pk=node_pk)
            if instance.outline != story_node.outline:
                raise IntegrityError(_('Character Instance must be from the same outline as story node.'))
    else:
        # `instance` is the story node; pk_set holds character instance pks
        for char_pk in pk_set:
            char_instance = CharacterInstance.objects.get(pk=char_pk)
            if char_instance.outline != instance.outline:
                raise IntegrityError(_('Character Instance must be from the same outline as story node.'))
Validates that character is from the same outline as the story node.
def thresh(data, threshold, threshold_type='hard'): r"""Threshold data This method perfoms hard or soft thresholding on the input data Parameters ---------- data : np.ndarray, list or tuple Input data array threshold : float or np.ndarray Threshold level(s) threshold_type : str {'hard', 'soft'} Type of noise to be added (default is 'hard') Returns ------- np.ndarray thresholded data Raises ------ ValueError If `threshold_type` is not 'hard' or 'soft' Notes ----- Implements one of the following two equations: * Hard Threshold .. math:: \mathrm{HT}_\lambda(x) = \begin{cases} x & \text{if } |x|\geq\lambda \\ 0 & \text{otherwise} \end{cases} * Soft Threshold .. math:: \mathrm{ST}_\lambda(x) = \begin{cases} x-\lambda\text{sign}(x) & \text{if } |x|\geq\lambda \\ 0 & \text{otherwise} \end{cases} Examples -------- >>> import numpy as np >>> from modopt.signal.noise import thresh >>> np.random.seed(1) >>> x = np.random.randint(-9, 9, 10) >>> x array([-4, 2, 3, -1, 0, 2, -4, 6, -9, 7]) >>> thresh(x, 4) array([-4, 0, 0, 0, 0, 0, -4, 6, -9, 7]) >>> import numpy as np >>> from modopt.signal.noise import thresh >>> np.random.seed(1) >>> x = np.random.ranf((3, 3)) >>> x array([[ 4.17022005e-01, 7.20324493e-01, 1.14374817e-04], [ 3.02332573e-01, 1.46755891e-01, 9.23385948e-02], [ 1.86260211e-01, 3.45560727e-01, 3.96767474e-01]]) >>> thresh(x, 0.2, threshold_type='soft') array([[ 0.217022 , 0.52032449, -0. ], [ 0.10233257, -0. , -0. ], [-0. , 0.14556073, 0.19676747]]) """ data = np.array(data) if threshold_type not in ('hard', 'soft'): raise ValueError('Invalid threshold type. Options are "hard" or' '"soft"') if threshold_type == 'soft': return np.around(np.maximum((1.0 - threshold / np.maximum(np.finfo(np.float64).eps, np.abs(data))), 0.0) * data, decimals=15) else: return data * (np.abs(data) >= threshold)
r"""Threshold data This method perfoms hard or soft thresholding on the input data Parameters ---------- data : np.ndarray, list or tuple Input data array threshold : float or np.ndarray Threshold level(s) threshold_type : str {'hard', 'soft'} Type of noise to be added (default is 'hard') Returns ------- np.ndarray thresholded data Raises ------ ValueError If `threshold_type` is not 'hard' or 'soft' Notes ----- Implements one of the following two equations: * Hard Threshold .. math:: \mathrm{HT}_\lambda(x) = \begin{cases} x & \text{if } |x|\geq\lambda \\ 0 & \text{otherwise} \end{cases} * Soft Threshold .. math:: \mathrm{ST}_\lambda(x) = \begin{cases} x-\lambda\text{sign}(x) & \text{if } |x|\geq\lambda \\ 0 & \text{otherwise} \end{cases} Examples -------- >>> import numpy as np >>> from modopt.signal.noise import thresh >>> np.random.seed(1) >>> x = np.random.randint(-9, 9, 10) >>> x array([-4, 2, 3, -1, 0, 2, -4, 6, -9, 7]) >>> thresh(x, 4) array([-4, 0, 0, 0, 0, 0, -4, 6, -9, 7]) >>> import numpy as np >>> from modopt.signal.noise import thresh >>> np.random.seed(1) >>> x = np.random.ranf((3, 3)) >>> x array([[ 4.17022005e-01, 7.20324493e-01, 1.14374817e-04], [ 3.02332573e-01, 1.46755891e-01, 9.23385948e-02], [ 1.86260211e-01, 3.45560727e-01, 3.96767474e-01]]) >>> thresh(x, 0.2, threshold_type='soft') array([[ 0.217022 , 0.52032449, -0. ], [ 0.10233257, -0. , -0. ], [-0. , 0.14556073, 0.19676747]])
def wrmap(func, iterable, *args): 'Same as map(func, iterable, *args), but ignoring exceptions.' for it in iterable: try: yield func(it, *args) except Exception as e: pass
Same as map(func, iterable, *args), but ignoring exceptions.
def bot_has_permissions(**perms): """Similar to :func:`.has_permissions` except checks if the bot itself has the permissions listed. This check raises a special exception, :exc:`.BotMissingPermissions` that is inherited from :exc:`.CheckFailure`. """ def predicate(ctx): guild = ctx.guild me = guild.me if guild is not None else ctx.bot.user permissions = ctx.channel.permissions_for(me) missing = [perm for perm, value in perms.items() if getattr(permissions, perm, None) != value] if not missing: return True raise BotMissingPermissions(missing) return check(predicate)
Similar to :func:`.has_permissions` except checks if the bot itself has the permissions listed. This check raises a special exception, :exc:`.BotMissingPermissions` that is inherited from :exc:`.CheckFailure`.
def ignore_path(path, ignore_list=None, whitelist=None): """ Returns a boolean indicating if a path should be ignored given an ignore_list and a whitelist of glob patterns. """ if ignore_list is None: return True should_ignore = matches_glob_list(path, ignore_list) if whitelist is None: return should_ignore return should_ignore and not matches_glob_list(path, whitelist)
Returns a boolean indicating if a path should be ignored given an ignore_list and a whitelist of glob patterns.
def _reverse_convert(x, factor1, factor2): """ Converts mixing ratio x in c1 - c2 tie line to that in comp1 - comp2 tie line. Args: x (float): Mixing ratio x in c1 - c2 tie line, a float between 0 and 1. factor1 (float): Compositional ratio between composition c1 and processed composition comp1. E.g., factor for Composition('SiO2') and Composition('O') is 2. factor2 (float): Compositional ratio between composition c2 and processed composition comp2. Returns: Mixing ratio in comp1 - comp2 tie line, a float between 0 and 1. """ return x * factor1 / ((1-x) * factor2 + x * factor1)
Converts mixing ratio x in c1 - c2 tie line to that in comp1 - comp2 tie line. Args: x (float): Mixing ratio x in c1 - c2 tie line, a float between 0 and 1. factor1 (float): Compositional ratio between composition c1 and processed composition comp1. E.g., factor for Composition('SiO2') and Composition('O') is 2. factor2 (float): Compositional ratio between composition c2 and processed composition comp2. Returns: Mixing ratio in comp1 - comp2 tie line, a float between 0 and 1.
def add_cookie_header(self, request): """Add correct Cookie: header to request (urllib.request.Request object). The Cookie2 header is also added unless policy.hide_cookie2 is true. """ _debug("add_cookie_header") self._cookies_lock.acquire() try: self._policy._now = self._now = int(time.time()) cookies = self._cookies_for_request(request) attrs = self._cookie_attrs(cookies) if attrs: if not request.has_header("Cookie"): request.add_unredirected_header( "Cookie", "; ".join(attrs)) # if necessary, advertise that we know RFC 2965 if (self._policy.rfc2965 and not self._policy.hide_cookie2 and not request.has_header("Cookie2")): for cookie in cookies: if cookie.version != 1: request.add_unredirected_header("Cookie2", '$Version="1"') break finally: self._cookies_lock.release() self.clear_expired_cookies()
Add correct Cookie: header to request (urllib.request.Request object). The Cookie2 header is also added unless policy.hide_cookie2 is true.
def write(self, address, data, x, y, p=0): """Write a bytestring to an address in memory. It is strongly encouraged to only read and write to blocks of memory allocated using :py:meth:`.sdram_alloc`. Additionally, :py:meth:`.sdram_alloc_as_filelike` can be used to safely wrap read/write access to memory with a file-like interface and prevent accidental access to areas outside the allocated block. Parameters ---------- address : int The address at which to start writing the data. Addresses are given within the address space of a SpiNNaker core. See the SpiNNaker datasheet for more information. data : :py:class:`bytes` Data to write into memory. Writes are automatically broken into a sequence of SCP write commands. """ # Call the SCPConnection to perform the write on our behalf connection = self._get_connection(x, y) return connection.write(self.scp_data_length, self.scp_window_size, x, y, p, address, data)
Write a bytestring to an address in memory. It is strongly encouraged to only read and write to blocks of memory allocated using :py:meth:`.sdram_alloc`. Additionally, :py:meth:`.sdram_alloc_as_filelike` can be used to safely wrap read/write access to memory with a file-like interface and prevent accidental access to areas outside the allocated block. Parameters ---------- address : int The address at which to start writing the data. Addresses are given within the address space of a SpiNNaker core. See the SpiNNaker datasheet for more information. data : :py:class:`bytes` Data to write into memory. Writes are automatically broken into a sequence of SCP write commands.
def render_title(text, markup=True, no_smartquotes=False): """ Convert a Markdown title to HTML """ # HACK: If the title starts with something that looks like a list, save it # for later pfx, text = re.match(r'([0-9. ]*)(.*)', text).group(1, 2) text = pfx + misaka.Markdown(TitleRenderer(), extensions=TITLE_EXTENSIONS)(text) if not markup: strip = HTMLStripper() strip.feed(text) text = strip.get_data() if not no_smartquotes: text = misaka.smartypants(text) return flask.Markup(text)
Convert a Markdown title to HTML
def local_async(self, *args, **kwargs): ''' Run :ref:`execution modules <all-salt.modules>` asynchronously Wraps :py:meth:`salt.client.LocalClient.run_job`. :return: job ID ''' local = salt.client.get_local_client(mopts=self.opts) ret = local.run_job(*args, **kwargs) return ret
Run :ref:`execution modules <all-salt.modules>` asynchronously Wraps :py:meth:`salt.client.LocalClient.run_job`. :return: job ID
def replaceMaskedValue(self, replacementValue): """ Replaces values where the mask is True with the replacement value. """ if self.mask is False: pass elif self.mask is True: self.data[:] = replacementValue else: self.data[self.mask] = replacementValue
Replaces values where the mask is True with the replacement value.
def b58decode(val, charset=DEFAULT_CHARSET): """Decode base58check encoded input to original raw bytes. :param bytes val: The value to base58cheeck decode. :param bytes charset: (optional) The character set to use for decoding. :return: the decoded bytes. :rtype: bytes Usage:: >>> import base58check >>> base58check.b58decode('\x00v\x80\xad\xec\x8e\xab\xca\xba\xc6v\xbe' ... '\x9e\x83\x85J\xde\x0b\xd2,\xdb\x0b\xb9`\xde') b'1BoatSLRHtKNngkdXEeobR76b53LETtpyT' """ def _b58decode_int(val): output = 0 for char in val: output = output * base + charset.index(char) return output if isinstance(val, str): val = val.encode() if isinstance(charset, str): charset = charset.encode() base = len(charset) if not base == 58: raise ValueError('charset base must be 58, not %s' % base) pad_len = len(val) val = val.lstrip(bytes([charset[0]])) pad_len -= len(val) acc = _b58decode_int(val) result = deque() while acc > 0: acc, mod = divmod(acc, 256) result.appendleft(mod) prefix = b'\0' * pad_len return prefix + bytes(result)
Decode base58check encoded input to original raw bytes. :param bytes val: The value to base58cheeck decode. :param bytes charset: (optional) The character set to use for decoding. :return: the decoded bytes. :rtype: bytes Usage:: >>> import base58check >>> base58check.b58decode('\x00v\x80\xad\xec\x8e\xab\xca\xba\xc6v\xbe' ... '\x9e\x83\x85J\xde\x0b\xd2,\xdb\x0b\xb9`\xde') b'1BoatSLRHtKNngkdXEeobR76b53LETtpyT'
def describe_group(record, region): """Attempts to describe group ids.""" account_id = record['account'] group_name = cloudwatch.filter_request_parameters('groupName', record) vpc_id = cloudwatch.filter_request_parameters('vpcId', record) group_id = cloudwatch.filter_request_parameters('groupId', record, look_in_response=True) # Did this get collected already by the poller? if cloudwatch.get_collected_details(record): LOG.debug(f"[<--] Received already collected security group data: {record['detail']['collected']}") return [record['detail']['collected']] try: # Always depend on Group ID first: if group_id: # pylint: disable=R1705 return describe_security_groups( account_number=account_id, assume_role=HISTORICAL_ROLE, region=region, GroupIds=[group_id] )['SecurityGroups'] elif vpc_id and group_name: return describe_security_groups( account_number=account_id, assume_role=HISTORICAL_ROLE, region=region, Filters=[ { 'Name': 'group-name', 'Values': [group_name] }, { 'Name': 'vpc-id', 'Values': [vpc_id] } ] )['SecurityGroups'] else: raise Exception('[X] Did not receive Group ID or VPC/Group Name pairs. ' f'We got: ID: {group_id} VPC/Name: {vpc_id}/{group_name}.') except ClientError as exc: if exc.response['Error']['Code'] == 'InvalidGroup.NotFound': return [] raise exc
Attempts to describe group ids.
def kill_process(process_name): """ method is called to kill a running process """ try: sys.stdout.write('killing: {0} {{ \n'.format(process_name)) pid = get_process_pid(process_name) if pid is not None and psutil.pid_exists(int(pid)): p = psutil.Process(pid) p.kill() p.wait() remove_pid_file(process_name) except Exception as e: sys.stderr.write('Exception on killing {0} : {1} \n'.format(process_name, e)) finally: sys.stdout.write('}')
method is called to kill a running process
def get_protein_seq_for_transcript(self, transcript_id): """ obtain the sequence for a transcript from ensembl """ headers = {"content-type": "text/plain"} self.attempt = 0 ext = "/sequence/id/{}?type=protein".format(transcript_id) return self.ensembl_request(ext, headers)
obtain the sequence for a transcript from ensembl
def ensure_valid_environment_config(module_name, config): """Exit if config is invalid.""" if not config.get('namespace'): LOGGER.fatal("staticsite: module %s's environment configuration is " "missing a namespace definition!", module_name) sys.exit(1)
Exit if config is invalid.
def parse(args=None): """Defines how to parse CLI arguments for the DomainTools API""" parser = argparse.ArgumentParser(description='The DomainTools CLI API Client') parser.add_argument('-u', '--username', dest='user', default='', help='API Username') parser.add_argument('-k', '--key', dest='key', default='', help='API Key') parser.add_argument('-c', '--credfile', dest='credentials', default=os.path.expanduser('~/.dtapi'), help='Optional file with API username and API key, one per line.') parser.add_argument('-l', '--rate-limit', dest='rate_limit', action='store_true', default=False, help='Rate limit API calls against the API based on per minute limits.') parser.add_argument('-f', '--format', dest='format', choices=['list', 'json', 'xml', 'html'], default='json') parser.add_argument('-o', '--outfile', dest='out_file', type=argparse.FileType('wbU'), default=sys.stdout, help='Output file (defaults to stdout)') parser.add_argument('-v', '--version', action='version', version='DomainTools CLI API Client {0}'.format(version)) parser.add_argument('--no-https', dest='https', action='store_false', default=True, help='Use HTTP instead of HTTPS.') parser.add_argument('--no-verify-ssl', dest='verify_ssl', action='store_false', default=True, help='Skip verification of SSL certificate when making HTTPs API calls') subparsers = parser.add_subparsers(help='The name of the API call you wish to perform (`whois` for example)', dest='api_call') subparsers.required = True for api_call in API_CALLS: api_method = getattr(API, api_call) subparser = subparsers.add_parser(api_call, help=api_method.__name__) spec = inspect.getargspec(api_method) for argument_name, default in reversed(list(zip_longest(reversed(spec.args or []), reversed(spec.defaults or []), fillvalue='EMPTY'))): if argument_name == 'self': continue elif default == 'EMPTY': subparser.add_argument(argument_name) else: subparser.add_argument('--{0}'.format(argument_name.replace('_', '-')), dest=argument_name, default=default, 
nargs='*') arguments = vars(parser.parse_args(args) if args else parser.parse_args()) if not arguments.get('user', None) or not arguments.get('key', None): try: with open(arguments.pop('credentials')) as credentials: arguments['user'], arguments['key'] = credentials.readline().strip(), credentials.readline().strip() except Exception: pass for key, value in arguments.items(): if value in ('-', ['-']): arguments[key] == (line.strip() for line in sys.stdin.readlines()) elif value == []: arguments[key] = True elif type(value) == list and len(value) == 1: arguments[key] = value[0] return (arguments.pop('out_file'), arguments.pop('format'), arguments)
Defines how to parse CLI arguments for the DomainTools API
def retry(self): """ Retry a failed or aborted command. @return: A new ApiCommand object with the updated information. """ path = self._path() + '/retry' resp = self._get_resource_root().post(path) return ApiCommand.from_json_dict(resp, self._get_resource_root())
Retry a failed or aborted command. @return: A new ApiCommand object with the updated information.
def clean_service_url(url): """ Return only the scheme, hostname (with optional port) and path components of the parameter URL. """ parts = urlparse(url) return urlunparse((parts.scheme, parts.netloc, parts.path, '', '', ''))
Return only the scheme, hostname (with optional port) and path components of the parameter URL.
def box_cox(table): """ box-cox transform table """ from scipy.stats import boxcox as bc t = [] for i in table: if min(i) == 0: scale = min([j for j in i if j != 0]) * 10e-10 else: scale = 0 t.append(np.ndarray.tolist(bc(np.array([j + scale for j in i]))[0])) return t
box-cox transform table
def F(Document, __raw__=None, **filters): """Generate a MongoDB filter document through parameter interpolation. Arguments passed by name have their name interpreted as an optional prefix (currently only `not`), a double- underscore Because this utility is likely going to be used frequently it has been given a single-character name. """ ops = Filter(__raw__) args = _process_arguments(Document, FILTER_PREFIX_MAP, FILTER_OPERATION_MAP, filters) for prefix, suffix, field, value in args: if suffix: op = suffix(field, value) else: op = DEFAULT_FILTER(field, value) if prefix: op = prefix(op) ops &= op return ops
Generate a MongoDB filter document through parameter interpolation. Arguments passed by name have their name interpreted as an optional prefix (currently only `not`), a double- underscore Because this utility is likely going to be used frequently it has been given a single-character name.
def make_passwordmanager(schemes=None): """ schemes contains a list of replace this list with the hash(es) you wish to support. this example sets pbkdf2_sha256 as the default, with support for legacy bcrypt hashes. :param schemes: :return: CryptContext() """ from passlib.context import CryptContext if not schemes: schemes = ["pbkdf2_sha256", "bcrypt"] pwd_context = CryptContext(schemes=schemes, deprecated="auto") return pwd_context
schemes contains a list of replace this list with the hash(es) you wish to support. this example sets pbkdf2_sha256 as the default, with support for legacy bcrypt hashes. :param schemes: :return: CryptContext()
def load_command_line_args(clargs=None): """Load and parse command-line arguments. Arguments --------- args : str or None 'Faked' commandline arguments passed to `argparse`. Returns ------- args : `argparse.Namespace` object Namespace in which settings are stored - default values modified by the given command-line arguments. """ import argparse git_vers = get_git() parser = argparse.ArgumentParser( prog='astrocats', description='Generate catalogs for astronomical data.') parser.add_argument('command', nargs='?', default=None) parser.add_argument( '--version', action='version', version='AstroCats v{}, SHA: {}'.format(__version__, git_vers)) parser.add_argument( '--verbose', '-v', dest='verbose', default=False, action='store_true', help='Print more messages to the screen.') parser.add_argument( '--debug', '-d', dest='debug', default=False, action='store_true', help='Print excessive messages to the screen.') parser.add_argument( '--include-private', dest='private', default=False, action='store_true', help='Include private data in import.') parser.add_argument( '--travis', '-t', dest='travis', default=False, action='store_true', help='Run import script in test mode for Travis.') parser.add_argument( '--clone-depth', dest='clone_depth', default=0, type=int, help=('When cloning git repos, only clone out to this depth ' '(default: 0 = all levels).')) parser.add_argument( '--purge-outputs', dest='purge_outputs', default=False, action='store_true', help=('Purge git outputs after cloning.')) parser.add_argument( '--log', dest='log_filename', default=None, help='Filename to which to store logging information.') # If output files should be written or not # ---------------------------------------- write_group = parser.add_mutually_exclusive_group() write_group.add_argument( '--write', action='store_true', dest='write_entries', default=True, help='Write entries to files [default].') write_group.add_argument( '--no-write', action='store_false', dest='write_entries', default=True, 
help='do not write entries to file.') # If previously cleared output files should be deleted or not # ----------------------------------------------------------- delete_group = parser.add_mutually_exclusive_group() delete_group.add_argument( '--predelete', action='store_true', dest='delete_old', default=True, help='Delete all old event files to begin [default].') delete_group.add_argument( '--no-predelete', action='store_false', dest='delete_old', default=True, help='Do not delete all old event files to start.') args, sub_clargs = parser.parse_known_args(args=clargs) # Print the help information if no command is given if args.command is None: parser.print_help() return None, None return args, sub_clargs
Load and parse command-line arguments. Arguments --------- args : str or None 'Faked' commandline arguments passed to `argparse`. Returns ------- args : `argparse.Namespace` object Namespace in which settings are stored - default values modified by the given command-line arguments.
def calculatePathIntegrationError(self, time, dt=None, trajectory=None, envelope=False, inputNoise=None): """ Calculate the error of our path integration, relative to an ideal module. To do this, we track the movement of an individual bump Note that the network must be trained before this is done. :param time: How long to simulate for in seconds. We recommend using a small value, e.g. ~10s. :param trajectory: An optional trajectory that specifies how the network moves. :param inputNoise: Whether or not to apply noise, and how much. :return: A tuple of the true trajectory and the inferred trajectory. """ # Set up plotting if self.plotting: self.fig = plt.figure() self.ax1 = self.fig.add_subplot(411) self.ax2 = self.fig.add_subplot(412) self.ax3 = self.fig.add_subplot(413) self.ax4 = self.fig.add_subplot(414) plt.tight_layout() plt.ion() self.fig.show() self.fig.canvas.draw() mouse = plt.imread(os.path.dirname(os.path.realpath(__file__)) + "/mouse_graphic.png") self.ax1.set_xlabel("Excitatory population activity") self.ax2.set_xlabel("Inhibitory population activity") self.ax3.set_xlabel("Movement in cells") self.ax3.set_ylabel("Cost") self.ax4.set_xlabel("Location") plt.tight_layout() if dt is None: oldDt = self.dt else: oldDt = self.dt self.dt = dt # Simulate for a second to get nice starting activation bumps. # Turn plotting off so as not to confuse the viewer oldPlotting = self.plotting self.plotting = False self.simulate(1, 1, 1, 0, envelope=envelope, inputNoise=None) self.plotting = oldPlotting estimatedVelocities = [] trueVelocities = [] times = np.arange(0, time, self.dt) if trajectory is None: # Sum together two different sinusoidals for a more interesting path. 
trajectory = (np.sin((-times*np.pi/10 - np.pi/2.))+1)*2.5 trajectory += (np.cos((-times*np.pi/3 - np.pi/2.))+1)*.75 velocities = np.diff(trajectory)/self.dt oldActivations = copy.copy(self.activationsI) oldX = trajectory[0] for i, t in enumerate(times[:-1]): v = velocities[i] x = trajectory[i] feedforwardInputI = np.ones(self.activationsI.shape) feedforwardInputE = np.ones(self.activationsEL.shape) if inputNoise is not None: noisesI = np.random.random_sample(feedforwardInputI.shape)*inputNoise noisesE = np.random.random_sample(feedforwardInputE.shape)*inputNoise else: noisesE = 1. noisesI = 1. self.update(feedforwardInputI*noisesI, feedforwardInputE*noisesE, v, True, envelope=envelope) estimationTime = np.abs(np.mod(t, ESTIMATION_INTERVAL)) if estimationTime <= 0.00001 or \ np.abs(estimationTime - ESTIMATION_INTERVAL) <= 0.00001: rotations = [np.sum(np.abs(np.roll(oldActivations, i) - self.activationsI)) for i in range(-10, 11, 1)] shift = np.argmin(rotations) - 10 trueVelocities.append(x - oldX) oldX = x oldActivations = copy.copy(self.activationsI) estimatedVelocities.append(shift) if self.plotting: plotTime = np.abs(np.mod(t, PLOT_INTERVAL)) if plotTime <= 0.00001 or np.abs(plotTime - PLOT_INTERVAL) <= 0.00001: self.ax3.clear() self.ax3.plot(np.arange(-len(rotations)/2 + 1, len(rotations)/2 + 1, 1), rotations, color="g", label="Shift") self.ax3.legend(loc="best") self.ax3.set_xlabel("Movement in cells") self.ax3.set_ylabel("Cost") self.ax3.axvline(x=shift) self.ax4.clear() self.ax4.set_xlim(np.amin(trajectory), np.amax(trajectory)) self.ax4.set_ylim(0, 1) mouse_bound = (x - 0.25*np.sign(v), x + 0.25*np.sign(v), .05, .55) self.ax4.imshow(mouse, aspect='auto', extent=mouse_bound, zorder=-1) self.ax4.set_xlabel("Location") self.ax4.axes.get_yaxis().set_visible(False) self.fig.canvas.draw() self.plotActivation(time=t, velocity=v, boosting=False) self.dt = oldDt return(np.asarray(trueVelocities), np.asarray(estimatedVelocities))
Calculate the error of our path integration, relative to an ideal module. To do this, we track the movement of an individual bump Note that the network must be trained before this is done. :param time: How long to simulate for in seconds. We recommend using a small value, e.g. ~10s. :param trajectory: An optional trajectory that specifies how the network moves. :param inputNoise: Whether or not to apply noise, and how much. :return: A tuple of the true trajectory and the inferred trajectory.
def _parse_args(args: List[str]) -> _UpdateArgumentsRunConfig: """ Parses the given CLI arguments to get a run configuration. :param args: CLI arguments :return: run configuration derived from the given CLI arguments """ parser = argparse.ArgumentParser( prog="gitlab-update-variables", description="Tool for setting a GitLab project's build variables") add_common_arguments(parser) parser.add_argument("config_location", type=str, help="Location of the configuration file") parser.add_argument("--setting-repository", dest="setting_repository", nargs="+", type=str, help="Directory from which variable settings groups may be sourced") parser.add_argument("--default-setting-extension", dest="default_setting_extensions",nargs="+", type=str, help="Extensions to try adding to the variable to source location if it does not exist") arguments = parser.parse_args(args) return _UpdateArgumentsRunConfig( arguments.config_location, arguments.setting_repository, arguments.default_setting_extensions, url=arguments.url, token=arguments.token, debug=arguments.debug)
Parses the given CLI arguments to get a run configuration. :param args: CLI arguments :return: run configuration derived from the given CLI arguments
def calc_loss(self, embedding): """Helper function to calculate rieman loss given new embedding""" Hnew = self.compute_dual_rmetric(Ynew=embedding) return self.rieman_loss(Hnew=Hnew)
Helper function to calculate rieman loss given new embedding