def calculate_size(name, id):
    """ Calculates the request payload size"""
    data_size = 0
    data_size += calculate_size_str(name)
    data_size += calculate_size_str(id)
    return data_size
def clear_list_value(self, value):
    """
    Clean the argument value to eliminate None or Falsy values if needed.
    """
    # Don't go any further: this value is empty.
    if not value:
        return self.empty_value
    # Clean empty items if wanted
    if self.clean_empty:
        value = [v for v in value if v]
    return value or self.empty_value
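A self-contained sketch of the behavior, assuming a hypothetical host class that supplies the `empty_value` and `clean_empty` attributes this method reads:

class ListCleaner:  # hypothetical stand-in for the real host class
    def __init__(self, empty_value=None, clean_empty=True):
        self.empty_value = empty_value
        self.clean_empty = clean_empty

    clear_list_value = clear_list_value  # reuse the function defined above

cleaner = ListCleaner()
print(cleaner.clear_list_value(['a', '', None, 'b']))  # ['a', 'b']
print(cleaner.clear_list_value([None, '']))            # None (falls back to empty_value)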
def _evaluate(self,*args,**kwargs):
    """
    NAME:
       __call__ (_evaluate)
    PURPOSE:
       evaluate the actions (jr,lz,jz)
    INPUT:
       Either:
          a) R,vR,vT,z,vz[,phi]:
             1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)
             2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)
          b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument
       scipy.integrate.quadrature keywords (used when directly evaluating a point off the grid)
    OUTPUT:
       (jr,lz,jz)
    HISTORY:
       2012-07-27 - Written - Bovy (IAS@MPIA)
    NOTE:
       For a Miyamoto-Nagai potential, this seems accurate to 0.1% and takes ~0.13 ms
       For a MWPotential, this takes ~ 0.17 ms
    """
    if len(args) == 5: #R,vR.vT, z, vz
        R,vR,vT, z, vz= args
    elif len(args) == 6: #R,vR.vT, z, vz, phi
        R,vR,vT, z, vz, phi= args
    else:
        self._parse_eval_args(*args)
        R= self._eval_R
        vR= self._eval_vR
        vT= self._eval_vT
        z= self._eval_z
        vz= self._eval_vz
    #First work on the vertical action
    Phi= _evaluatePotentials(self._pot,R,z)
    try:
        Phio= _evaluatePotentials(self._pot,R,numpy.zeros(len(R)))
    except TypeError:
        Phio= _evaluatePotentials(self._pot,R,0.)
    Ez= Phi-Phio+vz**2./2.
    #Bigger than Ezzmax?
    thisEzZmax= numpy.exp(self._EzZmaxsInterp(R))
    if isinstance(R,numpy.ndarray):
        indx= (R > self._Rmax)
        indx+= (R < self._Rmin)
        indx+= (Ez != 0.)*(numpy.log(Ez) > thisEzZmax)
        indxc= True^indx
        jz= numpy.empty(R.shape)
        if numpy.sum(indxc) > 0:
            jz[indxc]= (self._jzInterp.ev(R[indxc],Ez[indxc]/thisEzZmax[indxc])
                        *(numpy.exp(self._jzEzmaxInterp(R[indxc]))-10.**-5.))
        if numpy.sum(indx) > 0:
            jz[indx]= self._aA(R[indx],
                               numpy.zeros(numpy.sum(indx)),
                               numpy.ones(numpy.sum(indx)),#these two r dummies
                               numpy.zeros(numpy.sum(indx)),
                               numpy.sqrt(2.*Ez[indx]),
                               _justjz=True,
                               **kwargs)[2]
    else:
        if R > self._Rmax or R < self._Rmin or (Ez != 0 and numpy.log(Ez) > thisEzZmax): #Outside of the grid
            if _PRINTOUTSIDEGRID: #pragma: no cover
                print("Outside of grid in Ez", R > self._Rmax, R < self._Rmin,
                      (Ez != 0 and numpy.log(Ez) > thisEzZmax))
            jz= self._aA(R,0.,1.,#these two r dummies
                         0.,math.sqrt(2.*Ez),
                         _justjz=True,
                         **kwargs)[2]
        else:
            jz= (self._jzInterp(R,Ez/thisEzZmax)
                 *(numpy.exp(self._jzEzmaxInterp(R))-10.**-5.))[0][0]
    #Radial action
    ERLz= numpy.fabs(R*vT)+self._gamma*jz
    ER= Phio+vR**2./2.+ERLz**2./2./R**2.
    thisRL= self._RLInterp(ERLz)
    thisERRL= -numpy.exp(self._ERRLInterp(ERLz))+self._ERRLmax
    thisERRa= -numpy.exp(self._ERRaInterp(ERLz))+self._ERRamax
    if isinstance(R,numpy.ndarray):
        indx= ((ER-thisERRa)/(thisERRL-thisERRa) > 1.)\
            *(((ER-thisERRa)/(thisERRL-thisERRa)-1.) < 10.**-2.)
        ER[indx]= thisERRL[indx]
        indx= ((ER-thisERRa)/(thisERRL-thisERRa) < 0.)\
            *((ER-thisERRa)/(thisERRL-thisERRa) > -10.**-2.)
        ER[indx]= thisERRa[indx]
        indx= (ERLz < self._Lzmin)
        indx+= (ERLz > self._Lzmax)
        indx+= ((ER-thisERRa)/(thisERRL-thisERRa) > 1.)
        indx+= ((ER-thisERRa)/(thisERRL-thisERRa) < 0.)
        indxc= True^indx
        jr= numpy.empty(R.shape)
        if numpy.sum(indxc) > 0:
            jr[indxc]= (self._jrInterp.ev(ERLz[indxc],
                                          (ER[indxc]-thisERRa[indxc])/(thisERRL[indxc]-thisERRa[indxc]))
                        *(numpy.exp(self._jrERRaInterp(ERLz[indxc]))-10.**-5.))
        if numpy.sum(indx) > 0:
            jr[indx]= self._aA(thisRL[indx],
                               numpy.sqrt(2.*(ER[indx]-_evaluatePotentials(self._pot,thisRL[indx],0.))-ERLz[indx]**2./thisRL[indx]**2.),
                               ERLz[indx]/thisRL[indx],
                               numpy.zeros(len(thisRL)),
                               numpy.zeros(len(thisRL)),
                               _justjr=True,
                               **kwargs)[0]
    else:
        if (ER-thisERRa)/(thisERRL-thisERRa) > 1. \
                and ((ER-thisERRa)/(thisERRL-thisERRa)-1.) < 10.**-2.:
            ER= thisERRL
        elif (ER-thisERRa)/(thisERRL-thisERRa) < 0. \
                and (ER-thisERRa)/(thisERRL-thisERRa) > -10.**-2.:
            ER= thisERRa
        #Outside of grid?
        if ERLz < self._Lzmin or ERLz > self._Lzmax \
                or (ER-thisERRa)/(thisERRL-thisERRa) > 1. \
                or (ER-thisERRa)/(thisERRL-thisERRa) < 0.:
            if _PRINTOUTSIDEGRID: #pragma: no cover
                print("Outside of grid in ER/Lz", ERLz < self._Lzmin, ERLz > self._Lzmax,
                      (ER-thisERRa)/(thisERRL-thisERRa) > 1.,
                      (ER-thisERRa)/(thisERRL-thisERRa) < 0.,
                      ER, thisERRL, thisERRa, (ER-thisERRa)/(thisERRL-thisERRa))
            jr= self._aA(thisRL[0],
                         numpy.sqrt(2.*(ER-_evaluatePotentials(self._pot,thisRL,0.))-ERLz**2./thisRL**2.)[0],
                         (ERLz/thisRL)[0],
                         0.,0.,
                         _justjr=True,
                         **kwargs)[0]
        else:
            jr= (self._jrInterp(ERLz,
                                (ER-thisERRa)/(thisERRL-thisERRa))
                 *(numpy.exp(self._jrERRaInterp(ERLz))-10.**-5.))[0][0]
    return (jr,R*vT,jz)
def post(method, hmc, uri, uri_parms, body, logon_required, wait_for_completion):
    """Operation: Mount ISO Image (requires DPM mode)."""
    assert wait_for_completion is True  # synchronous operation
    partition_oid = uri_parms[0]
    partition_uri = '/api/partitions/' + partition_oid
    try:
        partition = hmc.lookup_by_uri(partition_uri)
    except KeyError:
        raise InvalidResourceError(method, uri)
    cpc = partition.manager.parent
    assert cpc.dpm_enabled
    check_valid_cpc_status(method, uri, cpc)
    check_partition_status(method, uri, partition,
                           invalid_statuses=['starting', 'stopping'])

    # Parse and check required query parameters
    query_parms = parse_query_parms(method, uri, uri_parms[1])
    try:
        image_name = query_parms['image-name']
    except KeyError:
        raise BadRequestError(
            method, uri, reason=1,
            message="Missing required URI query parameter 'image-name'")
    try:
        ins_file_name = query_parms['ins-file-name']
    except KeyError:
        raise BadRequestError(
            method, uri, reason=1,
            message="Missing required URI query parameter 'ins-file-name'")

    # Reflect the effect of mounting in the partition properties
    partition.properties['boot-iso-image-name'] = image_name
    partition.properties['boot-iso-ins-file'] = ins_file_name
    return {}
def kill_zombies(self, zombies, session=None):
    """
    Fail given zombie tasks, which are tasks that haven't
    had a heartbeat for too long, in the current DagBag.

    :param zombies: zombie task instances to kill.
    :type zombies: airflow.utils.dag_processing.SimpleTaskInstance
    :param session: DB session.
    :type session: sqlalchemy.orm.session.Session
    """
    from airflow.models.taskinstance import TaskInstance  # Avoid circular import

    for zombie in zombies:
        if zombie.dag_id in self.dags:
            dag = self.dags[zombie.dag_id]
            if zombie.task_id in dag.task_ids:
                task = dag.get_task(zombie.task_id)
                ti = TaskInstance(task, zombie.execution_date)
                # Get properties needed for failure handling from SimpleTaskInstance.
                ti.start_date = zombie.start_date
                ti.end_date = zombie.end_date
                ti.try_number = zombie.try_number
                ti.state = zombie.state
                ti.test_mode = configuration.getboolean('core', 'unit_test_mode')
                ti.handle_failure("{} detected as zombie".format(ti),
                                  ti.test_mode, ti.get_template_context())
                self.log.info('Marked zombie job %s as %s', ti, ti.state)
                Stats.incr('zombies_killed')
    session.commit()
def iri_to_uri(value, normalize=False):
    """
    Encodes a unicode IRI into an ASCII byte string URI

    :param value: A unicode string of an IRI
    :param normalize: A bool that controls URI normalization
    :return: A byte string of the ASCII-encoded URI
    """
    if not isinstance(value, str_cls):
        raise TypeError(unwrap(
            '''
            value must be a unicode string, not %s
            ''',
            type_name(value)
        ))

    scheme = None
    # Python 2.6 doesn't split properly if the URL doesn't start with http:// or https://
    if sys.version_info < (2, 7) and not value.startswith('http://') and not value.startswith('https://'):
        real_prefix = None
        prefix_match = re.match('^[^:]*://', value)
        if prefix_match:
            real_prefix = prefix_match.group(0)
            value = 'http://' + value[len(real_prefix):]
        parsed = urlsplit(value)
        if real_prefix:
            value = real_prefix + value[7:]
            scheme = _urlquote(real_prefix[:-3])
    else:
        parsed = urlsplit(value)

    if scheme is None:
        scheme = _urlquote(parsed.scheme)
    hostname = parsed.hostname
    if hostname is not None:
        hostname = hostname.encode('idna')
    # RFC 3986 allows userinfo to contain sub-delims
    username = _urlquote(parsed.username, safe='!$&\'()*+,;=')
    password = _urlquote(parsed.password, safe='!$&\'()*+,;=')
    port = parsed.port
    if port is not None:
        port = str_cls(port).encode('ascii')

    netloc = b''
    if username is not None:
        netloc += username
        if password:
            netloc += b':' + password
        netloc += b'@'
    if hostname is not None:
        netloc += hostname
    if port is not None:
        default_http = scheme == b'http' and port == b'80'
        default_https = scheme == b'https' and port == b'443'
        if not normalize or (not default_http and not default_https):
            netloc += b':' + port

    # RFC 3986 allows a path to contain sub-delims, plus "@" and ":"
    path = _urlquote(parsed.path, safe='/!$&\'()*+,;=@:')
    # RFC 3986 allows the query to contain sub-delims, plus "@", ":" , "/" and "?"
    query = _urlquote(parsed.query, safe='/?!$&\'()*+,;=@:')
    # RFC 3986 allows the fragment to contain sub-delims, plus "@", ":" , "/" and "?"
    fragment = _urlquote(parsed.fragment, safe='/?!$&\'()*+,;=@:')

    if normalize and query is None and fragment is None and path == b'/':
        path = None

    # Python 2.7 compat
    if path is None:
        path = ''

    output = urlunsplit((scheme, netloc, path, query, fragment))
    if isinstance(output, str_cls):
        output = output.encode('latin1')
    return output
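A hypothetical usage sketch (the function depends on the module's `str_cls` and `_urlquote` helpers; the expected output assumes UTF-8 percent-encoding of non-ASCII characters):

uri = iri_to_uri(u'https://example.com/caf\xe9?q=r\xe9sum\xe9')
# expected: b'https://example.com/caf%C3%A9?q=r%C3%A9sum%C3%A9'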
def get_directories_re(self, directory_re, full_path=False, ignorecase=False):
    """Same as get_files_re, but for directories"""
    if ignorecase:
        compiled_re = re.compile(directory_re, re.I)
    else:
        compiled_re = re.compile(directory_re)
    found = set()
    if self.handle:
        for member in self.handle.getmembers():
            # zipfiles only list directories => have to work around that
            if isinstance(member, ZipInfo):
                to_match = os.path.dirname(member.name)
            # tarfiles => only match directories
            elif isinstance(member, TarInfo) and member.isdir():
                to_match = member.name
            else:
                to_match = None
            if to_match:
                if ((full_path and compiled_re.search(to_match)) or
                        (not full_path and
                         compiled_re.search(os.path.basename(to_match)))):
                    found.add(to_match)
    return list(found)
def reset(self):
    """Reset the current animation generator."""
    animation_gen = self._frame_function(*self._animation_args,
                                         **self._animation_kwargs)
    self._current_generator = itertools.cycle(
        util.concatechain(animation_gen, self._back_up_generator))
def main():
    """Main loop."""
    parser = optparse.OptionParser()
    parser.add_option(
        '-s', '--server', help='Index Server Name', metavar='SERVER')
    parser.add_option(
        '-r', '--repository', help='Repository URL', metavar='URL')
    parser.add_option(
        '-u', '--username', help='User Name', metavar='USERNAME')
    parser.add_option(
        '-p', '--password', help='Password', metavar='PASSWORD')
    options, _ = parser.parse_args()

    myrc = pypirc.PyPiRC()

    if options.server:
        if myrc.servers:  # we're updating
            server = myrc.servers.get(options.server, {})
        else:
            server = {}
        if options.repository:
            server['repository'] = options.repository
        if options.username:
            server['username'] = options.username
        if options.password:
            server['password'] = options.password
        myrc.servers[options.server] = server
        myrc.save()

    if myrc.servers:
        pprint.pprint(myrc.servers)
    else:
        print '.pypirc Empty!'
def monthdayscalendar(cls, year, month):
    """Return a list of the weeks in the month month of the year as full weeks.

    Weeks are lists of seven day numbers."""
    weeks = []
    week = []
    for day in NepCal.itermonthdays(year, month):
        week.append(day)
        if len(week) == 7:
            weeks.append(week)
            week = []
    if len(week) > 0:
        weeks.append(week)
    return weeks
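An illustrative stand-in for the grouping logic, assuming `NepCal.itermonthdays` behaves like `calendar.Calendar.itermonthdays` (days outside the month padded with zeros):

days = [0, 0] + list(range(1, 31))  # a 30-day month starting 2 weekdays in
weeks = [days[i:i + 7] for i in range(0, len(days), 7)]
print(weeks[0])   # [0, 0, 1, 2, 3, 4, 5]
print(weeks[-1])  # [27, 28, 29, 30] -- the trailing partial week is kept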
def abort_io(self, iocb, err):
    """Forward the abort downstream."""
    if _debug:
        IOChainMixIn._debug("abort_io %r %r", iocb, err)

    # make sure we're being notified of an abort request from
    # the iocb we are chained from
    if iocb is not self.ioChain:
        raise RuntimeError("broken chain")

    # call my own Abort(), which may forward it to a controller or
    # be overridden by IOGroup
    self.abort(err)
def list_resource_record_sets(self, max_results=None, page_token=None, client=None):
    """List resource record sets for this zone.

    See
    https://cloud.google.com/dns/api/v1/resourceRecordSets/list

    :type max_results: int
    :param max_results: Optional. The maximum number of resource record sets
                        to return. Defaults to a sensible value set by the API.

    :type page_token: str
    :param page_token: Optional. If present, return the next batch of resource
                       record sets, using the value, which must correspond to
                       the ``nextPageToken`` value returned in the previous
                       response. Deprecated: use the ``pages`` property of the
                       returned iterator instead of manually passing the token.

    :type client: :class:`google.cloud.dns.client.Client`
    :param client: (Optional) the client to use. If not passed, falls back to
                   the ``client`` stored on the current zone.

    :rtype: :class:`~google.api_core.page_iterator.Iterator`
    :returns: Iterator of :class:`~.resource_record_set.ResourceRecordSet`
              belonging to this zone.
    """
    client = self._require_client(client)
    path = "/projects/%s/managedZones/%s/rrsets" % (self.project, self.name)
    iterator = page_iterator.HTTPIterator(
        client=client,
        api_request=client._connection.api_request,
        path=path,
        item_to_value=_item_to_resource_record_set,
        items_key="rrsets",
        page_token=page_token,
        max_results=max_results,
    )
    iterator.zone = self
    return iterator
def _check_ipcidr_minions(self, expr, greedy):
    '''
    Return the minions found by looking via ipcidr
    '''
    cache_enabled = self.opts.get('minion_data_cache', False)

    if greedy:
        minions = self._pki_minions()
    elif cache_enabled:
        minions = self.cache.list('minions')
    else:
        return {'minions': [], 'missing': []}

    if cache_enabled:
        if greedy:
            cminions = self.cache.list('minions')
        else:
            cminions = minions
        if cminions is None:
            return {'minions': minions, 'missing': []}

        tgt = expr
        try:
            # Target is an address?
            tgt = ipaddress.ip_address(tgt)
        except Exception:
            try:
                # Target is a network?
                tgt = ipaddress.ip_network(tgt)
            except Exception:
                log.error('Invalid IP/CIDR target: %s', tgt)
                return {'minions': [], 'missing': []}
        proto = 'ipv{0}'.format(tgt.version)

        minions = set(minions)
        for id_ in cminions:
            mdata = self.cache.fetch('minions/{0}'.format(id_), 'data')
            if mdata is None:
                if not greedy:
                    minions.remove(id_)
                continue
            grains = mdata.get('grains')
            if grains is None or proto not in grains:
                match = False
            elif isinstance(tgt, (ipaddress.IPv4Address, ipaddress.IPv6Address)):
                match = six.text_type(tgt) in grains[proto]
            else:
                match = salt.utils.network.in_subnet(tgt, grains[proto])

            if not match and id_ in minions:
                minions.remove(id_)

    return {'minions': list(minions), 'missing': []}
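The address-versus-network fallback used above, demonstrated with the standard-library `ipaddress` module (the original catches broad `Exception`; `ValueError` is what actually gets raised):

import ipaddress

for raw in ('192.0.2.10', '192.0.2.0/24', 'not-an-ip'):
    try:
        tgt = ipaddress.ip_address(raw)      # plain address?
    except ValueError:
        try:
            tgt = ipaddress.ip_network(raw)  # CIDR network?
        except ValueError:
            tgt = None                       # invalid target
    print(raw, '->', type(tgt).__name__)
# 192.0.2.10 -> IPv4Address
# 192.0.2.0/24 -> IPv4Network
# not-an-ip -> NoneType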
def get_section_metrics(cls):
    """
    Get the mapping between metrics and sections in Manuscripts report

    :return: a dict with the mapping between metrics and sections in Manuscripts report
    """
    return {
        "overview": {
            "activity_metrics": [Closed, Submitted],
            "author_metrics": None,
            "bmi_metrics": [BMI],
            "time_to_close_metrics": [DaysToMergeMedian],
            "projects_metrics": [Projects],
        },
        "com_channels": {
            "activity_metrics": [],
            "author_metrics": []
        },
        "project_activity": {
            "metrics": [Submitted, Closed]
        },
        "project_community": {
            "author_metrics": [],
            "people_top_metrics": [],
            "orgs_top_metrics": [],
        },
        "project_process": {
            "bmi_metrics": [BMI],
            "time_to_close_metrics": [],
            "time_to_close_title": "",
            "time_to_close_review_metrics": [DaysToMergeAverage, DaysToMergeMedian],
            "time_to_close_review_title": "Days to close review (median and average)",
            "patchsets_metrics": [PatchsetsMedian, PatchsetsAverage],
            "patchsets_title": "Number of patchsets per review (median and average)"
        }
    }
def get_device(self):
    """
    Returns a reference to the device that is represented by this node.
    Returns None if no such device can be determined.
    """
    addr = self.address
    servers = [server for server in pyrax.cloudservers.list()
               if addr in server.networks.get("private", "")]
    try:
        return servers[0]
    except IndexError:
        return None
def get_impala_queries(self, start_time, end_time, filter_str="", limit=100, offset=0):
    """
    Returns a list of queries that satisfy the filter

    @type start_time: datetime.datetime. Note that the datetime must either be
        time zone aware or specified in the server time zone. See the python
        datetime documentation for more details about python's time zone handling.
    @param start_time: Queries must have ended after this time
    @type end_time: datetime.datetime. Note that the datetime must either be
        time zone aware or specified in the server time zone. See the python
        datetime documentation for more details about python's time zone handling.
    @param end_time: Queries must have started before this time
    @param filter_str: A filter to apply to the queries. For example:
        'user = root and queryDuration > 5s'
    @param limit: The maximum number of results to return
    @param offset: The offset into the return list
    @since: API v4
    """
    params = {
        'from': start_time.isoformat(),
        'to': end_time.isoformat(),
        'filter': filter_str,
        'limit': limit,
        'offset': offset,
    }
    return self._get("impalaQueries", ApiImpalaQueryResponse, params=params,
                     api_version=4)
def post(self, request, key):
    """
    Process notification hooks:

    1. Obtain Hook
    2. Check Auth
    3. Delay processing to a task
    4. Respond requester
    """
    try:
        hook = Hook.objects.get(key=key, enabled=True)
    except Hook.DoesNotExist:
        msg = _("Key %s not associated to an enabled hook or bot") % key
        logger.warning(msg)
        return Response(msg, status=status.HTTP_404_NOT_FOUND)
    if hook.bot.owner != request.user:
        raise exceptions.AuthenticationFailed()
    try:
        parsed_data = request.data
        logger.debug("Hook %s attending request %s" % (hook, parsed_data))
        handle_hook.delay(hook.id, parsed_data)
    except ParseError as e:
        return Response(str(e), status=status.HTTP_400_BAD_REQUEST)
    except:
        exc_info = sys.exc_info()
        traceback.print_exception(*exc_info)
        msg = _("Error processing %s for key %s") % (request.data, key)
        logger.error(msg)
        return Response(msg, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    else:
        return Response(status=status.HTTP_200_OK)
def bind(self, **config):
    """Bind all unbound types to the engine.

    Bind each unbound typedef to the engine, passing in the engine and
    :attr:`config`. The resulting ``load`` and ``dump`` functions can be
    found under ``self.bound_types[typedef]["load"]`` and
    ``self.bound_types[typedef]["dump"]``, respectively.

    Parameters
    ----------
    config : dict, optional
        Engine-binding configuration to pass to each typedef that will be
        bound. Examples include floating-point precision values, maximum
        lengths for strings, or any other translation constraints/settings
        that a typedef needs to construct a load/dump function pair.
    """
    while self.unbound_types:
        typedef = self.unbound_types.pop()
        try:
            load, dump = typedef.bind(self, **config)
            self.bound_types[typedef] = {"load": load, "dump": dump}
        except Exception:
            self.unbound_types.add(typedef)
            raise
def delete(self, primary_key):
    '''
    a method to delete a record in the table

    :param primary_key: string with primary key of record
    :return: string with status message
    '''
    title = '%s.delete' % self.__class__.__name__

    # delete object
    delete_statement = self.table.delete(self.table.c.id == primary_key)
    self.session.execute(delete_statement)

    # return message
    exit_msg = '%s has been deleted.' % primary_key
    return exit_msg
def copy_ssh_keys_to_hosts(self, hosts, known_hosts=DEFAULT_KNOWN_HOSTS, dry=False):
    """
    Copy the SSH keys to the given hosts.

    :param hosts: the list of `Host` objects to copy the SSH keys to.
    :param known_hosts: the `known_hosts` file to store the SSH public keys.
    :param dry: perform a dry run.
    :raise msshcopyid.errors.CopySSHKeysError:
    """
    exceptions = []  # list of `CopySSHKeyError`
    for host in hosts:
        logger.info('[%s] Copy the SSH public key [%s]...', host.hostname,
                    self.sshcopyid.pub_key)
        if not dry:
            try:
                self.copy_ssh_keys_to_host(host, known_hosts=known_hosts)
            except (paramiko.ssh_exception.SSHException, socket.error) as ex:
                logger.error(format_error(format_exception(ex)))
                logger.debug(traceback.format_exc())
                exceptions.append(CopySSHKeyError(host=host, exception=ex))

    if exceptions:
        raise CopySSHKeysError(exceptions=exceptions)
def alterar(self, id_user_group, name, read, write, edit, remove):
    """Edit user group data from its identifier.

    :param id_user_group: User group id.
    :param name: User group name.
    :param read: If user group has read permission ('S' or 'N').
    :param write: If user group has write permission ('S' or 'N').
    :param edit: If user group has edit permission ('S' or 'N').
    :param remove: If user group has remove permission ('S' or 'N').

    :return: None

    :raise NomeGrupoUsuarioDuplicadoError: User group name already exists.
    :raise ValorIndicacaoPermissaoInvalidoError: Read, write, edit or remove value is invalid.
    :raise GrupoUsuarioNaoExisteError: User Group not found.
    :raise InvalidParameterError: At least one of the parameters is invalid or none.
    :raise DataBaseError: Networkapi failed to access database.
    :raise XMLError: Networkapi fails generating response XML.
    """
    if not is_valid_int_param(id_user_group):
        raise InvalidParameterError(
            u'Invalid or inexistent user group id.')

    url = 'ugroup/' + str(id_user_group) + '/'

    ugroup_map = dict()
    ugroup_map['nome'] = name
    ugroup_map['leitura'] = read
    ugroup_map['escrita'] = write
    ugroup_map['edicao'] = edit
    ugroup_map['exclusao'] = remove

    code, xml = self.submit({'user_group': ugroup_map}, 'PUT', url)

    return self.response(code, xml)
def exit_success(jid, ext_source=None):
    '''
    Check if a job has been executed and exit successfully

    jid
        The jid to look up.
    ext_source
        The external job cache to use. Default: `None`.

    CLI Example:

    .. code-block:: bash

        salt-run jobs.exit_success 20160520145827701627
    '''
    ret = dict()

    data = list_job(jid, ext_source=ext_source)

    minions = data.get('Minions', [])
    result = data.get('Result', {})

    for minion in minions:
        if minion in result and 'return' in result[minion]:
            ret[minion] = True if result[minion]['return'] else False
        else:
            ret[minion] = False

    for minion in result:
        if 'return' in result[minion] and result[minion]['return']:
            ret[minion] = True

    return ret
def list_nodes_full(**kwargs):
    '''
    Return all data on nodes
    '''
    nodes = _query('server/list')
    ret = {}

    for node in nodes:
        name = nodes[node]['label']
        ret[name] = nodes[node].copy()
        ret[name]['id'] = node
        ret[name]['image'] = nodes[node]['os']
        ret[name]['size'] = nodes[node]['VPSPLANID']
        ret[name]['state'] = nodes[node]['status']
        ret[name]['private_ips'] = nodes[node]['internal_ip']
        ret[name]['public_ips'] = nodes[node]['main_ip']

    return ret
def gen_sponsor_schedule(user, sponsor=None, num_blocks=6, surrounding_blocks=None, given_date=None):
    r"""Return a list of :class:`EighthScheduledActivity`\s in which the
    given user is sponsoring.

    Returns:
        Dictionary with:
            activities
            no_attendance_today
            num_acts
    """
    no_attendance_today = None
    acts = []

    if sponsor is None:
        sponsor = user.get_eighth_sponsor()

    if surrounding_blocks is None:
        surrounding_blocks = EighthBlock.objects.get_upcoming_blocks(num_blocks)

    activities_sponsoring = (EighthScheduledActivity.objects.for_sponsor(sponsor)
                             .select_related("block")
                             .filter(block__in=surrounding_blocks))
    sponsoring_block_map = {}
    for sa in activities_sponsoring:
        bid = sa.block.id
        if bid in sponsoring_block_map:
            sponsoring_block_map[bid] += [sa]
        else:
            sponsoring_block_map[bid] = [sa]

    num_acts = 0

    for b in surrounding_blocks:
        num_added = 0
        sponsored_for_block = sponsoring_block_map.get(b.id, [])

        for schact in sponsored_for_block:
            acts.append(schact)
            if schact.block.is_today():
                if not schact.attendance_taken and schact.block.locked:
                    no_attendance_today = True

            num_added += 1

        if num_added == 0:
            # fake an entry for a block where there is no sponsorship
            acts.append({"block": b, "id": None, "fake": True})
        else:
            num_acts += 1

    logger.debug(acts)

    cur_date = surrounding_blocks[0].date if acts else given_date if given_date else datetime.now().date()

    last_block = surrounding_blocks[len(surrounding_blocks) - 1] if surrounding_blocks else None
    last_block_date = last_block.date + timedelta(days=1) if last_block else cur_date
    next_blocks = list(last_block.next_blocks(1)) if last_block else None
    next_date = next_blocks[0].date if next_blocks else last_block_date

    first_block = surrounding_blocks[0] if surrounding_blocks else None
    if cur_date and not first_block:
        first_block = EighthBlock.objects.filter(date__lte=cur_date).last()
    first_block_date = first_block.date + timedelta(days=-7) if first_block else cur_date
    prev_blocks = list(first_block.previous_blocks(num_blocks - 1)) if first_block else None
    prev_date = prev_blocks[0].date if prev_blocks else first_block_date

    return {
        "sponsor_schedule": acts,
        "no_attendance_today": no_attendance_today,
        "num_attendance_acts": num_acts,
        "sponsor_schedule_cur_date": cur_date,
        "sponsor_schedule_next_date": next_date,
        "sponsor_schedule_prev_date": prev_date
    }
def get_localhost():
    '''
    Should return 127.0.0.1 in ipv4 and ::1 in ipv6

    localhost is not used because on windows vista/windows 7, there can be
    issues where the resolving doesn't work properly and takes a lot of time
    (had this issue on the pyunit server).

    Using the IP directly solves the problem.
    '''
    # TODO: Needs better investigation!
    global _cache
    if _cache is None:
        try:
            for addr_info in socket.getaddrinfo("localhost", 80, 0, 0, socket.SOL_TCP):
                config = addr_info[4]
                if config[0] == '127.0.0.1':
                    _cache = '127.0.0.1'
                    return _cache
        except:
            # Ok, some versions of Python don't have getaddrinfo or SOL_TCP...
            # Just consider it 127.0.0.1 in this case.
            _cache = '127.0.0.1'
        else:
            _cache = 'localhost'
    return _cache
def _table_sort_by(table, sort_exprs):
    """
    Sort table by the indicated column expressions and sort orders
    (ascending/descending)

    Parameters
    ----------
    sort_exprs : sorting expressions
      Must be one of:
        - Column name or expression
        - Sort key, e.g. desc(col)
        - (column name, True (ascending) / False (descending))

    Examples
    --------
    >>> import ibis
    >>> t = ibis.table([('a', 'int64'), ('b', 'string')])
    >>> ab_sorted = t.sort_by([('a', True), ('b', False)])

    Returns
    -------
    sorted : TableExpr
    """
    result = table.op().sort_by(table, sort_exprs)
    return result.to_expr()
def set_language(self):
    """Parses feed language and sets value"""
    try:
        self.language = self.soup.find('language').string
    except AttributeError:
        self.language = None
def output_results(results, mvdelim='\n', output=sys.stdout):
    """Given a list of dictionaries, each representing a single result, and an
    optional list of fields, output those results to stdout for consumption by
    the Splunk pipeline"""
    # We collect all the unique field names, as well as
    # convert all multivalue keys to the right form
    fields = set()
    for result in results:
        for key in result.keys():
            if isinstance(result[key], list):
                result['__mv_' + key] = encode_mv(result[key])
                result[key] = mvdelim.join(result[key])
        fields.update(list(result.keys()))

    # convert the fields into a list and create a CSV writer
    # to output to stdout
    fields = sorted(list(fields))
    writer = csv.DictWriter(output, fields)

    # Write out the fields, and then the actual results
    writer.writerow(dict(list(zip(fields, fields))))
    writer.writerows(results)
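A minimal usage sketch; `encode_mv` comes from the same module as the function above, so a stand-in encoder is defined here purely for illustration:

import io

def encode_mv(values):  # hypothetical stand-in for the module's encoder
    return ';'.join(values)

buf = io.StringIO()
output_results([{'host': 'web-1', 'ports': ['80', '443']}], output=buf)
print(buf.getvalue())  # CSV header row, then the result row including '__mv_ports'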
def dump(
    state, host, remote_filename, database=None,
    # Details for speaking to PostgreSQL via `psql` CLI
    postgresql_user=None, postgresql_password=None,
    postgresql_host=None, postgresql_port=None,
):
    '''
    Dump a PostgreSQL database into a ``.sql`` file. Requires ``pg_dump``.

    + database: name of the database to dump
    + remote_filename: name of the file to dump the SQL to
    + postgresql_*: global module arguments, see above
    '''
    yield '{0} > {1}'.format(make_psql_command(
        executable='pg_dump',
        database=database,
        user=postgresql_user,
        password=postgresql_password,
        host=postgresql_host,
        port=postgresql_port,
    ), remote_filename)
def clean(self: 'TSelf', *, atol: float = 1e-9) -> 'TSelf':
    """Remove terms with coefficients of absolute value atol or less."""
    negligible = [v for v, c in self._terms.items() if abs(c) <= atol]
    for v in negligible:
        del self._terms[v]
    return self
def safe_filename(self, otype, oid):
    """Sanitize obj name into fname and verify doesn't already exist"""
    permitted = set(['_', '-', '(', ')'])
    oid = ''.join([c for c in oid if c.isalnum() or c in permitted])
    while oid.find('--') != -1:
        oid = oid.replace('--', '-')
    ext = 'json'
    ts = datetime.now().strftime("%Y%m%dT%H%M%S")
    fname = ''
    is_new = False
    while not is_new:
        oid_len = 255 - len('%s--%s.%s' % (otype, ts, ext))
        fname = '%s-%s-%s.%s' % (otype, oid[:oid_len], ts, ext)
        is_new = True
        if os.path.exists(fname):
            is_new = False
            ts += '-bck'
    return fname
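The sanitization step in isolation (the timestamp and collision handling above are omitted); disallowed characters are dropped and doubled dashes collapsed:

permitted = set(['_', '-', '(', ')'])
oid = 'User Report (v2)!!'
oid = ''.join([c for c in oid if c.isalnum() or c in permitted])
while oid.find('--') != -1:
    oid = oid.replace('--', '-')
print(oid)  # UserReport(v2)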
def _resetID(self):
    """Reset all ID fields."""
    # Dirty.. .=))
    self._setID((None,) * len(self._sqlPrimary))
    self._new = True
def load_from_cache(path=user_path):
    '''
    Try to load category ranges from userlevel cache file.

    :param path: path to userlevel cache file
    :type path: str
    :returns: category ranges dict or None
    :rtype: None or dict of RangeGroup
    '''
    if not path:
        return
    try:
        with open(path, 'rb') as f:
            dversion, mversion, data = pickle.load(f)
        if dversion == data_version and mversion == module_version:
            return data
    except (FileNotFoundError, ValueError, EOFError):
        pass
def evaluate(self, sequence, transformations):
    """
    Execute the sequence of transformations in parallel

    :param sequence: Sequence to evaluate
    :param transformations: Transformations to apply
    :return: Resulting sequence or value
    """
    result = sequence
    parallel = partial(parallelize, processes=self.processes,
                       partition_size=self.partition_size)
    staged = []
    for transform in transformations:
        strategies = transform.execution_strategies or {}
        if ExecutionStrategies.PARALLEL in strategies:
            staged.insert(0, transform.function)
        else:
            if staged:
                result = parallel(compose(*staged), result)
                staged = []
            if ExecutionStrategies.PRE_COMPUTE in strategies:
                result = list(result)
            result = transform.function(result)
    if staged:
        result = parallel(compose(*staged), result)
    return iter(result)
def support_jsonp(api_instance, callback_name_source='callback'):
    """Let an API instance respond to jsonp requests automatically.

    `callback_name_source` can be a string or a callback.
    If it is a string, the system will find the argument named by this string
    in the `query string`. If found, the request is determined to be a jsonp
    request, and the argument's value is used as the js callback name.

    If `callback_name_source` is a callback, this callback should return the
    js callback name when the request is a jsonp request, and return False
    when it is not. The system will handle the request according to its
    return value.

    Default supported format: url?callback=js_callback_name
    """
    output_json = api_instance.representations['application/json']

    @api_instance.representation('application/json')
    def handle_jsonp(data, code, headers=None):
        resp = output_json(data, code, headers)

        if code == 200:
            callback = request.args.get(callback_name_source, False) \
                if not callable(callback_name_source) else callback_name_source()
            if callback:
                resp.set_data(str(callback) + '(' +
                              resp.get_data().decode("utf-8") + ')')

        return resp
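A hypothetical wiring sketch, assuming a Flask-RESTful style `Api` object (which is what the `representations` dict and `representation` decorator suggest):

from flask import Flask
from flask_restful import Api

app = Flask(__name__)
api = Api(app)
support_jsonp(api)
# GET /resource?callback=cb now returns cb({...}) instead of bare JSON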
def text(self):
    """Return the string to render."""
    if callable(self._text):
        return str(self._text())
    return str(self._text)
def get_processes(self):
    """
    Grab a shuffled list of all currently running process names
    """
    procs = set()

    try:
        # POSIX ps, so it should work in most environments where doge would
        p = sp.Popen(['ps', '-A', '-o', 'comm='], stdout=sp.PIPE)
        output, error = p.communicate()

        if sys.version_info > (3, 0):
            output = output.decode('utf-8')

        for comm in output.split('\n'):
            name = comm.split('/')[-1]
            # Filter short and weird ones
            if name and len(name) >= 2 and ':' not in name:
                procs.add(name)

    finally:
        # Either it executed properly or no ps was found.
        proc_list = list(procs)
        random.shuffle(proc_list)
        return proc_list
def decode(self, encoded):
    """Decodes ``encoded`` label.

    Args:
        encoded (torch.Tensor): Encoded label.

    Returns:
        object: Label decoded from ``encoded``.
    """
    encoded = super().decode(encoded)
    if encoded.numel() > 1:
        raise ValueError(
            '``decode`` decodes one label at a time, use ``batch_decode`` instead.')
    return self.itos[encoded.squeeze().item()]
def _validate(self, all_valid_addresses):
    """Validate that all of the dependencies in the graph exist in the given addresses set."""
    for dependency, dependents in iteritems(self._dependent_address_map):
        if dependency not in all_valid_addresses:
            raise AddressLookupError(
                'Dependent graph construction failed: {} did not exist. Was depended on by:\n  {}'.format(
                    dependency.spec,
                    '\n  '.join(d.spec for d in dependents)
                )
            )
def plot_gos(fout_png, goids, obo_dag, *args, **kws):
    """Given GO ids and the obo_dag, create a plot of paths from GO ids."""
    engine = kws['engine'] if 'engine' in kws else 'pydot'
    godagsmall = OboToGoDagSmall(goids=goids, obodag=obo_dag).godag
    godagplot = GODagSmallPlot(godagsmall, *args, **kws)
    godagplot.plt(fout_png, engine)
def format_header_cell(val):
    """
    Formats given header column. This involves changing '_Px_' to '(',
    '_xP_' to ')' and all other '_' to spaces.
    """
    return re.sub('_', ' ',
                  re.sub(r'(_Px_)', '(',
                         re.sub(r'(_xP_)', ')', str(val))))
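A worked example of the three substitutions applied in order ('_xP_' to ')', then '_Px_' to '(', then the remaining underscores to spaces):

print(format_header_cell('Flow_Rate_Px_ml_per_min_xP_'))
# Flow Rate(ml per min)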
def get_stops(records, group_dist):
    """
    Group records around stop locations and return a list of
    dict(location, records) for each stop.

    Parameters
    ----------
    records : list
        A list of Record objects ordered by non-decreasing datetime
    group_dist : float
        Minimum distance (in meters) to switch to a new stop.
    """
    def traverse(start, next):
        position_prev = records[next - 1].position.location
        position_next = records[next].position.location
        dist = 1000 * great_circle_distance(position_prev, position_next)
        return dist <= group_dist

    groups = _groupwhile(records, traverse)

    def median(x):
        return sorted(x)[len(x) // 2]

    stops = []
    for g in groups:
        _lat = median([gv.position.location[0] for gv in g])
        _lon = median([gv.position.location[1] for gv in g])

        stops.append({
            'location': (_lat, _lon),
            'records': g,
        })

    return stops
def round(self, multiple=1):
    """
    Rounds the kerning values to increments of **multiple**,
    which will be an ``int``.

    The default behavior is to round to increments of 1.
    """
    if not isinstance(multiple, int):
        raise TypeError("The round multiple must be an int not %s."
                        % multiple.__class__.__name__)
    self._round(multiple)
def popitem(self):
    """Remove and return the `(key, value)` pair least recently used that
    has not already expired.
    """
    with self.__timer as time:
        self.expire(time)
        try:
            key = next(iter(self.__links))
        except StopIteration:
            raise KeyError('%s is empty' % self.__class__.__name__)
        else:
            return (key, self.pop(key))
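This matches the `cachetools.TTLCache` API, so a usage sketch under that assumption:

import time
from cachetools import TTLCache

cache = TTLCache(maxsize=2, ttl=0.5)
cache['a'] = 1
cache['b'] = 2
print(cache.popitem())  # ('a', 1)
time.sleep(0.6)
try:
    cache.popitem()  # everything has expired by now
except KeyError as exc:
    print(exc)  # 'TTLCache is empty'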
def methods(self, methods):
    """Setter method; for a description see the getter method."""
    # We make sure that the dictionary is a NocaseDict object, and that the
    # property values are CIMMethod objects:
    # pylint: disable=attribute-defined-outside-init
    self._methods = NocaseDict()
    if methods:
        try:
            # This is used for mappings (dictionaries):
            iterator = methods.items()
        except AttributeError:
            # This is used for other iterables:
            iterator = methods
        for item in iterator:
            if isinstance(item, CIMMethod):
                key = item.name
                value = item
            elif isinstance(item, tuple):
                key, value = item
            else:
                raise TypeError(
                    _format("Input object for methods has invalid item in "
                            "iterable: {0!A}", item))
            self.methods[key] = _cim_method(key, value)
def runner(self):
    """
    Run the necessary methods in the correct order
    """
    printtime('Starting mashsippr analysis pipeline', self.starttime)
    if not self.pipeline:
        # Create the objects to be used in the analyses
        objects = Objectprep(self)
        objects.objectprep()
        self.runmetadata = objects.samples
    # Run the analyses
    Mash(self, self.analysistype)
def GpsSecondsFromPyUTC(pyUTC, leapSecs=14):
    """converts the python epoch to gps seconds

    pyUTC = the python epoch from time.time()
    """
    t = gpsFromUTC(*ymdhmsFromPyUTC(pyUTC))
    return int(t[0] * 60 * 60 * 24 * 7 + t[1])
def _combine(self, other, conn='and'):
    """
    OR and AND will create a new F, with the filters from both F
    objects combined with the connector `conn`.
    """
    f = F()

    self_filters = copy.deepcopy(self.filters)
    other_filters = copy.deepcopy(other.filters)

    if not self.filters:
        f.filters = other_filters
    elif not other.filters:
        f.filters = self_filters
    elif conn in self.filters[0]:
        f.filters = self_filters
        f.filters[0][conn].extend(other_filters)
    elif conn in other.filters[0]:
        f.filters = other_filters
        f.filters[0][conn].extend(self_filters)
    else:
        f.filters = [{conn: self_filters + other_filters}]
    return f
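The resulting filter shape for the general case (neither operand already keyed by the connector), assuming elasticsearch-style filter dicts:

self_filters = [{'term': {'status': 'active'}}]
other_filters = [{'term': {'kind': 'post'}}]
combined = [{'and': self_filters + other_filters}]
print(combined)
# [{'and': [{'term': {'status': 'active'}}, {'term': {'kind': 'post'}}]}]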
def init_app(self, app):
    """Configures the specified Flask app to enforce SSL."""
    app.config.setdefault('SSLIFY_AGE', self.defaults['age'])
    app.config.setdefault('SSLIFY_SUBDOMAINS', self.defaults['subdomains'])
    app.config.setdefault('SSLIFY_PERMANENT', self.defaults['permanent'])
    app.config.setdefault('SSLIFY_SKIPS', self.defaults['skips'])

    app.before_request(self.redirect_to_ssl)
    app.after_request(self.set_hsts_header)
def getlist(self, section, option, *, raw=False, vars=None, fallback=None):
    """Return the [section] option values as a list.
    The list items must be delimited with commas and/or newlines.
    """
    val = self.get(section, option, raw=raw, vars=vars, fallback=fallback)
    values = []
    if val:
        for line in val.split("\n"):
            values += [s.strip() for s in line.split(",")]
    return values
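The `get(...)` call above has the `configparser.ConfigParser.get` signature, so a subclass sketch demonstrates the comma/newline splitting:

import configparser

class ListConfigParser(configparser.ConfigParser):
    def getlist(self, section, option, *, raw=False, vars=None, fallback=None):
        val = self.get(section, option, raw=raw, vars=vars, fallback=fallback)
        values = []
        if val:
            for line in val.split("\n"):
                values += [s.strip() for s in line.split(",")]
        return values

cp = ListConfigParser()
cp.read_string('[paths]\ninclude = src, tests\n    docs\n')
print(cp.getlist('paths', 'include'))  # ['src', 'tests', 'docs']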
def get_search_engine(index=None):
    """
    Returns the desired implementor (defined in settings)
    """
    search_engine_class = _load_class(getattr(settings, "SEARCH_ENGINE", None), None)
    return search_engine_class(index=index) if search_engine_class else None
def IsDirectory(self):
    """Determines if the file entry is a directory.

    Returns:
        bool: True if the file entry is a directory.
    """
    if self._stat_object is None:
        self._stat_object = self._GetStat()
    if self._stat_object is not None:
        self.entry_type = self._stat_object.type
    return self.entry_type == definitions.FILE_ENTRY_TYPE_DIRECTORY
def _create_download_failed_message(exception, url):
    """ Creates message describing why download has failed

    :param exception: Exception raised during download
    :type exception: Exception
    :param url: An URL from where download was attempted
    :type url: str
    :return: Error message
    :rtype: str
    """
    message = 'Failed to download from:\n{}\nwith {}:\n{}'.format(url, exception.__class__.__name__, exception)

    if _is_temporal_problem(exception):
        if isinstance(exception, requests.ConnectionError):
            message += '\nPlease check your internet connection and try again.'
        else:
            message += '\nThere might be a problem in connection or the server failed to process ' \
                       'your request. Please try again.'
    elif isinstance(exception, requests.HTTPError):
        try:
            server_message = ''
            for elem in decode_data(exception.response.content, MimeType.XML):
                if 'ServiceException' in elem.tag or 'Message' in elem.tag:
                    server_message += elem.text.strip('\n\t ')
        except ElementTree.ParseError:
            server_message = exception.response.text
        message += '\nServer response: "{}"'.format(server_message)

    return message
def set_args(self, args, unknown_args=None):
    """
    Configure job, based on the arguments provided.
    """
    if unknown_args is None:
        unknown_args = []
    self.logger.setLevel(getattr(logging, args.log_level))

    parent = hdfs.path.dirname(hdfs.path.abspath(args.output.rstrip("/")))
    self.remote_wd = hdfs.path.join(
        parent, utils.make_random_str(prefix="pydoop_submit_")
    )
    self.remote_exe = hdfs.path.join(self.remote_wd, str(uuid.uuid4()))
    self.properties[JOB_NAME] = args.job_name or 'pydoop'
    self.properties[IS_JAVA_RR] = (
        'false' if args.do_not_use_java_record_reader else 'true'
    )
    self.properties[IS_JAVA_RW] = (
        'false' if args.do_not_use_java_record_writer else 'true'
    )
    self.properties[JOB_REDUCES] = args.num_reducers
    if args.job_name:
        self.properties[JOB_NAME] = args.job_name
    self.properties.update(args.job_conf or {})
    self.__set_files_to_cache(args)
    self.__set_archives_to_cache(args)
    self.requested_env = self._env_arg_to_dict(args.set_env or [])
    self.args = args
    self.unknown_args = unknown_args
def execute(
    mapchete_files,
    zoom=None,
    bounds=None,
    point=None,
    wkt_geometry=None,
    tile=None,
    overwrite=False,
    multi=None,
    input_file=None,
    logfile=None,
    verbose=False,
    no_pbar=False,
    debug=False,
    max_chunksize=None,
    vrt=False,
    idx_out_dir=None
):
    """Execute a Mapchete process."""
    multi = multi if multi else cpu_count()
    mode = "overwrite" if overwrite else "continue"

    # send verbose messages to /dev/null if not activated
    if debug or not verbose:
        verbose_dst = open(os.devnull, 'w')
    else:
        verbose_dst = sys.stdout

    for mapchete_file in mapchete_files:
        tqdm.tqdm.write("preparing to process %s" % mapchete_file, file=verbose_dst)
        with click_spinner.spinner(disable=debug) as spinner:
            # process single tile
            if tile:
                tile = raw_conf_process_pyramid(raw_conf(mapchete_file)).tile(*tile)
                with mapchete.open(
                    mapchete_file,
                    mode=mode,
                    bounds=tile.bounds,
                    zoom=tile.zoom,
                    single_input_file=input_file
                ) as mp:
                    spinner.stop()
                    tqdm.tqdm.write("processing 1 tile", file=verbose_dst)
                    # run process on tile
                    for result in mp.batch_processor(tile=tile):
                        utils.write_verbose_msg(result, dst=verbose_dst)
                    tqdm.tqdm.write(
                        "processing %s finished" % mapchete_file, file=verbose_dst
                    )
                    # write VRT index
                    if vrt:
                        tqdm.tqdm.write("creating VRT", file=verbose_dst)
                        for tile in tqdm.tqdm(
                            zoom_index_gen(
                                mp=mp,
                                zoom=tile.zoom,
                                out_dir=(
                                    idx_out_dir if idx_out_dir
                                    else mp.config.output.path
                                ),
                                vrt=vrt,
                            ),
                            total=mp.count_tiles(tile.zoom, tile.zoom),
                            unit="tile",
                            disable=debug or no_pbar
                        ):
                            logger.debug("%s indexed", tile)
                        tqdm.tqdm.write(
                            "VRT(s) creation for %s finished" % mapchete_file,
                            file=verbose_dst
                        )
            # process area
            else:
                with mapchete.open(
                    mapchete_file,
                    mode=mode,
                    zoom=zoom,
                    bounds=bounds_from_opts(
                        wkt_geometry=wkt_geometry,
                        point=point,
                        bounds=bounds,
                        raw_conf=raw_conf(mapchete_file)
                    ),
                    single_input_file=input_file
                ) as mp:
                    spinner.stop()
                    tiles_count = mp.count_tiles(
                        min(mp.config.init_zoom_levels),
                        max(mp.config.init_zoom_levels)
                    )
                    tqdm.tqdm.write(
                        "processing %s tile(s) on %s worker(s)" % (tiles_count, multi),
                        file=verbose_dst
                    )
                    # run process on tiles
                    for process_info in tqdm.tqdm(
                        mp.batch_processor(
                            multi=multi, zoom=zoom, max_chunksize=max_chunksize
                        ),
                        total=tiles_count,
                        unit="tile",
                        disable=debug or no_pbar
                    ):
                        utils.write_verbose_msg(process_info, dst=verbose_dst)
                    tqdm.tqdm.write(
                        "processing %s finished" % mapchete_file, file=verbose_dst
                    )
                    # write VRT index
                    if vrt:
                        tqdm.tqdm.write("creating VRT(s)", file=verbose_dst)
                        for tile in tqdm.tqdm(
                            zoom_index_gen(
                                mp=mp,
                                zoom=mp.config.init_zoom_levels,
                                out_dir=(
                                    idx_out_dir if idx_out_dir
                                    else mp.config.output.path
                                ),
                                vrt=vrt
                            ),
                            total=mp.count_tiles(
                                min(mp.config.init_zoom_levels),
                                max(mp.config.init_zoom_levels)
                            ),
                            unit="tile",
                            disable=debug or no_pbar
                        ):
                            logger.debug("%s indexed", tile)
                        tqdm.tqdm.write(
                            "VRT(s) creation for %s finished" % mapchete_file,
                            file=verbose_dst
                        )
def write_text_files(args, infilenames, outfilename):
    """Write text file(s) to disk.

    Keyword arguments:
    args -- program arguments (dict)
    infilenames -- names of user-inputted and/or downloaded files (list)
    outfilename -- name of output text file (str)
    """
    if not outfilename.endswith('.txt'):
        outfilename = outfilename + '.txt'
    outfilename = overwrite_file_check(args, outfilename)

    all_text = []  # Text must be aggregated if writing to a single output file
    for i, infilename in enumerate(infilenames):
        parsed_text = get_parsed_text(args, infilename)
        if parsed_text:
            if args['multiple']:
                if not args['quiet']:
                    print('Attempting to write to {0}.'.format(outfilename))
                write_file(parsed_text, outfilename)
            elif args['single']:
                all_text += parsed_text
                # Newline added between multiple files being aggregated
                if len(infilenames) > 1 and i < len(infilenames) - 1:
                    all_text.append('\n')

    # Write all text to a single output file
    if args['single'] and all_text:
        if not args['quiet']:
            print('Attempting to write {0} page(s) to {1}.'
                  .format(len(infilenames), outfilename))
        write_file(all_text, outfilename)
def _write_superbox(self, fptr, box_id):
    """Write a superbox.

    Parameters
    ----------
    fptr : file or file object
        Superbox (box of boxes) to be written to this file.
    box_id : bytes
        4-byte sequence that identifies the superbox.
    """
    # Write the contained boxes, then come back and write the length.
    orig_pos = fptr.tell()
    fptr.write(struct.pack('>I4s', 0, box_id))
    for box in self.box:
        box.write(fptr)

    end_pos = fptr.tell()
    fptr.seek(orig_pos)
    fptr.write(struct.pack('>I', end_pos - orig_pos))
    fptr.seek(end_pos)
def _raise_glfw_errors_as_exceptions(error_code, description):
    """
    Default error callback that raises GLFWError exceptions for glfw errors.
    Set an alternative error callback or set glfw.ERROR_REPORTING to False
    to disable this behavior.
    """
    global ERROR_REPORTING
    if ERROR_REPORTING:
        message = "(%d) %s" % (error_code, description)
        raise GLFWError(message)
def json_schema_type(schema_file: str, **kwargs) -> typing.Type:
    """Create a :class:`~doctor.types.JsonSchema` type.

    This function will automatically load the schema and set it as an
    attribute of the class along with the description and example.

    :param schema_file: The full path to the json schema file to load.
    :param kwargs: Can include any attribute defined in
        :class:`~doctor.types.JsonSchema`
    """
    # Importing here to avoid circular dependencies
    from doctor.resource import ResourceSchema
    schema = ResourceSchema.from_file(schema_file)
    kwargs['schema'] = schema

    # Look up the description, example and type in the schema.
    definition_key = kwargs.get('definition_key')
    if definition_key:
        params = [definition_key]
        request_schema = schema._create_request_schema(params, params)
        try:
            definition = request_schema['definitions'][definition_key]
        except KeyError:
            raise TypeSystemError(
                'Definition `{}` is not defined in the schema.'.format(
                    definition_key))
        description = get_value_from_schema(
            schema, definition, 'description', definition_key)
        example = get_value_from_schema(
            schema, definition, 'example', definition_key)
        json_type = get_value_from_schema(
            schema, definition, 'type', definition_key)
        json_type, native_type = get_types(json_type)
        kwargs['description'] = description
        kwargs['example'] = example
        kwargs['json_type'] = json_type
        kwargs['native_type'] = native_type
    else:
        try:
            kwargs['description'] = schema.schema['description']
        except KeyError:
            raise TypeSystemError('Schema is missing a description.')
        try:
            json_type = schema.schema['type']
        except KeyError:
            raise TypeSystemError('Schema is missing a type.')
        json_type, native_type = get_types(json_type)
        kwargs['json_type'] = json_type
        kwargs['native_type'] = native_type
        try:
            kwargs['example'] = schema.schema['example']
        except KeyError:
            # Attempt to load from properties, if defined.
            if schema.schema.get('properties'):
                example = {}
                for prop, definition in schema.schema['properties'].items():
                    example[prop] = get_value_from_schema(
                        schema, definition, 'example', 'root')
                kwargs['example'] = example
            else:
                raise TypeSystemError('Schema is missing an example.')

    return type('JsonSchema', (JsonSchema,), kwargs)
Create a :class:`~doctor.types.JsonSchema` type. This function will automatically load the schema and set it as an attribute of the class along with the description and example. :param schema_file: The full path to the json schema file to load. :param kwargs: Can include any attribute defined in :class:`~doctor.types.JsonSchema`
def closedopen(lower_value, upper_value): """Helper function to construct an interval object with a closed lower and open upper. For example: >>> closedopen(100.2, 800.9) [100.2, 800.9) """ return Interval(Interval.CLOSED, lower_value, upper_value, Interval.OPEN)
Helper function to construct an interval object with a closed lower and open upper. For example: >>> closedopen(100.2, 800.9) [100.2, 800.9)
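The Interval class itself isn't shown here; a hypothetical stand-in is enough to show why the closed/open distinction matters for membership tests:

class SimpleInterval(object):
    """Closed lower bound, open upper bound -- a sketch, not the real class."""
    def __init__(self, lower, upper):
        self.lower, self.upper = lower, upper

    def __contains__(self, value):
        return self.lower <= value < self.upper

iv = SimpleInterval(100.2, 800.9)
assert 100.2 in iv        # closed endpoint is included
assert 800.9 not in iv    # open endpoint is excluded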
def detect_pattern_format(pattern_filename, encoding, on_word_boundaries): ''' Automatically detects the pattern file format, and determines whether the Aho-Corasick string matching should pay attention to word boundaries or not. Arguments: - `pattern_filename`: - `encoding`: - `on_word_boundaries`: ''' tsv = True boundaries = on_word_boundaries with open_file(pattern_filename) as input_file: for line in input_file: line = line.decode(encoding) if line.count('\t') != 1: tsv = False if '\\b' in line: boundaries = True if boundaries and not tsv: break return tsv, boundaries
Automatically detects the pattern file format, and determines whether the Aho-Corasick string matching should pay attention to word boundaries or not. Arguments: - `pattern_filename`: - `encoding`: - `on_word_boundaries`:
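Two made-up pattern lines show both signals the detector looks for: exactly one tab per line means TSV, and a literal \b marker turns on word-boundary matching:

lines = [u'aspirin\tDRUG', u'\\bheart attack\\b\tCONDITION']

tsv = all(line.count('\t') == 1 for line in lines)
boundaries = any('\\b' in line for line in lines)
print(tsv, boundaries)  # True True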
def get_couchdb_admins(): ''' Return the actual CouchDB admins ''' user_list = [] req = curl_couchdb('/_config/admins/') for user in req.json().keys(): user_list.append(user) return user_list
Return the actual CouchDB admins
def calc_effective_conductivity(self, inlets=None, outlets=None, domain_area=None, domain_length=None): r""" This calculates the effective electrical conductivity. Parameters ---------- inlets : array_like The pores where the inlet voltage boundary conditions were applied. If not given an attempt is made to infer them from the algorithm. outlets : array_like The pores where the outlet voltage boundary conditions were applied. If not given an attempt is made to infer them from the algorithm. domain_area : scalar, optional The area of the inlet (and outlet) boundary faces. If not given then an attempt is made to estimate it, but it is usually underestimated. domain_length : scalar, optional The length of the domain between the inlet and outlet boundary faces. If not given then an attempt is made to estimate it, but it is usually underestimated. Notes ----- The area and length of the domain are found using the bounding box around the inlet and outlet pores which do not necessarily lie on the edge of the domain, resulting in underestimation of sizes. """ return self._calc_eff_prop(inlets=inlets, outlets=outlets, domain_area=domain_area, domain_length=domain_length)
r""" This calculates the effective electrical conductivity. Parameters ---------- inlets : array_like The pores where the inlet voltage boundary conditions were applied. If not given an attempt is made to infer them from the algorithm. outlets : array_like The pores where the outlet voltage boundary conditions were applied. If not given an attempt is made to infer them from the algorithm. domain_area : scalar, optional The area of the inlet (and outlet) boundary faces. If not given then an attempt is made to estimate it, but it is usually underestimated. domain_length : scalar, optional The length of the domain between the inlet and outlet boundary faces. If not given then an attempt is made to estimate it, but it is usually underestimated. Notes ----- The area and length of the domain are found using the bounding box around the inlet and outlet pores which do not necessarily lie on the edge of the domain, resulting in underestimation of sizes.
def iris(display=False): """ Return the classic iris data in a nice package. """ d = sklearn.datasets.load_iris() df = pd.DataFrame(data=d.data, columns=d.feature_names) # pylint: disable=E1101 if display: return df, [d.target_names[v] for v in d.target] # pylint: disable=E1101 else: return df, d.target
Return the classic iris data in a nice package.
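Typical usage of the helper above, assuming it is in scope; display=True swaps the numeric targets for species names:

df, target = iris()             # target is an array of 0/1/2 labels
df, names = iris(display=True)  # human-readable labels instead

print(df.shape)            # (150, 4)
print(sorted(set(names)))  # ['setosa', 'versicolor', 'virginica']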
def check_physical(self, line): """Run all physical checks on a raw input line.""" self.physical_line = line for name, check, argument_names in self._physical_checks: self.init_checker_state(name, argument_names) result = self.run_check(check, argument_names) if result is not None: (offset, text) = result self.report_error(self.line_number, offset, text, check) if text[:4] == 'E101': self.indent_char = line[0]
Run all physical checks on a raw input line.
def get_resources_by_search(self, resource_query, resource_search): """Gets the search results matching the given search query using the given search. arg: resource_query (osid.resource.ResourceQuery): the resource query arg: resource_search (osid.resource.ResourceSearch): the resource search return: (osid.resource.ResourceSearchResults) - the resource search results raise: NullArgument - ``resource_query`` or ``resource_search`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``resource_query`` or ``resource_search`` is not of this service *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceSearchSession.get_resources_by_search_template # Copied from osid.resource.ResourceQuerySession.get_resources_by_query_template and_list = list() or_list = list() for term in resource_query._query_terms: and_list.append({term: resource_query._query_terms[term]}) for term in resource_query._keyword_terms: or_list.append({term: resource_query._keyword_terms[term]}) if resource_search._id_list is not None: identifiers = [ObjectId(i.identifier) for i in resource_search._id_list] and_list.append({'_id': {'$in': identifiers}}) if or_list: and_list.append({'$or': or_list}) view_filter = self._view_filter() if view_filter: and_list.append(view_filter) if and_list: query_terms = {'$and': and_list} collection = JSONClientValidated('resource', collection='Resource', runtime=self._runtime) if resource_search.start is not None and resource_search.end is not None: result = collection.find(query_terms)[resource_search.start:resource_search.end] else: result = collection.find(query_terms) return searches.ResourceSearchResults(result, dict(resource_query._query_terms), runtime=self._runtime)
Gets the search results matching the given search query using the given search. arg: resource_query (osid.resource.ResourceQuery): the resource query arg: resource_search (osid.resource.ResourceSearch): the resource search return: (osid.resource.ResourceSearchResults) - the resource search results raise: NullArgument - ``resource_query`` or ``resource_search`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``resource_query`` or ``resource_search`` is not of this service *compliance: mandatory -- This method must be implemented.*
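Stripped of the OSID machinery, the filter assembly is plain dict composition; a sketch with hypothetical query and keyword terms shows the shape of what ends up in collection.find:

query_terms = {'displayName.text': {'$regex': '^Gene'}}      # hypothetical
keyword_terms = {'description.text': {'$regex': 'biology'}}  # hypothetical

and_list = [{term: value} for term, value in query_terms.items()]
or_list = [{term: value} for term, value in keyword_terms.items()]
if or_list:
    and_list.append({'$or': or_list})

mongo_filter = {'$and': and_list}
# {'$and': [{'displayName.text': {'$regex': '^Gene'}},
#           {'$or': [{'description.text': {'$regex': 'biology'}}]}]}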
def __replace_capitalise(sentence): """here we replace all instances of #CAPITALISE and cap the next word. ############ #NOTE: Buggy as hell, as it doesn't account for words that are already #capitalized ############ :param sentence: """ if sentence is not None: while sentence.find('#CAPITALISE') != -1: cap_index = sentence.find('#CAPITALISE') part1 = sentence[:cap_index] part2 = sentence[cap_index + 12:cap_index + 13] part3 = sentence[cap_index + 13:] if part2 in "abcdefghijklmnopqrstuvwxyz": sentence = part1 + part2.capitalize() + part3 else: sentence = part1 + part2 + part3 return sentence
here we replace all instances of #CAPITALISE and cap the next word. ############ #NOTE: Buggy as hell, as it doesn't account for words that are already #capitalized ############ :param sentence:
def _inject_cookie_message(self, msg): """Inject the first message, which is the document cookie, for authentication.""" if isinstance(msg, unicode): # the Cookie constructor doesn't accept unicode strings for some reason msg = msg.encode('utf8', 'replace') try: self.request._cookies = Cookie.SimpleCookie(msg) except: logging.warn("couldn't parse cookie string: %s", msg, exc_info=True)
Inject the first message, which is the document cookie, for authentication.
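The snippet above is Python 2 (unicode, the Cookie module); the parsing step looks like this on Python 3, where the module moved to http.cookies:

from http.cookies import SimpleCookie

cookie = SimpleCookie('sessionid=abc123; csrftoken=xyz')
print(cookie['sessionid'].value)  # abc123
print(cookie['csrftoken'].value)  # xyz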
def find_family(self, pattern=r".*", flags=0, node=None): """ Returns the Nodes from given family. :param pattern: Matching pattern. :type pattern: unicode :param flags: Matching regex flags. :type flags: int :param node: Node to start walking from. :type node: AbstractNode or AbstractCompositeNode or Object :return: Family nodes. :rtype: list """ return [node for node in foundations.walkers.nodes_walker(node or self) if re.search(pattern, node.family, flags)]
Returns the Nodes from given family. :param pattern: Matching pattern. :type pattern: unicode :param flags: Matching regex flags. :type flags: int :param node: Node to start walking from. :type node: AbstractNode or AbstractCompositeNode or Object :return: Family nodes. :rtype: list
def rest_put(url, data, timeout, show_error=False): '''Call rest put method''' try: response = requests.put(url, headers={'Accept': 'application/json', 'Content-Type': 'application/json'},\ data=data, timeout=timeout) return response except Exception as exception: if show_error: print_error(exception) return None
Call rest put method
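A usage sketch with a made-up endpoint; because the helper swallows exceptions and returns None, callers have to check before touching the response:

import json

payload = json.dumps({'status': 'RUNNING'})
response = rest_put('http://localhost:8080/api/v1/experiment',  # hypothetical URL
                    payload, timeout=10, show_error=True)
if response is not None and response.status_code == 200:
    print(response.json())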
def _gen_shuffles(self): ''' Used internally to build a list for mapping between a random number and a song index. ''' # The current metasong index si = 0 # The shuffle mapper list self.shuffles = [] # Go through all our songs... for song in self.loop: # And add them to the list as many times as they say to. for i in range(song[1]): self.shuffles.append(si) si += 1
Used internally to build a list for mapping between a random number and a song index.
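The mapping it builds turns a uniform random pick into a weighted one: each song index is repeated once per weight unit. A standalone sketch with made-up songs:

import random

loop = [('intro.ogg', 1), ('battle.ogg', 3), ('boss.ogg', 2)]  # (name, weight)

shuffles = []
for si, (name, weight) in enumerate(loop):
    shuffles.extend([si] * weight)

print(shuffles)                          # [0, 1, 1, 1, 2, 2]
print(loop[random.choice(shuffles)][0])  # weighted random song name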
def write_string(self, s, codec): """ Write string encoding it with codec into stream """ for i in range(0, len(s), self.bufsize): chunk = s[i:i + self.bufsize] buf, consumed = codec.encode(chunk) assert consumed == len(chunk) self.write(buf)
Write string encoding it with codec into stream
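The codec argument is expected to behave like a codecs.CodecInfo entry, whose stateless encode returns the encoded bytes together with how many input characters were consumed, which is exactly what the assertion checks:

import codecs

codec = codecs.lookup('utf-8')
buf, consumed = codec.encode(u'héllo')
print(buf)       # b'h\xc3\xa9llo'
print(consumed)  # 5 -- all five characters consumed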
def bbox(self, out_crs=None): """ Return data bounding box. Parameters ---------- out_crs : ``rasterio.crs.CRS`` rasterio CRS object (default: CRS of process pyramid) Returns ------- bounding box : geometry Shapely geometry object """ return reproject_geometry( box(*self._bounds), src_crs=self.td_pyramid.crs, dst_crs=self.pyramid.crs if out_crs is None else out_crs )
Return data bounding box. Parameters ---------- out_crs : ``rasterio.crs.CRS`` rasterio CRS object (default: CRS of process pyramid) Returns ------- bounding box : geometry Shapely geometry object
def query(self, sql, timeout=10): """ Submit a query and return results. :param sql: string :param timeout: int :return: pydrill.client.ResultQuery """ if not sql: raise QueryError('No query passed to drill.') result = ResultQuery(*self.perform_request(**{ 'method': 'POST', 'url': '/query.json', 'body': { "queryType": "SQL", "query": sql }, 'params': { 'request_timeout': timeout } })) return result
Submit a query and return results. :param sql: string :param timeout: int :return: pydrill.client.ResultQuery
def stop(self): """Send signal to stop the current stream playback""" self._response['shouldEndSession'] = True self._response['action']['audio']['interface'] = 'stop' self._response['action']['audio']['sources'] = [] return self
Send signal to stop the current stream playback
def members(self): """Return a list with the information of all members of the group.""" all_members = [] for page in range(1, self.max_page() + 1): all_members.extend(self.single_page_members(page)) return all_members
Return a list with the information of all members of the group.
def get_cards(self, **query_params): ''' Get all cards this member is attached to. Return a list of Card objects. Returns: list(Card): Return all cards this member is attached to ''' cards = self.get_cards_json(self.base_uri, query_params=query_params) cards_list = [] for card_json in cards: cards_list.append(self.create_card(card_json)) return cards_list
Get all cards this member is attached to. Return a list of Card objects. Returns: list(Card): Return all cards this member is attached to
def Main(): """The main program function. Returns: bool: True if successful or False if not. """ argument_parser = argparse.ArgumentParser(description=( 'Calculates a message digest hash for every file in a directory or ' 'storage media image.')) argument_parser.add_argument( '--output_file', '--output-file', dest='output_file', action='store', metavar='source.hashes', default=None, help=( 'path of the output file, default is to output to stdout.')) argument_parser.add_argument( 'source', nargs='?', action='store', metavar='image.raw', default=None, help='path of the directory or storage media image.') options = argument_parser.parse_args() if not options.source: print('Source value is missing.') print('') argument_parser.print_help() print('') return False logging.basicConfig( level=logging.INFO, format='[%(levelname)s] %(message)s') if options.output_file: output_writer = FileOutputWriter(options.output_file) else: output_writer = StdoutWriter() try: output_writer.Open() except IOError as exception: print('Unable to open output writer with error: {0!s}.'.format( exception)) print('') return False return_value = True mediator = command_line.CLIVolumeScannerMediator() recursive_hasher = RecursiveHasher(mediator=mediator) try: base_path_specs = recursive_hasher.GetBasePathSpecs(options.source) if not base_path_specs: print('No supported file system found in source.') print('') return False recursive_hasher.CalculateHashes(base_path_specs, output_writer) print('') print('Completed.') except errors.ScannerError as exception: return_value = False print('') print('[ERROR] {0!s}'.format(exception)) except errors.UserAbort as exception: return_value = False print('') print('Aborted.') output_writer.Close() return return_value
The main program function. Returns: bool: True if successful or False if not.
def format_attributes_json(self): """Convert the Attributes object to json format.""" attributes_json = {} for key, value in self.attributes.items(): key = utils.check_str_length(key)[0] value = _format_attribute_value(value) if value is not None: attributes_json[key] = value result = { 'attributeMap': attributes_json } return result
Convert the Attributes object to json format.
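A reduced sketch of the transformation; _format_attribute_value isn't shown above, so the typed wrapper used here is an assumption about the wire format:

attributes = {'http.method': 'GET', 'http.route': '/users'}

attributes_json = {}
for key, value in attributes.items():
    # assumed value wrapper; the real _format_attribute_value may differ
    attributes_json[key] = {'string_value': {'value': value}}

result = {'attributeMap': attributes_json}
# {'attributeMap': {'http.method': {'string_value': {'value': 'GET'}},
#                   'http.route': {'string_value': {'value': '/users'}}}}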
def check(self): """ Run inadyn from the commandline to test the configuration. To be run like: fab role inadyn.check """ self._validate_settings() r = self.local_renderer r.env.alias = r.env.aliases[0] r.sudo(r.env.check_command_template)
Run inadyn from the commandline to test the configuration. To be run like: fab role inadyn.check
def execute(self, input_data): ''' Execute the ViewMemoryDeep worker ''' # Aggregate the output from all the memory workers; clearly this could be cleaner output = input_data['view_memory'] output['tables'] = {} for data in [input_data[key] for key in ViewMemoryDeep.dependencies]: for name, table in data['tables'].iteritems(): output['tables'].update({name: table}) return output
Execute the ViewMemoryDeep worker
def load_and_preprocess_imdb_data(n_gram=None): """Load IMDb data and augment with hashed n-gram features.""" X_train, y_train, X_test, y_test = tl.files.load_imdb_dataset(nb_words=VOCAB_SIZE) if n_gram is not None: X_train = np.array([augment_with_ngrams(x, VOCAB_SIZE, N_BUCKETS, n=n_gram) for x in X_train]) X_test = np.array([augment_with_ngrams(x, VOCAB_SIZE, N_BUCKETS, n=n_gram) for x in X_test]) return X_train, y_train, X_test, y_test
Load IMDb data and augment with hashed n-gram features.
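augment_with_ngrams isn't defined in this snippet; one plausible minimal version hashes each n-gram into one of N_BUCKETS extra token ids placed after the unigram vocabulary:

def augment_with_ngrams(unigrams, unigram_vocab_size, n_buckets, n=2):
    """Append hashed n-gram ids (illustrative; the real helper may differ)."""
    grams = list(unigrams)
    for i in range(len(unigrams) - n + 1):
        ngram = tuple(unigrams[i:i + n])
        grams.append(unigram_vocab_size + hash(ngram) % n_buckets)
    return grams

print(augment_with_ngrams([1, 5, 9], unigram_vocab_size=100, n_buckets=10))
# e.g. [1, 5, 9, 104, 107] -- the last two ids are bucketed bigrams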
def available(name): ''' Returns ``True`` if the specified service is available, otherwise returns ``False``. We look up the name with the svcs command to get back the FMRI This allows users to use simpler service names CLI Example: .. code-block:: bash salt '*' service.available net-snmp ''' cmd = '/usr/bin/svcs -H -o FMRI {0}'.format(name) name = __salt__['cmd.run'](cmd, python_shell=False) return name in get_all()
Returns ``True`` if the specified service is available, otherwise returns ``False``. We look up the name with the svcs command to get back the FMRI This allows users to use simpler service names CLI Example: .. code-block:: bash salt '*' service.available net-snmp
def drag_and_drop_by_offset(self, source, xoffset, yoffset): """ Holds down the left mouse button on the source element, then moves to the target offset and releases the mouse button. :Args: - source: The element to mouse down. - xoffset: X offset to move to. - yoffset: Y offset to move to. """ self.click_and_hold(source) self.move_by_offset(xoffset, yoffset) self.release() return self
Holds down the left mouse button on the source element, then moves to the target offset and releases the mouse button. :Args: - source: The element to mouse down. - xoffset: X offset to move to. - yoffset: Y offset to move to.
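Standard ActionChains usage; the element id is made up, the locator is the older style matching this snippet's era, and the chain only runs when perform() is called:

from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains

driver = webdriver.Firefox()
driver.get('https://example.com')
slider = driver.find_element_by_id('slider')  # hypothetical element

# Drag the slider 120 px right, 0 px down, then release.
ActionChains(driver).drag_and_drop_by_offset(slider, 120, 0).perform()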
def _parse_configs(self, config): """Builds a dict with information to connect to Clusters. Parses the list of configuration dictionaries passed by the user and builds an internal dict (_clusters) that holds information for creating Clients connecting to Clusters and matching database names. Args: config: A list of dictionaries containing connection and identification information about Clusters. A dict has the following structure: {label: {host, port, read_preference, dbpath}}. Raises: Exception('No configuration provided'): no configuration provided. """ for config_dict in config: label = config_dict.keys()[0] cfg = config_dict[label] # Transform dbpath to something digestible by regexp. dbpath = cfg['dbpath'] pattern = self._parse_dbpath(dbpath) read_preference = cfg.get('read_preference', 'primary').upper() read_preference = self._get_read_preference(read_preference) # Put all parameters that could be passed to pymongo.MongoClient # in a separate dict, to ease MongoClient creation. cluster_config = { 'params': { 'host': cfg['host'], 'port': cfg['port'], 'read_preference': read_preference, 'replicaSet': cfg.get('replicaSet') }, 'pattern': pattern, 'label': label } self._clusters.append(cluster_config)
Builds a dict with information to connect to Clusters. Parses the list of configuration dictionaries passed by the user and builds an internal dict (_clusters) that holds information for creating Clients connecting to Clusters and matching database names. Args: config: A list of dictionaries containing connection and identification information about Clusters. A dict has the following structure: {label: {host, port, read_preference, dbpath}}. Raises: Exception('No configuration provided'): no configuration provided.
def _get_ema(cls, df, column, windows): """ get exponential moving average :param df: data :param column: column to calculate :param windows: collection of window of exponential moving average :return: None """ window = cls.get_only_one_positive_int(windows) column_name = '{}_{}_ema'.format(column, window) if len(df[column]) > 0: df[column_name] = df[column].ewm( ignore_na=False, span=window, min_periods=0, adjust=True).mean() else: df[column_name] = []
get exponential moving average :param df: data :param column: column to calculate :param windows: collection of window of exponential moving average :return: None
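The underlying pandas call is Series.ewm(...).mean(); a standalone run with a toy close-price column and the same parameters the method uses:

import pandas as pd

df = pd.DataFrame({'close': [10.0, 10.5, 10.2, 10.8, 11.0]})

# 3-period EMA of the close column.
df['close_3_ema'] = df['close'].ewm(
    ignore_na=False, span=3, min_periods=0, adjust=True).mean()
print(df['close_3_ema'].round(3).tolist())
# [10.0, 10.333, 10.257, 10.547, 10.781]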
def update_port_statuses_cfg(self, context, port_ids, status): """Update the operational statuses of a list of router ports. This is called by the Cisco cfg agent to update the status of a list of ports. :param context: contains user information :param port_ids: list of ids of all the ports for the given status :param status: PORT_STATUS_ACTIVE/PORT_STATUS_DOWN. """ self._l3plugin.update_router_port_statuses(context, port_ids, status)
Update the operational statuses of a list of router ports. This is called by the Cisco cfg agent to update the status of a list of ports. :param context: contains user information :param port_ids: list of ids of all the ports for the given status :param status: PORT_STATUS_ACTIVE/PORT_STATUS_DOWN.
def create(): """Create a new database with information about the films in the specified directory or directories.""" if not all(map(os.path.isdir, ARGS.directory)): exit('Error: One or more of the specified directories does not exist.') with sqlite3.connect(ARGS.database) as connection: connection.text_factory = str cursor = connection.cursor() cursor.execute('DROP TABLE IF EXISTS Movies') cursor.execute('''CREATE TABLE Movies(name TEXT, path TEXT, size TEXT, files BLOB)''') for directory in ARGS.directory: cursor.executemany('INSERT INTO Movies VALUES(?, ?, ?, ?)', local_data(directory))
Create a new database with information about the films in the specified directory or directories.
def convert(word): """Convert the given `word` to the native string type expected by the SWIG wrapper: UTF-8 encoded `bytes` on Python 2 and `str` on Python 3.""" if six.PY2: if isinstance(word, unicode): return word.encode('utf-8') else: return word.decode('utf-8').encode('utf-8') # make sure it is real utf8, otherwise complain else: # ==> Py3 if isinstance(word, bytes): return word.decode('utf-8') # bytes must be in utf8 return word
Convert the given `word` to the native string type expected by the SWIG wrapper: UTF-8 encoded `bytes` on Python 2 and `str` on Python 3.
def begin(self): """Load variables from checkpoint. New model variables have the following name format: new_model_scope/old_model_scope/xxx/xxx:0 To find the map of name to variable, we need to strip the new_model_scope and then match the old_model_scope and remove the suffix :0. """ variables_to_restore = tf.contrib.framework.get_variables_to_restore( include=self._include, exclude=self._exclude) # remove new_model_scope from variable name prefix assignment_map = {variable.name[len(self._new_model_scope):]: variable for variable in variables_to_restore if variable.name.startswith(self._new_model_scope)} # remove :0 from variable name suffix assignment_map = {name.split(":")[0]: variable for name, variable in six.iteritems(assignment_map) if name.startswith(self._old_model_scope)} self._assignment_map = assignment_map tf.logging.info("restoring %d variables from checkpoint %s" % ( len(assignment_map), self._checkpoint_path)) tf.train.init_from_checkpoint(self._checkpoint_path, self._assignment_map)
Load variables from checkpoint. New model variables have the following name format: new_model_scope/old_model_scope/xxx/xxx:0 To find the map of name to variable, we need to strip the new_model_scope and then match the old_model_scope and remove the suffix :0.
def has_entities(status): """ Returns true if a Status object has entities. Args: status: either a tweepy.Status object or a dict returned from Twitter API """ try: if sum(len(v) for v in status.entities.values()) > 0: return True except AttributeError: if sum(len(v) for v in status['entities'].values()) > 0: return True return False
Returns true if a Status object has entities. Args: status: either a tweepy.Status object or a dict returned from Twitter API
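It works on a plain dict shaped like a Twitter API payload, and an all-empty entities block counts as no entities:

status = {'entities': {'hashtags': [{'text': 'python'}], 'urls': []}}
assert has_entities(status) is True

empty = {'entities': {'hashtags': [], 'urls': []}}
assert has_entities(empty) is False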
def token_is_correct(self, token): """ Check whether the token is suitable for text generation. Russian words, punctuation marks, and the start/end symbols are allowed. """ if self.is_rus_word(token): return True elif self.ONLY_MARKS.search(token): return True elif self.END_TOKENS.search(token): return True elif token in "$^": return True return False
Check whether the token is suitable for text generation. Russian words, punctuation marks, and the start/end symbols are allowed.
def _get_bound_pressure_height(pressure, bound, heights=None, interpolate=True): """Calculate the bounding pressure and height in a layer. Given pressure, optional heights, and a bound, return either the closest pressure/height or interpolated pressure/height. If no heights are provided, a standard atmosphere is assumed. Parameters ---------- pressure : `pint.Quantity` Atmospheric pressures bound : `pint.Quantity` Bound to retrieve (in pressure or height) heights : `pint.Quantity`, optional Atmospheric heights associated with the pressure levels. Defaults to using heights calculated from ``pressure`` assuming a standard atmosphere. interpolate : boolean, optional Interpolate the bound or return the nearest. Defaults to True. Returns ------- `pint.Quantity` The bound pressure and height. """ # Make sure pressure is monotonically decreasing sort_inds = np.argsort(pressure)[::-1] pressure = pressure[sort_inds] if heights is not None: heights = heights[sort_inds] # Bound is given in pressure if bound.dimensionality == {'[length]': -1.0, '[mass]': 1.0, '[time]': -2.0}: # If the bound is in the pressure data, we know the pressure bound exactly if bound in pressure: bound_pressure = bound # If we have heights, we know the exact height value, otherwise return standard # atmosphere height for the pressure if heights is not None: bound_height = heights[pressure == bound_pressure] else: bound_height = pressure_to_height_std(bound_pressure) # If bound is not in the data, return the nearest or interpolated values else: if interpolate: bound_pressure = bound # Use the user specified bound if heights is not None: # Interpolate heights from the height data bound_height = log_interpolate_1d(bound_pressure, pressure, heights) else: # If not heights given, use the standard atmosphere bound_height = pressure_to_height_std(bound_pressure) else: # No interpolation, find the closest values idx = (np.abs(pressure - bound)).argmin() bound_pressure = pressure[idx] if heights is not None: bound_height = heights[idx] else: bound_height = pressure_to_height_std(bound_pressure) # Bound is given in height elif bound.dimensionality == {'[length]': 1.0}: # If there is height data, see if we have the bound or need to interpolate/find nearest if heights is not None: if bound in heights: # Bound is in the height data bound_height = bound bound_pressure = pressure[heights == bound] else: # Bound is not in the data if interpolate: bound_height = bound # Need to cast back to the input type since interp (up to at least numpy # 1.13 always returns float64. This can cause upstream users problems, # resulting in something like np.append() to upcast. 
bound_pressure = np.interp(np.atleast_1d(bound), heights, pressure).astype(bound.dtype) * pressure.units else: idx = (np.abs(heights - bound)).argmin() bound_pressure = pressure[idx] bound_height = heights[idx] else: # Don't have heights, so assume a standard atmosphere bound_height = bound bound_pressure = height_to_pressure_std(bound) # If interpolation is on, this is all we need, if not, we need to go back and # find the pressure closest to this and refigure the bounds if not interpolate: idx = (np.abs(pressure - bound_pressure)).argmin() bound_pressure = pressure[idx] bound_height = pressure_to_height_std(bound_pressure) # Bound has invalid units else: raise ValueError('Bound must be specified in units of length or pressure.') # If the bound is out of the range of the data, we shouldn't extrapolate if not (_greater_or_close(bound_pressure, np.nanmin(pressure) * pressure.units) and _less_or_close(bound_pressure, np.nanmax(pressure) * pressure.units)): raise ValueError('Specified bound is outside pressure range.') if heights is not None: if not (_less_or_close(bound_height, np.nanmax(heights) * heights.units) and _greater_or_close(bound_height, np.nanmin(heights) * heights.units)): raise ValueError('Specified bound is outside height range.') return bound_pressure, bound_height
Calculate the bounding pressure and height in a layer. Given pressure, optional heights, and a bound, return either the closest pressure/height or interpolated pressure/height. If no heights are provided, a standard atmosphere is assumed. Parameters ---------- pressure : `pint.Quantity` Atmospheric pressures bound : `pint.Quantity` Bound to retrieve (in pressure or height) heights : `pint.Quantity`, optional Atmospheric heights associated with the pressure levels. Defaults to using heights calculated from ``pressure`` assuming a standard atmosphere. interpolate : boolean, optional Interpolate the bound or return the nearest. Defaults to True. Returns ------- `pint.Quantity` The bound pressure and height.
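The interpolated branch reduces to a log-pressure interpolation; a numpy-only sketch of finding the height of a 600 hPa bound (made-up profile, no units handling):

import numpy as np

pressure = np.array([1000., 850., 700., 500., 300.])    # hPa, decreasing
heights = np.array([100., 1500., 3100., 5800., 9500.])  # m

# np.interp needs increasing x, so interpolate over -log(p).
bound = 600.0
bound_height = np.interp(-np.log(bound), -np.log(pressure), heights)
print(round(bound_height, 1))  # ~4337 m, between the 700 and 500 hPa levels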
def within_radians(self, key, point, max_distance, min_distance=None): """ Add a query condition restricting results to documents whose value for the given field lies within a distance range (in radians) of a point. :param key: name of the field to query :param point: the geographic point to measure from :param max_distance: maximum distance (in radians) :param min_distance: minimum distance (in radians) :rtype: Query """ self.near(key, point) self._add_condition(key, '$maxDistance', max_distance) if min_distance is not None: self._add_condition(key, '$minDistance', min_distance) return self
Add a query condition restricting results to documents whose value for the given field lies within a distance range (in radians) of a point. :param key: name of the field to query :param point: the geographic point to measure from :param max_distance: maximum distance (in radians) :param min_distance: minimum distance (in radians) :rtype: Query
def mean_imls(self): """ Compute the mean IMLs (Intensity Measure Levels) for this vulnerability function: the midpoint of each consecutive pair of IMLs, plus extrapolated half-step values below the first and above the last IML. """ return numpy.array( [max(0, self.imls[0] - (self.imls[1] - self.imls[0]) / 2.)] + [numpy.mean(pair) for pair in pairwise(self.imls)] + [self.imls[-1] + (self.imls[-1] - self.imls[-2]) / 2.])
Compute the mean IMLs (Intensity Measure Levels) for this vulnerability function: the midpoint of each consecutive pair of IMLs, plus extrapolated half-step values below the first and above the last IML.
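A worked run of the same formula on three IMLs (zip(imls, imls[1:]) stands in for the module's pairwise helper); note the output has one more entry than the input, since it adds extrapolated edge values:

import numpy

imls = [0.1, 0.2, 0.4]
mean_imls = numpy.array(
    [max(0, imls[0] - (imls[1] - imls[0]) / 2.)]          # 0.05
    + [numpy.mean(pair) for pair in zip(imls, imls[1:])]  # 0.15, 0.3
    + [imls[-1] + (imls[-1] - imls[-2]) / 2.])            # 0.5
print(mean_imls)  # [0.05 0.15 0.3  0.5 ]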
def compute_checksum(self): """ Calculates checksums for a given file. """ if self._filename.startswith("s3://"): print("Warning: Did not perform client-side checksumming for file in S3. To be implemented.") pass else: checksumCalculator = self.ChecksumCalculator(self._filename) self._checksums = checksumCalculator.compute()
Calculates checksums for a given file.
def mode_run_common_obs(args, extra_args): """Observing mode processing mode of numina.""" # Loading observation result if exists loaded_obs = [] sessions = [] if args.session: for obfile in args.obsresult: _logger.info("session file from %r", obfile) with open(obfile) as fd: sess = yaml.load(fd) sessions.append(sess['session']) else: for obfile in args.obsresult: _logger.info("Loading observation results from %r", obfile) with open(obfile) as fd: sess = [] for doc in yaml.load_all(fd): enabled = doc.get('enabled', True) docid = doc['id'] requirements = doc.get('requirements', {}) sess.append(dict(id=docid, enabled=enabled, requirements=requirements)) if enabled: _logger.debug("load observation result with id %s", docid) else: _logger.debug("skip observation result with id %s", docid) loaded_obs.append(doc) sessions.append(sess) if args.reqs: _logger.info('reading control from %s', args.reqs) with open(args.reqs, 'r') as fd: loaded_data = yaml.load(fd) else: _logger.info('no control file') loaded_data = {} if extra_args.extra_control: _logger.info('extra control %s', extra_args.extra_control) loaded_data_extra = parse_as_yaml(extra_args.extra_control) else: loaded_data_extra = None control_format = loaded_data.get('version', 1) _logger.info('control format version %d', control_format) if control_format == 1: _backend = process_format_version_1(loaded_obs, loaded_data, loaded_data_extra, args.profilepath) datamanager = DataManager(args.basedir, args.datadir, _backend) datamanager.workdir_tmpl = "obsid{obsid}_work" datamanager.resultdir_tmpl = "obsid{obsid}_results" datamanager.serial_format = 'yaml' datamanager.result_file = 'result.yaml' datamanager.task_file = 'task.yaml' elif control_format == 2: _backend = process_format_version_2(loaded_obs, loaded_data, loaded_data_extra, args.profilepath) datamanager = DataManager(args.basedir, args.datadir, _backend) else: print('Unsupported format', control_format, 'in', args.reqs) sys.exit(1) # Start processing jobs = [] for session in sessions: for job in session: if job['enabled']: jobs.append(job) for job in jobs: # Directories with relevant data request = 'reduce' request_params = {} obid = job['id'] request_params['oblock_id'] = obid request_params["pipeline"] = args.pipe_name request_params["instrument_configuration"] = args.insconf logger_control = dict( default=DEFAULT_RECIPE_LOGGER, logfile='processing.log', format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", enabled=True ) request_params['logger_control'] = logger_control task = datamanager.backend.new_task(request, request_params) task.request = request task.request_params = request_params task.request_runinfo['runner'] = 'numina' task.request_runinfo['runner_version'] = __version__ _logger.info("procesing OB with id={}".format(obid)) workenv = datamanager.create_workenv(task) task.request_runinfo["results_dir"] = workenv.resultsdir_rel task.request_runinfo["work_dir"] = workenv.workdir_rel # Roll back to cwd after leaving the context with working_directory(workenv.datadir): obsres = datamanager.backend.obsres_from_oblock_id(obid, configuration=args.insconf) _logger.debug("pipeline from CLI is %r", args.pipe_name) pipe_name = args.pipe_name obsres.pipeline = pipe_name recipe = datamanager.backend.search_recipe_from_ob(obsres) _logger.debug('recipe class is %s', recipe.__class__) # Enable intermediate results by default _logger.debug('enable intermediate results') recipe.intermediate_results = True # Update runinfo _logger.debug('update recipe runinfo') 
recipe.runinfo['runner'] = 'numina' recipe.runinfo['runner_version'] = __version__ recipe.runinfo['task_id'] = task.id recipe.runinfo['data_dir'] = workenv.datadir recipe.runinfo['work_dir'] = workenv.workdir recipe.runinfo['results_dir'] = workenv.resultsdir _logger.debug('recipe created') try: rinput = recipe.build_recipe_input(obsres, datamanager.backend) except (ValueError, numina.exceptions.ValidationError) as err: _logger.error("During recipe input construction") _logger.error("%s", err) sys.exit(0) _logger.debug('recipe input created') # Show the actual inputs for key in recipe.requirements(): v = getattr(rinput, key) _logger.debug("recipe requires %r, value is %s", key, v) for req in recipe.products().values(): _logger.debug('recipe provides %s, %s', req.type.__class__.__name__, req.description) # Load recipe control and recipe parameters from file task.request_runinfo['instrument'] = obsres.instrument task.request_runinfo['mode'] = obsres.mode task.request_runinfo['recipe_class'] = recipe.__class__.__name__ task.request_runinfo['recipe_fqn'] = fully_qualified_name(recipe.__class__) task.request_runinfo['recipe_version'] = recipe.__version__ # Copy files if args.copy_files: _logger.debug('copy files to work directory') workenv.sane_work() workenv.copyfiles_stage1(obsres) workenv.copyfiles_stage2(rinput) workenv.adapt_obsres(obsres) completed_task = run_recipe(recipe=recipe, task=task, rinput=rinput, workenv=workenv, logger_control=logger_control) datamanager.store_task(completed_task) if args.dump_control: _logger.debug('dump control status') with open('control_dump.yaml', 'w') as fp: datamanager.backend.dump(fp)
Observing mode processing mode of numina.
def _invert(color, **kwargs): """ Returns the inverse (negative) of a color. The red, green, and blue values are inverted, while the opacity is left alone. """ col = ColorValue(color) args = [ 255.0 - col.value[0], 255.0 - col.value[1], 255.0 - col.value[2], col.value[3], ] inverted = ColorValue(args) return inverted
Returns the inverse (negative) of a color. The red, green, and blue values are inverted, while the opacity is left alone.
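The arithmetic without the ColorValue wrapper: inverting pure red yields cyan, with the alpha channel untouched:

r, g, b, a = 255.0, 0.0, 0.0, 1.0
inverted = (255.0 - r, 255.0 - g, 255.0 - b, a)
print(inverted)  # (0.0, 255.0, 255.0, 1.0) -- cyan, same opacity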
def list_types_poi(self, **kwargs): """Obtain a list of families, types and categories of POI. Args: lang (str): Language code (*es* or *en*). Returns: Status boolean and parsed response (list[ParkingPoiType]), or message string in case of error. """ # Endpoint parameters url_args = {'language': util.language_code(kwargs.get('lang'))} # Request result = self.make_request('list_poi_types', url_args) if not util.check_result(result): return False, result.get('message', 'UNKNOWN ERROR') # Parse values = util.response_list(result, 'Data') return True, [emtype.ParkingPoiType(**a) for a in values]
Obtain a list of families, types and categories of POI. Args: lang (str): Language code (*es* or *en*). Returns: Status boolean and parsed response (list[ParkingPoiType]), or message string in case of error.
def force_process_ordered(self): """ Take any messages from the replicas that have been ordered and process them. This should be done rarely, e.g. before catchup starts, so a more current LedgerStatus can be sent. It can be called either: 1. when the node is participating; this happens just before catchup starts, so the node can have the latest ledger status, or 2. when the node is not participating but a round of catchup is about to be started; here it forces all the replica-ordered messages to be appended to the stashed ordered requests, and the stashed ordered requests are processed with appropriate checks. """ for instance_id, messages in self.replicas.take_ordereds_out_of_turn(): num_processed = 0 for message in messages: self.try_processing_ordered(message) num_processed += 1 logger.info('{} processed {} Ordered batches for instance {} ' 'before starting catch up' .format(self, num_processed, instance_id))
Take any messages from the replicas that have been ordered and process them. This should be done rarely, e.g. before catchup starts, so a more current LedgerStatus can be sent. It can be called either: 1. when the node is participating; this happens just before catchup starts, so the node can have the latest ledger status, or 2. when the node is not participating but a round of catchup is about to be started; here it forces all the replica-ordered messages to be appended to the stashed ordered requests, and the stashed ordered requests are processed with appropriate checks.