def apply_patch(patchfile,cwd=None,posix=False,level=0): """call 'patch -p[level] [--posix] < arg1' posix mode is sometimes necessary. It keeps empty files so that dpkg-source removes their contents. """ if not os.path.exists(patchfile): raise RuntimeError('patchfile "%s" does not exist'%patchfile) fd = open(patchfile,mode='r') level_str = '-p%d'%level args = ['/usr/bin/patch',level_str] if posix: args.append('--posix') log.info('PATCH COMMAND: %s < %s', ' '.join(args), patchfile) log.info(' PATCHING in dir: %s', cwd) # print >> sys.stderr, 'PATCH COMMAND:',' '.join(args),'<',patchfile # print >> sys.stderr, ' PATCHING in dir:',cwd res = subprocess.Popen( args, cwd=cwd, stdin=fd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True ) returncode=None while returncode is None: returncode = res.poll() ready = select.select( [res.stdout,res.stderr],[],[],0.1) # XXX figure out how to do this without reading byte-by-byte if res.stdout in ready[0]: sys.stdout.write(res.stdout.read(1)) sys.stdout.flush() if res.stderr in ready[0]: sys.stderr.write(res.stderr.read(1)) sys.stderr.flush() # finish outputting file sys.stdout.write(res.stdout.read()) sys.stdout.flush() sys.stderr.write(res.stderr.read()) sys.stderr.flush() if returncode: log.error('ERROR running: %s', ' '.join(args)) log.error('ERROR in %s', cwd) # print >> sys.stderr, 'ERROR running: %s'%(' '.join(args),) # print >> sys.stderr, 'ERROR in',cwd raise RuntimeError('returncode %d'%returncode)
call 'patch -p[level] [--posix] < arg1' posix mode is sometimes necessary. It keeps empty files so that dpkg-source removes their contents.
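A usage sketch for apply_patch; the patch paths, working directory, and strip level below are hypothetical, and the module-level imports the function relies on (os, sys, select, subprocess, log) are assumed to be in scope.

# Apply a quilt-style patch at strip level 1 inside the unpacked source tree.
apply_patch('debian/patches/fix-build.patch', cwd='build/src-1.0', level=1)
# posix mode keeps empty files in place so that dpkg-source can remove their contents.
apply_patch('debian/patches/empty-files.patch', cwd='build/src-1.0', posix=True)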
def rank(self, n, mu, sigma, crit=.5, upper=10000, xtol=1): """%(super)s Additional Parameters ---------------------- {0} """ return _make_rank(self, n, mu, sigma, crit=crit, upper=upper, xtol=xtol)
%(super)s Additional Parameters ---------------------- {0}
async def cellAuthToHive(dirn, auth): ''' Migrate old cell Auth() data into a HiveAuth(). ''' logger.warning('migrating old cell auth to hive') path = os.path.join(dirn, 'auth.lmdb') lenv = lmdb.open(path, max_dbs=128) userdb = lenv.open_db(b'users') roledb = lenv.open_db(b'roles') migrated_roles = False migrated_users = False with lenv.begin() as xact: with xact.cursor(db=roledb) as curs: for lkey, lval in curs.iternext(): name = lkey.decode('utf8') info = s_msgpack.un(lval) logger.info(f'Migrating role: {name}') role = auth.getRoleByName(name) if role is None: logger.info(f'Creating role: {name}') role = await auth.addRole(name) rules = info.get('rules', ()) await role.setRules(rules) migrated_roles = True if not migrated_roles: # pragma: no cover logger.info('No roles were migrated.') with xact.cursor(db=userdb) as curs: for lkey, lval in curs.iternext(): name = lkey.decode('utf8') info = s_msgpack.un(lval) logger.info(f'Migrating user: {name}') user = auth.getUserByName(name) if user is None: logger.info(f'Creating user: {name}') user = await auth.addUser(name) if info.get('admin', False): await user.setAdmin(True) if info.get('locked', False): await user.setLocked(True) # set this directly since we only have the shadow shadow = info.get('shadow') if shadow is not None: await user.info.set('passwd', shadow) rules = info.get('rules', ()) await user.setRules(rules) for name in info.get('roles', ()): await user.grant(name) migrated_users = True if not migrated_users: # pragma: no cover logger.info('No users were migrated.') lenv.sync() lenv.close()
Migrate old cell Auth() data into a HiveAuth().
def cached_get(timeout, *params): """Decorator applied specifically to a view's get method""" def decorator(view_func): @wraps(view_func, assigned=available_attrs(view_func)) def _wrapped_view(view_or_request, *args, **kwargs): # The type of the request gets muddled when using a function based # decorator. We must use a function based decorator so it can be # used in urls.py. request = getattr(view_or_request, "request", view_or_request) if not hasattr(_thread_locals, "ultracache_request"): setattr(_thread_locals, "ultracache_request", request) # If request not GET or HEAD never cache if request.method.lower() not in ("get", "head"): return view_func(view_or_request, *args, **kwargs) # If request contains messages never cache l = 0 try: l = len(request._messages) except (AttributeError, TypeError): pass if l: return view_func(view_or_request, *args, **kwargs) # Compute a cache key li = [str(view_or_request.__class__), view_func.__name__] # request.get_full_path is implicitly added it no other request # path is provided. get_full_path includes the querystring and is # the more conservative approach but makes it trivially easy for a # request to bust through the cache. if not set(params).intersection(set(( "request.get_full_path()", "request.path", "request.path_info" ))): li.append(request.get_full_path()) if "django.contrib.sites" in settings.INSTALLED_APPS: li.append(get_current_site_pk(request)) # Pre-sort kwargs keys = list(kwargs.keys()) keys.sort() for key in keys: li.append("%s,%s" % (key, kwargs[key])) # Extend cache key with custom variables for param in params: if not isinstance(param, str): param = str(param) li.append(eval(param)) s = ":".join([str(l) for l in li]) hashed = hashlib.md5(s.encode("utf-8")).hexdigest() cache_key = "ucache-get-%s" % hashed cached = cache.get(cache_key, None) if cached is None: # The get view as outermost caller may bluntly set _ultracache request._ultracache = [] response = view_func(view_or_request, *args, **kwargs) content = None if isinstance(response, TemplateResponse): content = response.render().rendered_content elif isinstance(response, HttpResponse): content = response.content if content is not None: headers = getattr(response, "_headers", {}) cache.set( cache_key, {"content": content, "headers": headers}, timeout ) cache_meta(request, cache_key) else: response = HttpResponse(cached["content"]) # Headers has a non-obvious format for k, v in cached["headers"].items(): response[v[0]] = v[1] return response return _wrapped_view return decorator
Decorator applied specifically to a view's get method
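A sketch of decorating a class-based view's get method, as the docstring describes; the view class, template, and timeout are illustrative. The extra string params are eval()'d against the request, so they can pull request attributes into the cache key.

from django.views.generic import TemplateView

class HomeView(TemplateView):
    template_name = 'home.html'

    # Cache GET responses for 300 seconds, keyed on the request path.
    @cached_get(300, 'request.path')
    def get(self, request, *args, **kwargs):
        return super().get(request, *args, **kwargs)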
def _create_metadata_converter_action(self): """Create action for showing metadata converter dialog.""" icon = resources_path('img', 'icons', 'show-metadata-converter.svg') self.action_metadata_converter = QAction( QIcon(icon), self.tr('InaSAFE Metadata Converter'), self.iface.mainWindow()) self.action_metadata_converter.setStatusTip(self.tr( 'Convert metadata from version 4.3 to version 3.5.')) self.action_metadata_converter.setWhatsThis(self.tr( 'Use this tool to convert metadata 4.3 to version 3.5')) self.action_metadata_converter.triggered.connect( self.show_metadata_converter) self.add_action( self.action_metadata_converter, add_to_toolbar=self.full_toolbar)
Create action for showing metadata converter dialog.
def _orientation_ok_to_bridge_contigs(self, start_hit, end_hit): '''Returns True iff the orientation of the hits means that the query contig of both hits can bridge the reference contigs of the hits''' assert start_hit.qry_name == end_hit.qry_name if start_hit.ref_name == end_hit.ref_name: return False if ( (self._is_at_ref_end(start_hit) and start_hit.on_same_strand()) or (self._is_at_ref_start(start_hit) and not start_hit.on_same_strand()) ): start_hit_ok = True else: start_hit_ok = False if ( (self._is_at_ref_start(end_hit) and end_hit.on_same_strand()) or (self._is_at_ref_end(end_hit) and not end_hit.on_same_strand()) ): end_hit_ok = True else: end_hit_ok = False return start_hit_ok and end_hit_ok
Returns True iff the orientation of the hits means that the query contig of both hits can bridge the reference contigs of the hits
def get_value(self, **kwargs): """Return the value for a specific key.""" key = tuple(kwargs[group] for group in self.groups) if key not in self.data: self.data[key] = 0 return self.data[key]
Return the value for a specific key.
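A minimal sketch, assuming a hypothetical MetricsTable class that defines self.groups and self.data and exposes get_value as above.

table = MetricsTable(groups=('region', 'status'))   # hypothetical container
print(table.get_value(region='eu', status='open'))  # 0 -- the key is created lazily
table.data[('eu', 'open')] += 1
print(table.get_value(region='eu', status='open'))  # 1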
def get_one_parameter(self, regex_exp, parameters): """ Get one parameter from a given regex expression Raise an exception if more than one was found :param regex_exp: :param parameters: :return: """ Rx, other = self.get_parameters(regex_exp, parameters) if other is not None and other.strip(): raise iarm.exceptions.ParsingError("Extra arguments found: {}".format(other)) return Rx.upper()
Get one parameter from a given regex expression Raise an exception if more than one was found :param regex_exp: :param parameters: :return:
def silent_execute(self, code): """Execute code in the kernel without increasing the prompt""" try: self.kernel_client.execute(to_text_string(code), silent=True) except AttributeError: pass
Execute code in the kernel without increasing the prompt
def check_out_item(self, expiration): """Get the highest-priority item out of this queue. Returns the item, or None if no items are available. The item must be either ``return_item()`` or ``renew_item()`` before ``expiration`` seconds pass, or it will become available to future callers. The item will be marked as being owned by ``worker_id``. """ conn = redis.StrictRedis(connection_pool=self.pool) self._run_expiration(conn) expiration += time.time() script = conn.register_script(""" local item = redis.call("zrevrange", KEYS[1], 0, 0) if #item == 0 then return nil end item = item[1] redis.call("zrem", KEYS[1], item) redis.call("zadd", KEYS[2], ARGV[1], item) redis.call("hset", KEYS[3], "i" .. item, "w" .. ARGV[2]) redis.call("hset", KEYS[3], "w" .. ARGV[2], "i" .. item) return item """) result = script(keys=[self._key_available(), self._key_expiration(), self._key_workers()], args=[expiration, self._get_worker_id(conn)]) return result
Get the highest-priority item out of this queue. Returns the item, or None if no items are available. The item must be either ``return_item()`` or ``renew_item()`` before ``expiration`` seconds pass, or it will become available to future callers. The item will be marked as being owned by ``worker_id``.
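A hedged worker-loop sketch; process() is a hypothetical work function, and return_item()/renew_item() are the companion methods the docstring refers to.

# Lease the highest-priority item for 60 seconds.
item = queue.check_out_item(60)
if item is not None:
    try:
        process(item)            # hypothetical work function
        queue.return_item(item)  # finished: hand the item back
    except Exception:
        pass  # let the lease lapse; the item becomes available again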
def application_adapter(obj, request): """ Adapter for rendering a :class:`pyramid_urireferencer.models.ApplicationResponse` to json. :param pyramid_urireferencer.models.ApplicationResponse obj: The response to be rendered. :rtype: :class:`dict` """ return { 'title': obj.title, 'uri': obj.uri, 'service_url': obj.service_url, 'success': obj.success, 'has_references': obj.has_references, 'count': obj.count, 'items': [{ 'uri': i.uri, 'title': i.title } for i in obj.items] if obj.items is not None else None }
Adapter for rendering a :class:`pyramid_urireferencer.models.ApplicationResponse` to json. :param pyramid_urireferencer.models.ApplicationResponse obj: The response to be rendered. :rtype: :class:`dict`
def store_sentry(self, username, sentry_bytes): """ Store sentry bytes under a username :param username: username :type username: :class:`str` :return: Whether the operation succeeded :rtype: :class:`bool` """ filepath = self._get_sentry_path(username) if filepath: try: with open(filepath, 'wb') as f: f.write(sentry_bytes) return True except IOError as e: self._LOG.error("store_sentry: %s" % str(e)) return False
Store sentry bytes under a username :param username: username :type username: :class:`str` :return: Whether the operation succeeded :rtype: :class:`bool`
def Freqs(self,jr,jphi,jz,**kwargs): """ NAME: Freqs PURPOSE: return the frequencies corresponding to a torus INPUT: jr - radial action (scalar) jphi - azimuthal action (scalar) jz - vertical action (scalar) tol= (object-wide value) goal for |dJ|/|J| along the torus OUTPUT: (OmegaR,Omegaphi,Omegaz) HISTORY: 2015-08-07 - Written - Bovy (UofT) """ out= actionAngleTorus_c.actionAngleTorus_Freqs_c(\ self._pot, jr,jphi,jz, tol=kwargs.get('tol',self._tol)) if out[3] != 0: warnings.warn("actionAngleTorus' AutoFit exited with non-zero return status %i: %s" % (out[3],_autofit_errvals[out[3]]), galpyWarning) return out
NAME: Freqs PURPOSE: return the frequencies corresponding to a torus INPUT: jr - radial action (scalar) jphi - azimuthal action (scalar) jz - vertical action (scalar) tol= (object-wide value) goal for |dJ|/|J| along the torus OUTPUT: (OmegaR,Omegaphi,Omegaz) HISTORY: 2015-08-07 - Written - Bovy (UofT)
def __dbfRecord(self, record): """Writes the dbf records.""" f = self.__getFileObj(self.dbf) if self.recNum == 0: # first records, so all fields should be set # allowing us to write the dbf header # cannot change the fields after this point self.__dbfHeader() # begin self.recNum += 1 if not self.fields[0][0].startswith("Deletion"): f.write(b' ') # deletion flag for (fieldName, fieldType, size, deci), value in zip(self.fields, record): fieldType = fieldType.upper() size = int(size) if fieldType in ("N","F"): # numeric or float: number stored as a string, right justified, and padded with blanks to the width of the field. if value in MISSING: value = b"*"*size # QGIS NULL elif not deci: # force to int try: # first try to force directly to int. # forcing a large int to float and back to int # will lose information and result in wrong nr. value = int(value) except ValueError: # forcing directly to int failed, so was probably a float. value = int(float(value)) value = format(value, "d")[:size].rjust(size) # caps the size if exceeds the field size else: value = float(value) value = format(value, ".%sf"%deci)[:size].rjust(size) # caps the size if exceeds the field size elif fieldType == "D": # date: 8 bytes - date stored as a string in the format YYYYMMDD. if isinstance(value, date): value = '{:04d}{:02d}{:02d}'.format(value.year, value.month, value.day) elif isinstance(value, list) and len(value) == 3: value = '{:04d}{:02d}{:02d}'.format(*value) elif value in MISSING: value = b'0' * 8 # QGIS NULL for date type elif is_string(value) and len(value) == 8: pass # value is already a date string else: raise ShapefileException("Date values must be either a datetime.date object, a list, a YYYYMMDD string, or a missing value.") elif fieldType == 'L': # logical: 1 byte - initialized to 0x20 (space) otherwise T or F. if value in MISSING: value = b' ' # missing is set to space elif value in [True,1]: value = b'T' elif value in [False,0]: value = b'F' else: value = b' ' # unknown is set to space else: # anything else is forced to string, truncated to the length of the field value = b(value, self.encoding, self.encodingErrors)[:size].ljust(size) if not isinstance(value, bytes): # just in case some of the numeric format() and date strftime() results are still in unicode (Python 3 only) value = b(value, 'ascii', self.encodingErrors) # should be default ascii encoding if len(value) != size: raise ShapefileException( "Shapefile Writer unable to pack incorrect sized value" " (size %d) into field '%s' (size %d)." % (len(value), fieldName, size)) f.write(value)
Writes the dbf records.
def to_python(self, value): """ "Called during deserialization and during the clean() method used from forms.... [s]hould deal gracefully with... (*) an instance of the correct type; (*) a string; (*) None (if the field allows null=True)." "For ``to_python()``, if anything goes wrong during value conversion, you should raise a ``ValidationError`` exception." """ if value is None: return value if not isinstance(value, str): return value try: return json_decode(value) except Exception as err: raise ValidationError(repr(err))
"Called during deserialization and during the clean() method used from forms.... [s]hould deal gracefully with... (*) an instance of the correct type; (*) a string; (*) None (if the field allows null=True)." "For ``to_python()``, if anything goes wrong during value conversion, you should raise a ``ValidationError`` exception."
def value(self): """returns the object as dictionary""" if self._outline is None: return { "type" : "esriSMS", "style" : self._style, "color" : self._color.value, "size" : self._size, "angle" : self._angle, "xoffset" : self._xoffset, "yoffset" : self._yoffset } else: return { "type" : "esriSMS", "style" : self._style, "color" : self._color.value, "size" : self._size, "angle" : self._angle, "xoffset" : self._xoffset, "yoffset" : self._yoffset, "outline" : { "width" : self._outline['width'], "color" : self._color.value } }
returns the object as dictionary
def getAggregation(self): """Returns: str : URIRef of the Aggregation entity """ self._check_initialized() return [ o for o in self.subjects(predicate=rdflib.RDF.type, object=ORE.Aggregation) ][0]
Returns: str : URIRef of the Aggregation entity
def connect(self, fedora_url, data=None, method='GET'): """Method attempts to connect to REST servers of the Fedora Commons repository using optional data parameter. Args: fedora_url(string): Fedora URL data(dict): Data to pass through to the REST endpoint method(str): REST Method, defaults to GET Returns: result(string): Response string from Fedora """ if data is None: data = {} if not fedora_url.startswith("http"): fedora_url = urllib.parse.urljoin(self.base_url, fedora_url) request = urllib.request.Request(fedora_url, method=method) request.add_header('Accept', 'text/turtle') request.add_header('Content-Type', 'text/turtle') if len(data) > 0: request.data = data try: response = urllib.request.urlopen(request) except urllib.error.URLError as err: if hasattr(err, 'reason'): print("failed to reach server at {} with {} method".format( fedora_url, request.method)) print("Reason: ", err.reason) print("Data: ", data) elif hasattr(err, 'code'): print("Server error {}".format(err.code)) raise err return response
Method attempts to connect to REST servers of the Fedora Commons repository using optional data parameter. Args: fedora_url(string): Fedora URL data(dict): Data to pass through to the REST endpoint method(str): REST Method, defaults to GET Returns: result(string): Response string from Fedora
def padding(self): """Returns the padding algorithm used, if this is the same for all dims. Use `.paddings` if you want a tuple with the padding algorithm used for each dimension. Returns: The padding algorithm used, if this is the same for all dimensions. Raises: ValueError: If different padding algorithms are used for different dimensions. """ # This is for backwards compatibility -- previously only a single # padding setting was supported across all dimensions. if all(p == self._padding[0] for p in self._padding): return self._padding[0] else: raise ValueError("This layer uses different paddings for different " "dimensions. Use .paddings if you want a tuple of " "per-dimension padding settings.")
Returns the padding algorithm used, if this is the same for all dims. Use `.paddings` if you want a tuple with the padding algorithm used for each dimension. Returns: The padding algorithm used, if this is the same for all dimensions. Raises: ValueError: If different padding algorithms are used for different dimensions.
def __detect_cl_tool(env, chainkey, cdict, cpriority=None): """ Helper function, picks a command line tool from the list and initializes its environment variables. """ if env.get(chainkey,'') == '': clpath = '' if cpriority is None: cpriority = cdict.keys() for cltool in cpriority: if __debug_tool_location: print("DocBook: Looking for %s"%cltool) clpath = env.WhereIs(cltool) if clpath: if __debug_tool_location: print("DocBook: Found:%s"%cltool) env[chainkey] = clpath if not env[chainkey + 'COM']: env[chainkey + 'COM'] = cdict[cltool] break
Helper function, picks a command line tool from the list and initializes its environment variables.
def clean_requires_python(candidates): """Get a cleaned list of all the candidates with valid specifiers in the `requires_python` attributes.""" all_candidates = [] sys_version = ".".join(map(str, sys.version_info[:3])) from packaging.version import parse as parse_version py_version = parse_version(os.environ.get("PIP_PYTHON_VERSION", sys_version)) for c in candidates: from_location = attrgetter("location.requires_python") requires_python = getattr(c, "requires_python", from_location(c)) if requires_python: # Old specifications had people setting this to single digits # which is effectively the same as '>=digit,<digit+1' if requires_python.isdigit(): requires_python = ">={0},<{1}".format( requires_python, int(requires_python) + 1 ) try: specifierset = SpecifierSet(requires_python) except InvalidSpecifier: continue else: if not specifierset.contains(py_version): continue all_candidates.append(c) return all_candidates
Get a cleaned list of all the candidates with valid specifiers in the `requires_python` attributes.
def acquire(self, host: str, port: int, use_ssl: bool=False, host_key: Optional[Any]=None) \ -> Union[Connection, SSLConnection]: '''Return an available connection. Args: host: A hostname or IP address. port: Port number. use_ssl: Whether to return a SSL connection. host_key: If provided, it overrides the key used for per-host connection pooling. This is useful for proxies for example. Coroutine. ''' assert isinstance(port, int), 'Expect int. Got {}'.format(type(port)) assert not self._closed yield from self._process_no_wait_releases() if use_ssl: connection_factory = functools.partial( self._ssl_connection_factory, hostname=host) else: connection_factory = functools.partial( self._connection_factory, hostname=host) connection_factory = functools.partial( HappyEyeballsConnection, (host, port), connection_factory, self._resolver, self._happy_eyeballs_table, is_ssl=use_ssl ) key = host_key or (host, port, use_ssl) with (yield from self._host_pools_lock): if key not in self._host_pools: host_pool = self._host_pools[key] = HostPool( connection_factory, max_connections=self._max_host_count ) self._host_pool_waiters[key] = 1 else: host_pool = self._host_pools[key] self._host_pool_waiters[key] += 1 _logger.debug('Check out %s', key) connection = yield from host_pool.acquire() connection.key = key # TODO: Verify this assert is always true # assert host_pool.count() <= host_pool.max_connections # assert key in self._host_pools # assert self._host_pools[key] == host_pool with (yield from self._host_pools_lock): self._host_pool_waiters[key] -= 1 return connection
Return an available connection. Args: host: A hostname or IP address. port: Port number. use_ssl: Whether to return a SSL connection. host_key: If provided, it overrides the key used for per-host connection pooling. This is useful for proxies for example. Coroutine.
def add_methods(source_class, blacklist=()): """Add wrapped versions of the `api` member's methods to the class. Any methods passed in `blacklist` are not added. Additionally, any methods explicitly defined on the wrapped class are not added. """ def wrap(wrapped_fx): """Wrap a GAPIC method; preserve its name and docstring.""" # If this is a static or class method, then we need to *not* # send self as the first argument. # # Similarly, for instance methods, we need to send self.api rather # than self, since that is where the actual methods were declared. instance_method = True # If this is a bound method it's a classmethod. self = getattr(wrapped_fx, "__self__", None) if issubclass(type(self), type): instance_method = False # Okay, we have figured out what kind of method this is; send # down the correct wrapper function. if instance_method: fx = lambda self, *a, **kw: wrapped_fx(self.api, *a, **kw) # noqa return functools.wraps(wrapped_fx)(fx) fx = lambda *a, **kw: wrapped_fx(*a, **kw) # noqa return staticmethod(functools.wraps(wrapped_fx)(fx)) def actual_decorator(cls): # Reflectively iterate over most of the methods on the source class # (the GAPIC) and make wrapped versions available on this client. for name in dir(source_class): # Ignore all private and magic methods. if name.startswith("_"): continue # Ignore anything on our blacklist. if name in blacklist: continue # Retrieve the attribute, and ignore it if it is not callable. attr = getattr(source_class, name) if not callable(attr): continue # Add a wrapper method to this object. fx = wrap(getattr(source_class, name)) setattr(cls, name, fx) # Return the augmented class. return cls # Simply return the actual decorator; this is returned from this method # and actually used to decorate the class. return actual_decorator
Add wrapped versions of the `api` member's methods to the class. Any methods passed in `blacklist` are not added. Additionally, any methods explicitly defined on the wrapped class are not added.
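A sketch of applying the decorator; the GAPIC class and the method being shadowed are hypothetical.

@add_methods(GeneratedSubscriberApi, blacklist=('pull',))
class SubscriberClient:
    def __init__(self, api):
        self.api = api  # wrapped instance methods dispatch through self.api

    def pull(self, *args, **kwargs):
        """Hand-written override; blacklisted so the GAPIC version is not added."""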
def sendDocument(self, chat_id, document, thumb=None, caption=None, parse_mode=None, disable_notification=None, reply_to_message_id=None, reply_markup=None): """ See: https://core.telegram.org/bots/api#senddocument :param document: Same as ``photo`` in :meth:`amanobot.Bot.sendPhoto` """ p = _strip(locals(), more=['document']) return self._api_request_with_file('sendDocument', _rectify(p), 'document', document)
See: https://core.telegram.org/bots/api#senddocument :param document: Same as ``photo`` in :meth:`amanobot.Bot.sendPhoto`
def handle_verification_form(form): """Handle email sending verification form.""" form.process(formdata=request.form) if form.validate_on_submit(): send_confirmation_instructions(current_user) # NOTE: Flash message. flash(_("Verification email sent."), category="success")
Handle email sending verification form.
def build_tree(self, data, tagname, attrs=None, depth=0): r"""Build xml tree. :param data: data for build xml. :param tagname: element tag name. :param attrs: element attributes. Default:``None``. :type attrs: dict or None :param depth: element depth of the hierarchy. Default:``0``. :type depth: int """ if data is None: data = '' indent = ('\n%s' % (self.__options['indent'] * depth)) if self.__options['indent'] else '' if isinstance(data, utils.DictTypes): if self.__options['hasattr'] and self.check_structure(data.keys()): attrs, values = self.pickdata(data) self.build_tree(values, tagname, attrs, depth) else: self.__tree.append('%s%s' % (indent, self.tag_start(tagname, attrs))) iter = data.iteritems() if self.__options['ksort']: iter = sorted(iter, key=lambda x:x[0], reverse=self.__options['reverse']) for k, v in iter: attrs = {} if self.__options['hasattr'] and isinstance(v, utils.DictTypes) and self.check_structure(v.keys()): attrs, v = self.pickdata(v) self.build_tree(v, k, attrs, depth+1) self.__tree.append('%s%s' % (indent, self.tag_end(tagname))) elif utils.is_iterable(data): for v in data: self.build_tree(v, tagname, attrs, depth) else: self.__tree.append(indent) data = self.safedata(data, self.__options['cdata']) self.__tree.append(self.build_tag(tagname, data, attrs))
r"""Build xml tree. :param data: data for build xml. :param tagname: element tag name. :param attrs: element attributes. Default:``None``. :type attrs: dict or None :param depth: element depth of the hierarchy. Default:``0``. :type depth: int
def _run_command( command, input_data=None, stdin=None, out_pipe=subprocess.PIPE, err_pipe=subprocess.PIPE, env=None, uuid=None, **kwargs ): """ Runs a command Args: command(list of str): args of the command to execute, including the command itself as command[0] as `['ls', '-l']` input_data(str): If passed, will feed that data to the subprocess through stdin out_pipe(int or file): File descriptor as passed to :ref:subprocess.Popen to use as stdout stdin(int or file): File descriptor as passed to :ref:subprocess.Popen to use as stdin err_pipe(int or file): File descriptor as passed to :ref:subprocess.Popen to use as stderr env(dict of str:str): If set, will use the given dict as env for the subprocess uuid(uuid): If set the command will be logged with the given uuid converted to string, otherwise, a uuid v4 will be generated. **kwargs: Any other keyword args passed will be passed to the :ref:subprocess.Popen call Returns: lago.utils.CommandStatus: result of the interactive execution """ # add libexec to PATH if needed if uuid is None: uuid = uuid_m.uuid4() if constants.LIBEXEC_DIR not in os.environ['PATH'].split(':'): os.environ['PATH' ] = '%s:%s' % (constants.LIBEXEC_DIR, os.environ['PATH']) if input_data and not stdin: kwargs['stdin'] = subprocess.PIPE elif stdin: kwargs['stdin'] = stdin if env is None: env = os.environ.copy() else: env['PATH'] = ':'.join( list( set( env.get('PATH', '').split(':') + os.environ['PATH'] .split(':') ), ), ) popen = subprocess.Popen( ' '.join('"%s"' % arg for arg in command), stdout=out_pipe, stderr=err_pipe, shell=True, env=env, **kwargs ) out, err = popen.communicate(input_data) LOGGER.debug( '%s: command exit with return code: %d', str(uuid), popen.returncode ) if out: LOGGER.debug('%s: command stdout: %s', str(uuid), out) if err: LOGGER.debug('%s: command stderr: %s', str(uuid), err) return CommandStatus(popen.returncode, out, err)
Runs a command Args: command(list of str): args of the command to execute, including the command itself as command[0] as `['ls', '-l']` input_data(str): If passed, will feed that data to the subprocess through stdin out_pipe(int or file): File descriptor as passed to :ref:subprocess.Popen to use as stdout stdin(int or file): File descriptor as passed to :ref:subprocess.Popen to use as stdin err_pipe(int or file): File descriptor as passed to :ref:subprocess.Popen to use as stderr env(dict of str:str): If set, will use the given dict as env for the subprocess uuid(uuid): If set the command will be logged with the given uuid converted to string, otherwise, a uuid v4 will be generated. **kwargs: Any other keyword args passed will be passed to the :ref:subprocess.Popen call Returns: lago.utils.CommandStatus: result of the interactive execution
def check_update (): """Return the following values: (False, errmsg) - online version could not be determined (True, None) - user has newest version (True, (version, url string)) - update available (True, (version, None)) - current version is newer than online version """ version, value = get_online_version() if version is None: # value is an error message return False, value if version == CurrentVersion: # user has newest version return True, None if is_newer_version(version): # value is an URL linking to the update package return True, (version, value) # user is running a local or development version return True, (version, None)
Return the following values: (False, errmsg) - online version could not be determined (True, None) - user has newest version (True, (version, url string)) - update available (True, (version, None)) - current version is newer than online version
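A sketch of consuming the three return shapes described in the docstring.

ok, info = check_update()
if not ok:
    print('version check failed:', info)  # info is an error message
elif info is None:
    print('already running the newest version')
else:
    version, url = info
    if url:
        print('update %s available at %s' % (version, url))
    else:
        print('running a local or development version newer than %s' % version)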
def chainproperty(func): """Extend sure with a custom chain property.""" func = assertionproperty(func) setattr(AssertionBuilder, func.fget.__name__, func) return func
Extend sure with a custom chain property.
def add_allowance(self, caller): """Adds a new allowance for the method. :param tuple caller: A tuple indicating where the method was called :return: The new ``Allowance``. :rtype: Allowance """ allowance = Allowance(self._target, self._method_name, caller) self._allowances.insert(0, allowance) return allowance
Adds a new allowance for the method. :param tuple caller: A tuple indicating where the method was called :return: The new ``Allowance``. :rtype: Allowance
def load(fnames, tag='', sat_id=None): """ Load the SuperMAG files Parameters ----------- fnames : (list) List of filenames tag : (str or NoneType) Denotes type of file to load. Accepted types are 'indices', 'all', 'stations', and '' (for just magnetometer measurements). (default='') sat_id : (str or NoneType) Satellite ID for constellations, not used. (default=None) Returns -------- data : (pandas.DataFrame) Object containing satellite data meta : (pysat.Meta) Object containing metadata such as column names and units """ # Ensure that there are files to load if len(fnames) <= 0 : return pysat.DataFrame(None), pysat.Meta(None) # Ensure that the files are in a list if isinstance(fnames, str): fnames = [fnames] # Initialise the output data data = pds.DataFrame() baseline = list() # Cycle through the files for fname in fnames: fname = fname[:-11] # Remove date index from end of filename file_type = path.splitext(fname)[1].lower() # Open and load the files for each file type if file_type == ".csv": if tag != "indices": temp = load_csv_data(fname, tag) else: temp, bline = load_ascii_data(fname, tag) if bline is not None: baseline.append(bline) # Save the loaded data in the output data structure if len(temp.columns) > 0: data = pds.concat([data, temp], axis=0) del temp # If data was loaded, update the meta data if len(data.columns) > 0: meta = pysat.Meta() for cc in data.columns: meta[cc] = update_smag_metadata(cc) meta.info = {'baseline':format_baseline_list(baseline)} else: meta = pysat.Meta(None) return data, meta
Load the SuperMAG files Parameters ----------- fnames : (list) List of filenames tag : (str or NoneType) Denotes type of file to load. Accepted types are 'indices', 'all', 'stations', and '' (for just magnetometer measurements). (default='') sat_id : (str or NoneType) Satellite ID for constellations, not used. (default=None) Returns -------- data : (pandas.DataFrame) Object containing satellite data meta : (pysat.Meta) Object containing metadata such as column names and units
def create(self, name: str, descriptor: str, value: Constant=None) -> Field: """ Creates a new field from `name` and `descriptor`. For example:: >>> from jawa.cf import ClassFile >>> cf = ClassFile.create('BeerCounter') >>> field = cf.fields.create('BeerCount', 'I') To automatically create a static field, pass a value:: >>> from jawa.cf import ClassFile >>> cf = ClassFile.create('BeerCounter') >>> field = cf.fields.create( ... 'MaxBeer', ... 'I', ... cf.constants.create_integer(99) ... ) :param name: Name of the new field. :param descriptor: Type descriptor of the new field. :param value: Optional static value for the field. """ field = Field(self._cf) name = self._cf.constants.create_utf8(name) descriptor = self._cf.constants.create_utf8(descriptor) field._name_index = name.index field._descriptor_index = descriptor.index field.access_flags.acc_public = True if value is not None: field.attributes.create(ConstantValueAttribute, value) field.access_flags.acc_static = True self.append(field) return field
Creates a new field from `name` and `descriptor`. For example:: >>> from jawa.cf import ClassFile >>> cf = ClassFile.create('BeerCounter') >>> field = cf.fields.create('BeerCount', 'I') To automatically create a static field, pass a value:: >>> from jawa.cf import ClassFile >>> cf = ClassFile.create('BeerCounter') >>> field = cf.fields.create( ... 'MaxBeer', ... 'I', ... cf.constants.create_integer(99) ... ) :param name: Name of the new field. :param descriptor: Type descriptor of the new field. :param value: Optional static value for the field.
def get_dataset(dataset_id,**kwargs): """ Get a single dataset, by ID """ user_id = int(kwargs.get('user_id')) if dataset_id is None: return None try: dataset_rs = db.DBSession.query(Dataset.id, Dataset.type, Dataset.unit_id, Dataset.name, Dataset.hidden, Dataset.cr_date, Dataset.created_by, DatasetOwner.user_id, null().label('metadata'), case([(and_(Dataset.hidden=='Y', DatasetOwner.user_id.is_(None)), None)], else_=Dataset.value).label('value')).filter( Dataset.id==dataset_id).outerjoin(DatasetOwner, and_(DatasetOwner.dataset_id==Dataset.id, DatasetOwner.user_id==user_id)).one() rs_dict = dataset_rs._asdict() #convert the value row into a string as it is returned as a binary if dataset_rs.value is not None: rs_dict['value'] = str(dataset_rs.value) if dataset_rs.hidden == 'N' or (dataset_rs.hidden == 'Y' and dataset_rs.user_id is not None): metadata = db.DBSession.query(Metadata).filter(Metadata.dataset_id==dataset_id).all() rs_dict['metadata'] = metadata else: rs_dict['metadata'] = [] except NoResultFound: raise HydraError("Dataset %s does not exist."%(dataset_id)) dataset = namedtuple('Dataset', rs_dict.keys())(**rs_dict) return dataset
Get a single dataset, by ID
def lines(self: object, fileids: str, plaintext: bool = True): """ Tokenizes documents in the corpus by line """ for text in self.texts(fileids, plaintext): text = re.sub(r'\n\s*\n', '\n', text, flags=re.MULTILINE) # Remove blank lines for line in text.split('\n'): yield line
Tokenizes documents in the corpus by line
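A usage sketch, assuming a corpus reader instance that implements texts() as the method requires; the factory and fileid below are hypothetical.

reader = make_corpus_reader('latin_library')  # hypothetical factory
for line in reader.lines('cicero/cat1.txt'):
    print(line)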
def _add_subtitles(self, subtitles): '''Adds subtitles to playing video. :param subtitles: A URL to a remote subtitles file or a local filename for a subtitles file. .. warning:: You must start playing a video before calling this method or it will wait for up to 30 seconds and then raise an exception. ''' # This method is named with an underscore to suggest that callers pass # the subtitles argument to set_resolved_url instead of calling this # method directly. This is to ensure a video is played before calling # this method. player = xbmc.Player() for _ in xrange(30): if player.isPlaying(): break time.sleep(1) else: raise Exception('No video playing. Aborted after 30 seconds.') player.setSubtitles(subtitles)
Adds subtitles to playing video. :param subtitles: A URL to a remote subtitles file or a local filename for a subtitles file. .. warning:: You must start playing a video before calling this method or it will wait for up to 30 seconds and then raise an exception.
def iter_columns(condition): """ Yield all columns in the condition or its inner conditions. Unwraps proxies when the condition's column (or any of its values) include paths. """ # Like iter_conditions, this can't live in each condition without going possibly infinite on the # recursion, or passing the visited set through every call. That makes the signature ugly, so we # take care of it here. Luckily, it's pretty easy to leverage iter_conditions and just unpack the # actual columns. visited = set() for condition in iter_conditions(condition): if condition.operation in ("and", "or", "not"): continue # Non-meta conditions always have a column, and each of values has the potential to be a column. # Comparison will only have a list of len 1, but it's simpler to just iterate values and check each # unwrap proxies created for paths column = proxied(condition.column) # special case for None # this could also have skipped on isinstance(condition, Condition) # but this is slightly more flexible for users to create their own None-sentinel Conditions if column is None: continue if column not in visited: visited.add(column) yield column for value in condition.values: if isinstance(value, ComparisonMixin): if value not in visited: visited.add(value) yield value
Yield all columns in the condition or its inner conditions. Unwraps proxies when the condition's column (or any of its values) include paths.
def _logging_callback(level, domain, message, data): """ Callback that outputs libgphoto2's logging message via Python's standard logging facilities. :param level: libgphoto2 logging level :param domain: component the message originates from :param message: logging message :param data: Other data in the logging record (unused) """ domain = ffi.string(domain).decode() message = ffi.string(message).decode() logger = LOGGER.getChild(domain) if level not in LOG_LEVELS: return logger.log(LOG_LEVELS[level], message)
Callback that outputs libgphoto2's logging message via Python's standard logging facilities. :param level: libgphoto2 logging level :param domain: component the message originates from :param message: logging message :param data: Other data in the logging record (unused)
def Sign(self, data, signing_key, verify_key=None): """Use the data to sign this blob. Args: data: String containing the blob data. signing_key: The key to sign with. verify_key: Key to verify with. If None we assume the signing key also contains the public key. Returns: self for call chaining. """ if signing_key.KeyLen() < 2048: logging.warning("signing key is too short.") self.signature = signing_key.Sign(data) self.signature_type = self.SignatureType.RSA_PKCS1v15 self.digest = hashlib.sha256(data).digest() self.digest_type = self.HashType.SHA256 self.data = data # Test we can verify before we send it off. if verify_key is None: verify_key = signing_key.GetPublicKey() # Verify our own data. self.Verify(verify_key) return self
Use the data to sign this blob. Args: data: String containing the blob data. signing_key: The key to sign with. verify_key: Key to verify with. If None we assume the signing key also contains the public key. Returns: self for call chaining.
def initializerepo(self): """ Fill empty directory with products and make first commit """ try: os.mkdir(self.repopath) except OSError: pass cmd = self.repo.init(bare=self.bare, shared=self.shared) if not self.bare: self.write_testing_data([], []) self.write_training_data([], []) self.write_classifier(None) cmd = self.repo.add('training.pkl') cmd = self.repo.add('testing.pkl') cmd = self.repo.add('classifier.pkl') cmd = self.repo.commit(m='initial commit') cmd = self.repo.tag('initial') cmd = self.set_version('initial')
Fill empty directory with products and make first commit
def decimal_year(year, month, day): """ Allows to calculate the decimal year for a vector of dates (TODO this is legacy code kept to maintain comparability with previous declustering algorithms!) :param year: year column from catalogue matrix :type year: numpy.ndarray :param month: month column from catalogue matrix :type month: numpy.ndarray :param day: day column from catalogue matrix :type day: numpy.ndarray :returns: decimal year column :rtype: numpy.ndarray """ marker = np.array([0., 31., 59., 90., 120., 151., 181., 212., 243., 273., 304., 334.]) tmonth = (month - 1).astype(int) day_count = marker[tmonth] + day - 1. dec_year = year + (day_count / 365.) return dec_year
Allows to calculate the decimal year for a vector of dates (TODO this is legacy code kept to maintain comparability with previous declustering algorithms!) :param year: year column from catalogue matrix :type year: numpy.ndarray :param month: month column from catalogue matrix :type month: numpy.ndarray :param day: day column from catalogue matrix :type day: numpy.ndarray :returns: decimal year column :rtype: numpy.ndarray
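A quick vectorised check; note the flat 365-day year, so values for dates after 29 February in leap years drift slightly.

import numpy as np

year = np.array([1990, 2000, 2010])
month = np.array([1, 7, 12])
day = np.array([1, 15, 31])
print(decimal_year(year, month, day))  # -> [1990.0, 2000.5342..., 2010.9973...]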
def case(self, case_id=None): """Return a Case object If no case_id is given, return the first case Args: case_id (str): A case id Returns: case(Case): A Case object, or None if no case matches """ cases = self.cases() if case_id: for case in cases: if case.case_id == case_id: return case else: if cases: return cases[0] return None
Return a Case object If no case_id is given, return the first case Args: case_id (str): A case id Returns: case(Case): A Case object, or None if no case matches
def _parseDOM(istack): """ Recursively go through element array and create DOM. Args: istack (list): List of :class:`.HTMLElement` objects. Returns: list: DOM tree as list. """ ostack = [] end_tag_index = 0 def neither_nonpair_or_end_or_comment(el): return not (el.isNonPairTag() or el.isEndTag() or el.isComment()) index = 0 while index < len(istack): el = istack[index] # check if this is pair tag end_tag_index = _indexOfEndTag(istack[index:]) if end_tag_index == 0 and neither_nonpair_or_end_or_comment(el): el.isNonPairTag(True) if end_tag_index == 0: if not el.isEndTag(): ostack.append(el) else: el.childs = _parseDOM(istack[index + 1: end_tag_index + index]) el.endtag = istack[end_tag_index + index] # reference to endtag el.endtag.openertag = el ostack.append(el) ostack.append(el.endtag) index = end_tag_index + index index += 1 return ostack
Recursively go through element array and create DOM. Args: istack (list): List of :class:`.HTMLElement` objects. Returns: list: DOM tree as list.
def create_resource(self, resource_type=None, uri=None): ''' Convenience method for creating a new resource Note: A Resource is instantiated, but is not yet created. Still requires resource.create(). Args: uri (rdflib.term.URIRef, str): uri of resource to create resource_type (NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): resource type to create Returns: (NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): instance of appropriate type ''' if resource_type in [NonRDFSource, Binary, BasicContainer, DirectContainer, IndirectContainer]: return resource_type(self, uri) else: raise TypeError("expecting Resource type, such as BasicContainer or NonRDFSource")
Convenience method for creating a new resource Note: A Resource is instantiated, but is not yet created. Still requires resource.create(). Args: uri (rdflib.term.URIRef, str): uri of resource to create resource_type (NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): resource type to create Returns: (NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): instance of appropriate type
def list_(): ''' Return a list of accepted, denied, unaccepted and rejected keys. This is the same output as `salt-key -L` CLI Example: .. code-block:: bash salt 'master' minion.list ''' pki_dir = __salt__['config.get']('pki_dir', '') # We have to replace the minion/master directories pki_dir = pki_dir.replace('minion', 'master') # The source code below is (nearly) a copy of salt.key.Key.list_keys key_dirs = _check_minions_directories(pki_dir) ret = {} for dir_ in key_dirs: ret[os.path.basename(dir_)] = [] try: for fn_ in salt.utils.data.sorted_ignorecase(os.listdir(dir_)): if not fn_.startswith('.'): if os.path.isfile(os.path.join(dir_, fn_)): ret[os.path.basename(dir_)].append(fn_) except (OSError, IOError): # key dir kind is not created yet, just skip continue return ret
Return a list of accepted, denied, unaccepted and rejected keys. This is the same output as `salt-key -L` CLI Example: .. code-block:: bash salt 'master' minion.list
def get_port_vendor_info(port=None): """ Return vendor information for a usb2serial device. It may depend on the operating system. :param string port: port of the usb2serial device :Example: Result with a USB2Dynamixel on Linux: In [1]: import pypot.dynamixel In [2]: pypot.dynamixel.get_port_vendor_info('/dev/ttyUSB0') Out[2]: 'USB VID:PID=0403:6001 SNR=A7005LKE' """ port_info_dict = dict((x[0], x[2]) for x in serial.tools.list_ports.comports()) return port_info_dict[port] if port is not None else port_info_dict
Return vendor information for a usb2serial device. It may depend on the operating system. :param string port: port of the usb2serial device :Example: Result with a USB2Dynamixel on Linux: In [1]: import pypot.dynamixel In [2]: pypot.dynamixel.get_port_vendor_info('/dev/ttyUSB0') Out[2]: 'USB VID:PID=0403:6001 SNR=A7005LKE'
def debug_dump(message, file_prefix="dump"): """ Utility while developing to dump message data to play with in the interpreter """ global index index += 1 with open("%s_%s.dump" % (file_prefix, index), 'wb') as f: f.write(message.SerializeToString())
Utility while developing to dump message data to play with in the interpreter
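A usage sketch; index is the module-level counter the function mutates, and the message objects are hypothetical protobufs.

index = 0  # module-level counter consumed by debug_dump

debug_dump(login_response, file_prefix='login')  # writes login_1.dump
debug_dump(chat_message, file_prefix='chat')     # writes chat_2.dump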
def process(self, request, item): """Do a direct payment.""" warn_untested() from paypal.pro.helpers import PayPalWPP wpp = PayPalWPP(request) # Change the model information into a dict that PayPal can understand. params = model_to_dict(self, exclude=self.ADMIN_FIELDS) params['acct'] = self.acct params['creditcardtype'] = self.creditcardtype params['expdate'] = self.expdate params['cvv2'] = self.cvv2 params.update(item) # Create recurring payment: if 'billingperiod' in params: return wpp.createRecurringPaymentsProfile(params, direct=True) # Create single payment: else: return wpp.doDirectPayment(params)
Do a direct payment.
def date_to_um_dump_date(date): """ Convert a datetime.date object to a um dump format date, which is <decade><year><month><day>0 To accommodate two digit months and days the UM uses letters. e.g. the 1st of October is written as 01a10. """ assert(date.month <= 12) decade = date.year // 10 # UM can only handle 36 decades then goes back to the beginning. decade = decade % 36 year = date.year % 10 month = date.month day = date.day um_d = string.digits + string.ascii_letters[:26] um_dump_date = ( '{decade}{year}{month}{day}0'.format( decade=um_d[decade], year=um_d[year], month=um_d[month], day=um_d[day] ) ) return um_dump_date
Convert a datetime.date object to a um dump format date, which is <decade><year><month><day>0 To accommodate two digit months and days the UM uses letters. e.g. the 1st of October is written as 01a10.
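A worked example: for 1 October 2011 the decade is 201 % 36 = 21, which maps past the ten digits into the letters, giving 'l'; month 10 likewise becomes 'a'.

from datetime import date

print(date_to_um_dump_date(date(2011, 10, 1)))  # 'l1a10'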
def clausulae_analysis(prosody): """ Return dictionary in which the key is a type of clausula and the value is its frequency. :param prosody: the prosody of a prose text (must be in the format of the scansion produced by the scanner classes. :return: dictionary of prosody """ prosody = ''.join(prosody) return { 'cretic + trochee': prosody.count('¯˘¯¯x'), '4th paeon + trochee': prosody.count('˘˘˘¯¯x'), '1st paeon + trochee': prosody.count('¯˘˘˘¯x'), 'substituted cretic + trochee': prosody.count('˘˘˘˘˘¯x'), '1st paeon + anapest': prosody.count('¯˘˘˘˘˘x'), 'double cretic': prosody.count('¯˘¯¯˘x'), '4th paeon + cretic': prosody.count('˘˘˘¯¯˘x'), 'molossus + cretic': prosody.count('¯¯¯¯˘x'), 'double trochee': prosody.count('¯˘¯x'), 'molossus + double trochee': prosody.count('¯¯¯¯˘¯x'), 'cretic + double trochee': prosody.count('¯˘¯¯˘¯x'), 'dactyl + double trochee': prosody.count('¯˘˘¯˘¯x'), 'choriamb + double trochee': prosody.count('¯˘˘¯¯˘¯x'), 'cretic + iamb': prosody.count('¯˘¯˘x'), 'molossus + iamb': prosody.count('¯¯¯˘x'), 'double spondee': prosody.count('¯¯¯x'), 'cretic + double spondee': prosody.count('¯˘¯¯¯¯x'), 'heroic': prosody.count('¯˘˘¯x') }
Return dictionary in which the key is a type of clausula and the value is its frequency. :param prosody: the prosody of a prose text (must be in the format of the scansion produced by the scanner classes. :return: dictionary of prosody
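A minimal sketch; the input mimics scanner output, with 'x' marking the anceps final syllable.

scansion = ['¯˘¯¯x', '¯¯¯x']
counts = clausulae_analysis(scansion)
print(counts['cretic + trochee'])  # 1
print(counts['double spondee'])    # 1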
def load_config(self): """Load the config from the config file or template.""" config = Config() self.config_obj = config.load('awsshellrc') self.config_section = self.config_obj['aws-shell'] self.model_completer.match_fuzzy = self.config_section.as_bool( 'match_fuzzy') self.enable_vi_bindings = self.config_section.as_bool( 'enable_vi_bindings') self.show_completion_columns = self.config_section.as_bool( 'show_completion_columns') self.show_help = self.config_section.as_bool('show_help') self.theme = self.config_section['theme']
Load the config from the config file or template.
def datacenter_configured(name): ''' Makes sure a datacenter exists. If the state is run by an ``esxdatacenter`` minion, the name of the datacenter is retrieved from the proxy details, otherwise the datacenter has the same name as the state. Supported proxies: esxdatacenter name: Datacenter name. Ignored if the proxytype is ``esxdatacenter``. ''' proxy_type = __salt__['vsphere.get_proxy_type']() if proxy_type == 'esxdatacenter': dc_name = __salt__['esxdatacenter.get_details']()['datacenter'] else: dc_name = name log.info('Running datacenter_configured for datacenter \'%s\'', dc_name) ret = {'name': name, 'changes': {}, 'result': None, 'comment': 'Default'} comments = [] si = None try: si = __salt__['vsphere.get_service_instance_via_proxy']() dcs = __salt__['vsphere.list_datacenters_via_proxy']( datacenter_names=[dc_name], service_instance=si) if not dcs: if __opts__['test']: comments.append('State will create ' 'datacenter \'{0}\'.'.format(dc_name)) else: log.debug('Creating datacenter \'%s\'', dc_name) __salt__['vsphere.create_datacenter'](dc_name, si) comments.append('Created datacenter \'{0}\'.'.format(dc_name)) log.info(comments[-1]) ret['changes'].update({'new': {'name': dc_name}}) else: comments.append('Datacenter \'{0}\' already exists. Nothing to be ' 'done.'.format(dc_name)) log.info(comments[-1]) __salt__['vsphere.disconnect'](si) ret['comment'] = '\n'.join(comments) ret['result'] = None if __opts__['test'] and ret['changes'] else True return ret except salt.exceptions.CommandExecutionError as exc: log.error('Error: %s', exc) if si: __salt__['vsphere.disconnect'](si) ret.update({ 'result': False if not __opts__['test'] else None, 'comment': six.text_type(exc)}) return ret
Makes sure a datacenter exists. If the state is run by an ``esxdatacenter`` minion, the name of the datacenter is retrieved from the proxy details, otherwise the datacenter has the same name as the state. Supported proxies: esxdatacenter name: Datacenter name. Ignored if the proxytype is ``esxdatacenter``.
def get_host_ipv4(name=None, mac=None, allow_array=False, **api_opts): ''' Get ipv4 address from host record. Use `allow_array` to return possible multiple values. CLI Examples: .. code-block:: bash salt-call infoblox.get_host_ipv4 host=localhost.domain.com salt-call infoblox.get_host_ipv4 mac=00:50:56:84:6e:ae ''' data = get_host(name=name, mac=mac, **api_opts) if data and 'ipv4addrs' in data: l = [] for a in data['ipv4addrs']: if 'ipv4addr' in a: l.append(a['ipv4addr']) if allow_array: return l if l: return l[0] return None
Get ipv4 address from host record. Use `allow_array` to return possible multiple values. CLI Examples: .. code-block:: bash salt-call infoblox.get_host_ipv4 host=localhost.domain.com salt-call infoblox.get_host_ipv4 mac=00:50:56:84:6e:ae
def has_integration(self, path, method): """ Checks if an API Gateway integration is already present at the given path/method :param string path: Path name :param string method: HTTP method :return: True, if an API Gateway integration is already present """ method = self._normalize_method_name(method) path_dict = self.get_path(path) return self.has_path(path, method) and \ isinstance(path_dict[method], dict) and \ self.method_has_integration(path_dict[method])
Checks if an API Gateway integration is already present at the given path/method :param string path: Path name :param string method: HTTP method :return: True, if an API Gateway integration is already present
def pop(self, key, resource_type): """ Extract an object from the list. If the key is not in the cache, this will raise a KeyError. If the list is empty, method will return None """ with self._objects_queue_lock: objects = self._objects_queue[key].get(resource_type, []) return objects.pop() if objects else None
Extract an object from the list. If the key is not in the cache, this will raise a KeyError. If the list is empty, method will return None
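A consumer sketch, assuming a cache object exposing the method above; the key, resource type, and fallback are hypothetical.

try:
    obj = cache.pop('tenant-a', 'volume')
except KeyError:
    obj = None  # the key was never cached
if obj is None:
    obj = build_volume()  # hypothetical fallback when the list is empty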
def get_sci_segs_for_ifo(ifo, cp, start_time, end_time, out_dir, tags=None): """ Obtain science segments for the selected ifo Parameters ----------- ifo : string The string describing the ifo to obtain science times for. start_time : gps time (either int/LIGOTimeGPS) The time at which to begin searching for segments. end_time : gps time (either int/LIGOTimeGPS) The time at which to stop searching for segments. out_dir : path The directory in which output will be stored. tag : string, optional (default=None) Use this to specify a tag. This can be used if this module is being called more than once to give call specific configuration (by setting options in [workflow-datafind-${TAG}] rather than [workflow-datafind]). This is also used to tag the Files returned by the class to uniqueify the Files and uniqueify the actual filename. Returns -------- sci_segs : ligo.segments.segmentlist The segmentlist generated by this call sci_xml_file : pycbc.workflow.core.SegFile The workflow File object corresponding to this science segments file. out_sci_seg_name : string The name of the output segment list in the output XML file. """ if tags is None: tags = [] seg_valid_seg = segments.segment([start_time,end_time]) sci_seg_name = cp.get_opt_tags( "workflow-segments", "segments-%s-science-name" %(ifo.lower()), tags) sci_seg_url = cp.get_opt_tags( "workflow-segments", "segments-database-url", tags) # NOTE: ligolw_segment_query returns slightly strange output. The output # segment list is put in with name "RESULT". So this is hardcoded here out_sci_seg_name = "RESULT" if tags: sci_xml_file_path = os.path.join( out_dir, "%s-SCIENCE_SEGMENTS_%s.xml" \ %(ifo.upper(), '_'.join(tags))) tag_list=tags + ['SCIENCE'] else: sci_xml_file_path = os.path.join( out_dir, "%s-SCIENCE_SEGMENTS.xml" %(ifo.upper()) ) tag_list = ['SCIENCE'] if file_needs_generating(sci_xml_file_path, cp, tags=tags): seg_find_call = [ resolve_url(cp.get("executables","segment_query"), permissions=stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR), "--query-segments", "--segment-url", sci_seg_url, "--gps-start-time", str(start_time), "--gps-end-time", str(end_time), "--include-segments", sci_seg_name, "--output-file", sci_xml_file_path ] make_external_call(seg_find_call, out_dir=os.path.join(out_dir,'logs'), out_basename='%s-science-call' %(ifo.lower()) ) # Yes its yucky to generate a file and then read it back in. sci_xml_file_path = os.path.abspath(sci_xml_file_path) sci_xml_file = SegFile.from_segment_xml(sci_xml_file_path, tags=tag_list, valid_segment=seg_valid_seg) # NOTE: ligolw_segment_query returns slightly strange output. The output # segment_summary output does not use RESULT. Therefore move the # segment_summary across. sci_xml_file.seg_summ_dict[ifo.upper() + ":" + out_sci_seg_name] = \ sci_xml_file.seg_summ_dict[':'.join(sci_seg_name.split(':')[0:2])] sci_segs = sci_xml_file.return_union_seglist() return sci_segs, sci_xml_file, out_sci_seg_name
Obtain science segments for the selected ifo

Parameters
-----------
ifo : string
    The string describing the ifo to obtain science times for.
start_time : gps time (either int/LIGOTimeGPS)
    The time at which to begin searching for segments.
end_time : gps time (either int/LIGOTimeGPS)
    The time at which to stop searching for segments.
out_dir : path
    The directory in which output will be stored.
tags : list of strings, optional (default=None)
    Use this to specify tags. This can be used if this module is being
    called more than once to give call specific configuration (by setting
    options in [workflow-datafind-${TAG}] rather than
    [workflow-datafind]). This is also used to tag the Files returned by
    the class and to make the Files and the actual filenames unique.

Returns
--------
sci_segs : ligo.segments.segmentlist
    The segmentlist generated by this call
sci_xml_file : pycbc.workflow.core.SegFile
    The workflow File object corresponding to this science segments file.
out_sci_seg_name : string
    The name of the output segment list in the output XML file.
def unserialize(jsonstr): ''' Unserialize a JSON string representation of a topology ''' topod = json.loads(jsonstr) G = json_graph.node_link_graph(topod) for n,ndict in G.nodes(data=True): if 'nodeobj' not in ndict or 'type' not in ndict: raise Exception("Required type information is not present in serialized node {} :{}".format(n, ndict)) nobj = ndict['nodeobj'] cls = eval(ndict['type']) ndict['nodeobj'] = cls(**dict(nobj)) t = Topology(nxgraph=G) return t
Unserialize a JSON string representation of a topology
def temp_dir(sub_dir='work'):
    """Obtain the temporary working directory for the operating system.

    An inasafe subdirectory will automatically be created under this and
    if specified, a user subdirectory under that.

    .. note:: You can use this together with unique_filename to create
       a file in a temporary directory under the inasafe workspace. e.g.

       tmpdir = temp_dir('testing')
       tmpfile = unique_filename(dir=tmpdir)
       print tmpfile
       /tmp/inasafe/23-08-2012/timlinux/testing/tmpMRpF_C

       If you specify INASAFE_WORK_DIR as an environment var, it will be
       used in preference to the system temp directory.

    :param sub_dir: Optional argument which will cause an additional
        subdirectory to be created e.g. /tmp/inasafe/foo/
    :type sub_dir: str

    :return: Path to the temp dir that is created.
    :rtype: str

    :raises: Any errors from the underlying system calls.
    """
    user = getpass.getuser().replace(' ', '_')
    current_date = date.today()
    date_string = current_date.isoformat()
    if 'INASAFE_WORK_DIR' in os.environ:
        new_directory = os.environ['INASAFE_WORK_DIR']
    else:
        # Following 4 lines are a workaround for tempfile.tempdir()
        # unreliability
        handle, filename = mkstemp()
        os.close(handle)
        new_directory = os.path.dirname(filename)
        os.remove(filename)

    path = os.path.join(new_directory, 'inasafe', date_string, user, sub_dir)

    if not os.path.exists(path):
        # Ensure that the dir is world writable
        # Umask sets the new mask and returns the old
        old_mask = os.umask(0000)
        os.makedirs(path, 0o777)
        # Reinstate the old mask for tmp
        os.umask(old_mask)
    return path
Obtain the temporary working directory for the operating system. An inasafe subdirectory will automatically be created under this and if specified, a user subdirectory under that. .. note:: You can use this together with unique_filename to create a file in a temporary directory under the inasafe workspace. e.g. tmpdir = temp_dir('testing') tmpfile = unique_filename(dir=tmpdir) print tmpfile /tmp/inasafe/23-08-2012/timlinux/testing/tmpMRpF_C If you specify INASAFE_WORK_DIR as an environment var, it will be used in preference to the system temp directory. :param sub_dir: Optional argument which will cause an additional subdirectory to be created e.g. /tmp/inasafe/foo/ :type sub_dir: str :return: Path to the temp dir that is created. :rtype: str :raises: Any errors from the underlying system calls.
def is_monotonic(full_list):
    """
    Determine whether elements in a list are monotonic, i.e. unique
    elements are clustered together. E.g. [5, 5, 3, 4] is, [5, 3, 5] is not.
    """
    prev_elements = set({full_list[0]})
    prev_item = full_list[0]

    for item in full_list:
        if item != prev_item:
            if item in prev_elements:
                return False
            prev_item = item
            prev_elements.add(item)

    return True
Determine whether elements in a list are monotonic, i.e. unique elements are clustered together. E.g. [5, 5, 3, 4] is, [5, 3, 5] is not.
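A quick usage sketch of the clustering check above, with the inputs chosen to exercise both outcomes:

assert is_monotonic([5, 5, 3, 4])         # each value appears in one contiguous run
assert is_monotonic(['a', 'a', 'b'])      # works for any hashable elements
assert not is_monotonic([5, 3, 5])        # 5 reappears after 3, so not "monotonic"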
def _to_protobuf(self): """Convert the current query into the equivalent protobuf. Returns: google.cloud.firestore_v1beta1.types.StructuredQuery: The query protobuf. """ projection = self._normalize_projection(self._projection) orders = self._normalize_orders() start_at = self._normalize_cursor(self._start_at, orders) end_at = self._normalize_cursor(self._end_at, orders) query_kwargs = { "select": projection, "from": [ query_pb2.StructuredQuery.CollectionSelector( collection_id=self._parent.id ) ], "where": self._filters_pb(), "order_by": orders, "start_at": _cursor_pb(start_at), "end_at": _cursor_pb(end_at), } if self._offset is not None: query_kwargs["offset"] = self._offset if self._limit is not None: query_kwargs["limit"] = wrappers_pb2.Int32Value(value=self._limit) return query_pb2.StructuredQuery(**query_kwargs)
Convert the current query into the equivalent protobuf. Returns: google.cloud.firestore_v1beta1.types.StructuredQuery: The query protobuf.
def center(self): """Return footprint center in world coordinates, as GeoVector.""" image_center = Point(self.width / 2, self.height / 2) return self.to_world(image_center)
Return footprint center in world coordinates, as GeoVector.
def apply_security_groups(name, security_groups, region=None,
                          key=None, keyid=None, profile=None):
    '''
    Apply security groups to ELB.

    CLI example:

    .. code-block:: bash

        salt myminion boto_elb.apply_security_groups myelb '["mysecgroup1"]'
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    if isinstance(security_groups, six.string_types):
        security_groups = salt.utils.json.loads(security_groups)
    try:
        conn.apply_security_groups_to_lb(name, security_groups)
        log.info('Applied security_groups on ELB %s', name)
        return True
    except boto.exception.BotoServerError as e:
        log.debug(e)
        log.error('Failed to apply security_groups on ELB %s: %s',
                  name, e.message)
        return False
Apply security groups to ELB. CLI example: .. code-block:: bash salt myminion boto_elb.apply_security_groups myelb '["mysecgroup1"]'
def transfer_learning_tuner(self, additional_parents=None, estimator=None): """Creates a new ``HyperparameterTuner`` by copying the request fields from the provided parent to the new instance of ``HyperparameterTuner``. Followed by addition of warm start configuration with the type as "TransferLearning" and parents as the union of provided list of ``additional_parents`` and the ``self``. Also, training image in the new tuner's estimator is updated with the provided ``training_image``. Args: additional_parents (set{str}): Set of additional parents along with the self to be used in warm starting the transfer learning tuner. estimator (sagemaker.estimator.EstimatorBase): An estimator object that has been initialized with the desired configuration. There does not need to be a training job associated with this instance. Returns: sagemaker.tuner.HyperparameterTuner: ``HyperparameterTuner`` instance which can be used to launch transfer learning tuning job. Examples: >>> parent_tuner = HyperparameterTuner.attach(tuning_job_name="parent-job-1") >>> transfer_learning_tuner = parent_tuner.transfer_learning_tuner(additional_parents={"parent-job-2"}) Later On: >>> transfer_learning_tuner.fit(inputs={}) """ return self._create_warm_start_tuner(additional_parents=additional_parents, warm_start_type=WarmStartTypes.TRANSFER_LEARNING, estimator=estimator)
Creates a new ``HyperparameterTuner`` by copying the request fields from the provided parent to the new instance of ``HyperparameterTuner``. This is followed by the addition of a warm start configuration with the type "TransferLearning" and parents set to the union of the provided ``additional_parents`` and ``self``. Also, if an ``estimator`` is provided, the new tuner uses it in place of the parent's estimator.

Args:
    additional_parents (set{str}): Set of additional parents along with the self to be used in warm starting the transfer learning tuner.
    estimator (sagemaker.estimator.EstimatorBase): An estimator object that has been initialized with the desired configuration. There does not need to be a training job associated with this instance.

Returns:
    sagemaker.tuner.HyperparameterTuner: ``HyperparameterTuner`` instance which can be used to launch transfer learning tuning job.

Examples:
    >>> parent_tuner = HyperparameterTuner.attach(tuning_job_name="parent-job-1")
    >>> transfer_learning_tuner = parent_tuner.transfer_learning_tuner(additional_parents={"parent-job-2"})
    Later On:
    >>> transfer_learning_tuner.fit(inputs={})
def delete(self, robj, rw=None, r=None, w=None, dw=None, pr=None, pw=None, timeout=None): """ Deletes an object. """ raise NotImplementedError
Deletes an object.
def quadratic_jacobian_polynomial(nodes): r"""Compute the Jacobian determinant of a quadratic surface. .. note:: This is used **only** by :meth:`Surface._compute_valid` (which is in turn used to compute / cache the :attr:`Surface.is_valid` property). Converts :math:`\det(J(s, t))` to a polynomial on the reference triangle and represents it as a surface object. .. note:: This assumes that ``nodes`` is ``2 x 6`` but doesn't verify this. (However, the right multiplication by ``_QUADRATIC_JACOBIAN_HELPER`` would fail if ``nodes`` wasn't ``R x 6`` and then the ensuing determinants would fail if there weren't 2 rows.) Args: nodes (numpy.ndarray): A 2 x 6 array of nodes in a surface. Returns: numpy.ndarray: 1 x 6 array, coefficients in Bernstein basis. """ # First evaluate the Jacobian at each of the 6 nodes. jac_parts = _helpers.matrix_product(nodes, _QUADRATIC_JACOBIAN_HELPER) jac_at_nodes = np.empty((1, 6), order="F") jac_at_nodes[0, 0] = two_by_two_det(jac_parts[:, :2]) jac_at_nodes[0, 1] = two_by_two_det(jac_parts[:, 2:4]) jac_at_nodes[0, 2] = two_by_two_det(jac_parts[:, 4:6]) jac_at_nodes[0, 3] = two_by_two_det(jac_parts[:, 6:8]) jac_at_nodes[0, 4] = two_by_two_det(jac_parts[:, 8:10]) jac_at_nodes[0, 5] = two_by_two_det(jac_parts[:, 10:]) # Convert the nodal values to the Bernstein basis... bernstein = _helpers.matrix_product(jac_at_nodes, _QUADRATIC_TO_BERNSTEIN) return bernstein
r"""Compute the Jacobian determinant of a quadratic surface. .. note:: This is used **only** by :meth:`Surface._compute_valid` (which is in turn used to compute / cache the :attr:`Surface.is_valid` property). Converts :math:`\det(J(s, t))` to a polynomial on the reference triangle and represents it as a surface object. .. note:: This assumes that ``nodes`` is ``2 x 6`` but doesn't verify this. (However, the right multiplication by ``_QUADRATIC_JACOBIAN_HELPER`` would fail if ``nodes`` wasn't ``R x 6`` and then the ensuing determinants would fail if there weren't 2 rows.) Args: nodes (numpy.ndarray): A 2 x 6 array of nodes in a surface. Returns: numpy.ndarray: 1 x 6 array, coefficients in Bernstein basis.
def link(self): """Link all the types in this module and all included modules.""" if self.linked: return self self.linked = True included_modules = [] # Link includes for include in self.includes.values(): included_modules.append(include.link().surface) self.scope.add_surface('__includes__', tuple(included_modules)) self.scope.add_surface('__thrift_source__', self.thrift_source) # Link self for linker in LINKERS: linker(self.scope).link() self.scope.add_surface('loads', Deserializer(self.protocol)) self.scope.add_surface('dumps', Serializer(self.protocol)) return self
Link all the types in this module and all included modules.
def main(league, time, standings, team, live, use12hour, players, output_format, output_file, upcoming, lookup, listcodes, apikey): """ A CLI for live and past football scores from various football leagues. League codes: \b - WC: World Cup - EC: European Championship - CL: Champions League - PL: English Premier League - ELC: English Championship - FL1: French Ligue 1 - BL: German Bundesliga - SA: Serie A - DED: Eredivisie - PPL: Primeira Liga - PD: Primera Division - BSA: Brazil Serie A """ headers = {'X-Auth-Token': apikey} try: if output_format == 'stdout' and output_file: raise IncorrectParametersException('Printing output to stdout and ' 'saving to a file are mutually exclusive') writer = get_writer(output_format, output_file) rh = RequestHandler(headers, LEAGUE_IDS, TEAM_NAMES, writer) if listcodes: list_team_codes() return if live: rh.get_live_scores(use12hour) return if standings: if not league: raise IncorrectParametersException('Please specify a league. ' 'Example --standings --league=PL') if league == 'CL': raise IncorrectParametersException('Standings for CL - ' 'Champions League not supported') rh.get_standings(league) return if team: if lookup: map_team_id(team) return if players: rh.get_team_players(team) return else: rh.get_team_scores(team, time, upcoming, use12hour) return rh.get_league_scores(league, time, upcoming, use12hour) except IncorrectParametersException as e: click.secho(str(e), fg="red", bold=True)
A CLI for live and past football scores from various football leagues. League codes: \b - WC: World Cup - EC: European Championship - CL: Champions League - PL: English Premier League - ELC: English Championship - FL1: French Ligue 1 - BL: German Bundesliga - SA: Serie A - DED: Eredivisie - PPL: Primeira Liga - PD: Primera Division - BSA: Brazil Serie A
def status(*args): """ Get the current status of the fragments repository, limited to FILENAME(s) if specified. Limit output to files with status STATUS, if present. """ parser = argparse.ArgumentParser(prog="%s %s" % (__package__, status.__name__), description=status.__doc__) parser.add_argument('FILENAME', help="files to show status for", nargs="*", default=['.']) parser.add_argument('-l', '--limit', type=str, dest="STATUS", default='MDAE ', action="store", help="limit to files in STATUS") args = parser.parse_args(args) config = FragmentsConfig() yield "%s configuration version %s.%s.%s" % ((__package__,) + config['version']) yield "stored in %s" % config.directory for s, curr_path in _iterate_over_files(args.FILENAME, config, statuses=args.STATUS): yield _status_to_color.get(s, str)('%s\t%s' % (s, os.path.relpath(curr_path)))
Get the current status of the fragments repository, limited to FILENAME(s) if specified. Limit output to files with status STATUS, if present.
def verify(self): ''' Verify the correctness of the region arcs. Throws an VennRegionException if verification fails (or any other exception if it happens during verification). ''' # Verify size of arcs list if (len(self.arcs) < 2): raise VennRegionException("At least two arcs needed in a poly-arc region") if (len(self.arcs) > 4): raise VennRegionException("At most 4 arcs are supported currently for poly-arc regions") TRIG_TOL = 100*tol # We need to use looser tolerance level here because conversion to angles and back is prone to large errors. # Verify connectedness of arcs for i in range(len(self.arcs)): if not np.all(self.arcs[i-1].end_point() - self.arcs[i].start_point() < TRIG_TOL): raise VennRegionException("Arcs of an poly-arc-gon must be connected via endpoints") # Verify that arcs do not cross-intersect except at endpoints for i in range(len(self.arcs)-1): for j in range(i+1, len(self.arcs)): ips = self.arcs[i].intersect_arc(self.arcs[j]) for ip in ips: if not (np.all(abs(ip - self.arcs[i].start_point()) < TRIG_TOL) or np.all(abs(ip - self.arcs[i].end_point()) < TRIG_TOL)): raise VennRegionException("Arcs of a poly-arc-gon may only intersect at endpoints") if len(ips) != 0 and (i - j) % len(self.arcs) > 1 and (j - i) % len(self.arcs) > 1: # Two non-consecutive arcs intersect. This is in general not good, but # may occasionally happen when all arcs inbetween have length 0. pass # raise VennRegionException("Non-consecutive arcs of a poly-arc-gon may not intersect") # Verify that vertices are ordered so that at each point the direction along the polyarc changes towards the left. # Note that this test only makes sense for polyarcs obtained using circle intersections & subtractions. # A "flower-like" polyarc may have its vertices ordered counter-clockwise yet the direction would turn to the right at each of them. for i in range(len(self.arcs)): prev_arc = self.arcs[i-1] cur_arc = self.arcs[i] if box_product(prev_arc.direction_vector(prev_arc.to_angle), cur_arc.direction_vector(cur_arc.from_angle)) < -tol: raise VennRegionException("Arcs must be ordered so that the direction at each vertex changes counter-clockwise")
Verify the correctness of the region arcs. Throws a VennRegionException if verification fails (or any other exception if it happens during verification).
def rewrite_file_imports(item, vendored_libs): """Rewrite 'import xxx' and 'from xxx import' for vendored_libs""" text = item.read_text(encoding='utf-8') for lib in vendored_libs: text = re.sub( r'(\n\s*)import %s(\n\s*)' % lib, r'\1from pythonfinder._vendor import %s\2' % lib, text, ) text = re.sub( r'(\n\s*)from %s' % lib, r'\1from pythonfinder._vendor.%s' % lib, text, ) item.write_text(text, encoding='utf-8')
Rewrite 'import xxx' and 'from xxx import' for vendored_libs
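A minimal standalone run of the same two substitution patterns on a synthetic snippet; the six module name is just an example:

import re

text = "\n    import six\n    from six import moves\n"
for lib in ['six']:
    text = re.sub(r'(\n\s*)import %s(\n\s*)' % lib,
                  r'\1from pythonfinder._vendor import %s\2' % lib, text)
    text = re.sub(r'(\n\s*)from %s' % lib,
                  r'\1from pythonfinder._vendor.%s' % lib, text)
# text now reads:
#     from pythonfinder._vendor import six
#     from pythonfinder._vendor.six import moves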
def is_token_annotation_tier(self, tier): """ returns True, iff all events in the given tier annotate exactly one token. """ for i, event in enumerate(tier.iter('event')): if self.indexdelta(event.attrib['end'], event.attrib['start']) != 1: return False return True
returns True, iff all events in the given tier annotate exactly one token.
def alloc(self):
    """from _mosquitto_packet_alloc."""
    byte = 0
    remaining_bytes = bytearray(5)
    i = 0

    remaining_length = self.remaining_length
    self.payload = None
    self.remaining_count = 0

    loop_flag = True

    #self.dump()

    while loop_flag:
        byte = remaining_length % 128
        # Integer division: keeps ``byte`` an int under Python 3 as well.
        remaining_length = remaining_length // 128
        if remaining_length > 0:
            byte = byte | 0x80

        remaining_bytes[self.remaining_count] = byte
        self.remaining_count += 1

        if not (remaining_length > 0 and self.remaining_count < 5):
            loop_flag = False

    if self.remaining_count == 5:
        return NC.ERR_PAYLOAD_SIZE

    self.packet_length = self.remaining_length + 1 + self.remaining_count
    self.payload = bytearray(self.packet_length)

    self.payload[0] = self.command

    i = 0
    while i < self.remaining_count:
        self.payload[i+1] = remaining_bytes[i]
        i += 1

    self.pos = 1 + self.remaining_count

    return NC.ERR_SUCCESS
from _mosquitto_packet_alloc.
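The loop above implements MQTT's variable-length "remaining length" encoding. A standalone sketch of the same scheme, for illustration (the function name is mine, not from the library):

def encode_remaining_length(length):
    # 7 payload bits per byte; the high bit flags a continuation byte.
    out = bytearray()
    while True:
        byte = length % 128
        length //= 128
        if length > 0:
            byte |= 0x80
        out.append(byte)
        if length == 0:
            return bytes(out)

encode_remaining_length(321)   # -> b'\xc1\x02' (321 = 65 + 2 * 128)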
def adjust_status(info: dict) -> dict: """Apply status mapping to a raw API result.""" modified_info = deepcopy(info) modified_info.update({ 'level': get_nearest_by_numeric_key(STATUS_MAP, int(info['level'])), 'level2': STATUS_MAP[99] if info['level2'] is None else get_nearest_by_numeric_key(STATUS_MAP, int(info['level2'])) }) return modified_info
Apply status mapping to a raw API result.
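get_nearest_by_numeric_key is not shown here; below is a hypothetical reading of it, given how STATUS_MAP is indexed by numeric thresholds. The real helper may resolve ties or out-of-range keys differently:

def get_nearest_by_numeric_key(mapping, key):
    # Assumed behavior: pick the value under the largest mapped key
    # that does not exceed ``key``, falling back to the smallest key.
    candidates = [k for k in mapping if k <= key]
    return mapping[max(candidates)] if candidates else mapping[min(mapping)]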
def describe_subnet(subnet_id=None, subnet_name=None, region=None, key=None, keyid=None, profile=None): ''' Given a subnet id or name, describe its properties. Returns a dictionary of interesting properties. .. versionadded:: 2015.8.0 CLI Examples: .. code-block:: bash salt myminion boto_vpc.describe_subnet subnet_id=subnet-123456 salt myminion boto_vpc.describe_subnet subnet_name=mysubnet ''' try: subnet = _get_resource('subnet', name=subnet_name, resource_id=subnet_id, region=region, key=key, keyid=keyid, profile=profile) except BotoServerError as e: return {'error': __utils__['boto.get_error'](e)} if not subnet: return {'subnet': None} log.debug('Found subnet: %s', subnet.id) keys = ('id', 'cidr_block', 'availability_zone', 'tags', 'vpc_id') ret = {'subnet': dict((k, getattr(subnet, k)) for k in keys)} explicit_route_table_assoc = _get_subnet_explicit_route_table(ret['subnet']['id'], ret['subnet']['vpc_id'], conn=None, region=region, key=key, keyid=keyid, profile=profile) if explicit_route_table_assoc: ret['subnet']['explicit_route_table_association_id'] = explicit_route_table_assoc return ret
Given a subnet id or name, describe its properties. Returns a dictionary of interesting properties. .. versionadded:: 2015.8.0 CLI Examples: .. code-block:: bash salt myminion boto_vpc.describe_subnet subnet_id=subnet-123456 salt myminion boto_vpc.describe_subnet subnet_name=mysubnet
def run_cli(): "Command line interface to hiwenet." features_path, groups_path, weight_method, num_bins, edge_range, \ trim_outliers, trim_percentile, return_networkx_graph, out_weights_path = parse_args() # TODO add the possibility to process multiple combinations of parameters: diff subjects, diff metrics # for features_path to be a file containing multiple subjects (one/line) # -w could take multiple values kldiv,histint, # each line: input_features_path,out_weights_path features, groups = read_features_and_groups(features_path, groups_path) extract(features, groups, weight_method=weight_method, num_bins=num_bins, edge_range=edge_range, trim_outliers=trim_outliers, trim_percentile=trim_percentile, return_networkx_graph=return_networkx_graph, out_weights_path=out_weights_path)
Command line interface to hiwenet.
def iter_commands(self): """Iterator returning ImportCommand objects.""" while True: line = self.next_line() if line is None: if b'done' in self.features: raise errors.PrematureEndOfStream(self.lineno) break elif len(line) == 0 or line.startswith(b'#'): continue # Search for commands in order of likelihood elif line.startswith(b'commit '): yield self._parse_commit(line[len(b'commit '):]) elif line.startswith(b'blob'): yield self._parse_blob() elif line.startswith(b'done'): break elif line.startswith(b'progress '): yield commands.ProgressCommand(line[len(b'progress '):]) elif line.startswith(b'reset '): yield self._parse_reset(line[len(b'reset '):]) elif line.startswith(b'tag '): yield self._parse_tag(line[len(b'tag '):]) elif line.startswith(b'checkpoint'): yield commands.CheckpointCommand() elif line.startswith(b'feature'): yield self._parse_feature(line[len(b'feature '):]) else: self.abort(errors.InvalidCommand, line)
Iterator returning ImportCommand objects.
def pmag_results_extract(res_file="pmag_results.txt", crit_file="", spec_file="", age_file="", latex=False, grade=False, WD="."): """ Generate tab delimited output file(s) with result data. Save output files and return True if successful. Possible output files: Directions, Intensities, SiteNfo, Criteria, Specimens Optional Parameters (defaults are used if not specified) ---------- res_file : name of pmag_results file (default is "pmag_results.txt") crit_file : name of criteria file (default is "pmag_criteria.txt") spec_file : name of specimen file (default is "pmag_specimens.txt") age_file : name of age file (default is "er_ages.txt") latex : boolean argument to output in LaTeX (default is False) WD : path to directory that contains input files and takes output (default is current directory, '.') """ # format outfiles if latex: latex = 1 file_type = '.tex' else: latex = 0 file_type = '.txt' dir_path = os.path.realpath(WD) outfile = os.path.join(dir_path, 'Directions' + file_type) Ioutfile = os.path.join(dir_path, 'Intensities' + file_type) Soutfile = os.path.join(dir_path, 'SiteNfo' + file_type) Specout = os.path.join(dir_path, 'Specimens' + file_type) Critout = os.path.join(dir_path, 'Criteria' + file_type) # format infiles res_file = os.path.join(dir_path, res_file) if crit_file: crit_file = os.path.join(dir_path, crit_file) if spec_file: spec_file = os.path.join(dir_path, spec_file) else: grade = False # open output files f = open(outfile, 'w') sf = open(Soutfile, 'w') fI = open(Ioutfile, 'w') if crit_file: cr = open(Critout, 'w') # set up column headers Sites, file_type = pmag.magic_read(res_file) if crit_file: Crits, file_type = pmag.magic_read(crit_file) else: Crits = [] SiteCols = ["Site", "Location", "Lat. (N)", "Long. (E)", "Age ", "Age sigma", "Units"] SiteKeys = ["er_site_names", "average_lat", "average_lon", "average_age", "average_age_sigma", "average_age_unit"] DirCols = ["Site", 'Comp.', "perc TC", "Dec.", "Inc.", "Nl", "Np", "k ", "R", "a95", "PLat", "PLong"] DirKeys = ["er_site_names", "pole_comp_name", "tilt_correction", "average_dec", "average_inc", "average_n_lines", "average_n_planes", "average_k", "average_r", "average_alpha95", "vgp_lat", "vgp_lon"] IntCols = ["Site", "N", "B (uT)", "sigma", "sigma perc", "VADM", "VADM sigma"] IntKeys = ["er_site_names", "average_int_n", "average_int", "average_int_sigma", 'average_int_sigma_perc', "vadm", "vadm_sigma"] AllowedKeys = ['specimen_frac', 'specimen_scat', 'specimen_gap_max', 'measurement_step_min', 'measurement_step_max', 'measurement_step_unit', 'specimen_polarity', 'specimen_nrm', 'specimen_direction_type', 'specimen_comp_nmb', 'specimen_mad', 'specimen_alpha95', 'specimen_n', 'specimen_int_sigma', 'specimen_int_sigma_perc', 'specimen_int_rel_sigma', 'specimen_int_rel_sigma_perc', 'specimen_int_mad', 'specimen_int_n', 'specimen_w', 'specimen_q', 'specimen_f', 'specimen_fvds', 'specimen_b_sigma', 'specimen_b_beta', 'specimen_g', 'specimen_dang', 'specimen_md', 'specimen_ptrm', 'specimen_drat', 'specimen_drats', 'specimen_rsc', 'specimen_viscosity_index', 'specimen_magn_moment', 'specimen_magn_volume', 'specimen_magn_mass', 'specimen_int_ptrm_n', 'specimen_delta', 'specimen_theta', 'specimen_gamma', 'sample_polarity', 'sample_nrm', 'sample_direction_type', 'sample_comp_nmb', 'sample_sigma', 'sample_alpha95', 'sample_n', 'sample_n_lines', 'sample_n_planes', 'sample_k', 'sample_r', 'sample_tilt_correction', 'sample_int_sigma', 'sample_int_sigma_perc', 'sample_int_rel_sigma', 'sample_int_rel_sigma_perc', 'sample_int_n', 
'sample_magn_moment', 'sample_magn_volume', 'sample_magn_mass', 'site_polarity', 'site_nrm', 'site_direction_type', 'site_comp_nmb', 'site_sigma', 'site_alpha95', 'site_n', 'site_n_lines', 'site_n_planes', 'site_k', 'site_r', 'site_tilt_correction', 'site_int_sigma', 'site_int_sigma_perc', 'site_int_rel_sigma', 'site_int_rel_sigma_perc', 'site_int_n', 'site_magn_moment', 'site_magn_volume', 'site_magn_mass', 'average_age_min', 'average_age_max', 'average_age_sigma', 'average_age_unit', 'average_sigma', 'average_alpha95', 'average_n', 'average_nn', 'average_k', 'average_r', 'average_int_sigma', 'average_int_rel_sigma', 'average_int_rel_sigma_perc', 'average_int_n', 'average_int_nn', 'vgp_dp', 'vgp_dm', 'vgp_sigma', 'vgp_alpha95', 'vgp_n', 'vdm_sigma', 'vdm_n', 'vadm_sigma', 'vadm_n'] if crit_file: crit = Crits[0] # get a list of useful keys for key in list(crit.keys()): if key not in AllowedKeys: del(crit[key]) for key in list(crit.keys()): if (not crit[key]) or (eval(crit[key]) > 1000) or (eval(crit[key]) == 0): # get rid of all blank or too big ones or too little ones del(crit[key]) CritKeys = list(crit.keys()) if spec_file: Specs, file_type = pmag.magic_read(spec_file) fsp = open(Specout, 'w') # including specimen intensities if desired SpecCols = ["Site", "Specimen", "B (uT)", "MAD", "Beta", "N", "Q", "DANG", "f-vds", "DRATS", "T (C)"] SpecKeys = ['er_site_name', 'er_specimen_name', 'specimen_int', 'specimen_int_mad', 'specimen_b_beta', 'specimen_int_n', 'specimen_q', 'specimen_dang', 'specimen_fvds', 'specimen_drats', 'trange'] Xtra = ['specimen_frac', 'specimen_scat', 'specimen_gmax'] if grade: SpecCols.append('Grade') SpecKeys.append('specimen_grade') for x in Xtra: # put in the new intensity keys if present if x in list(Specs[0].keys()): SpecKeys.append(x) newkey = "" for k in x.split('_')[1:]: newkey = newkey + k + '_' SpecCols.append(newkey.strip('_')) SpecCols.append('Corrections') SpecKeys.append('corrections') # these should be multiplied by 1e6 Micro = ['specimen_int', 'average_int', 'average_int_sigma'] Zeta = ['vadm', 'vadm_sigma'] # these should be multiplied by 1e21 # write out the header information for each output file if latex: # write out the latex header stuff sep = ' & ' end = '\\\\' f.write('\\documentclass{article}\n') f.write('\\usepackage[margin=1in]{geometry}\n') f.write('\\usepackage{longtable}\n') f.write('\\begin{document}\n') sf.write('\\documentclass{article}\n') sf.write('\\usepackage[margin=1in]{geometry}\n') sf.write('\\usepackage{longtable}\n') sf.write('\\begin{document}\n') fI.write('\\documentclass{article}\n') fI.write('\\usepackage[margin=1in]{geometry}\n') fI.write('\\usepackage{longtable}\n') fI.write('\\begin{document}\n') if crit_file: cr.write('\\documentclass{article}\n') cr.write('\\usepackage[margin=1in]{geometry}\n') cr.write('\\usepackage{longtable}\n') cr.write('\\begin{document}\n') if spec_file: fsp.write('\\documentclass{article}\n') fsp.write('\\usepackage[margin=1in]{geometry}\n') fsp.write('\\usepackage{longtable}\n') fsp.write('\\begin{document}\n') tabstring = '\\begin{longtable}{' fstring = tabstring for k in range(len(SiteCols)): fstring = fstring + 'r' sf.write(fstring + '}\n') sf.write('\hline\n') fstring = tabstring for k in range(len(DirCols)): fstring = fstring + 'r' f.write(fstring + '}\n') f.write('\hline\n') fstring = tabstring for k in range(len(IntCols)): fstring = fstring + 'r' fI.write(fstring + '}\n') fI.write('\hline\n') fstring = tabstring if crit_file: for k in range(len(CritKeys)): fstring = fstring + 'r' 
cr.write(fstring + '}\n') cr.write('\hline\n') if spec_file: fstring = tabstring for k in range(len(SpecCols)): fstring = fstring + 'r' fsp.write(fstring + '}\n') fsp.write('\hline\n') else: # just set the tab and line endings for tab delimited sep = ' \t ' end = '' # now write out the actual column headers Soutstring, Doutstring, Ioutstring, Spoutstring, Croutstring = "", "", "", "", "" for k in range(len(SiteCols)): Soutstring = Soutstring + SiteCols[k] + sep Soutstring = Soutstring.strip(sep) Soutstring = Soutstring + end + '\n' sf.write(Soutstring) for k in range(len(DirCols)): Doutstring = Doutstring + DirCols[k] + sep Doutstring = Doutstring.strip(sep) Doutstring = Doutstring + end + '\n' f.write(Doutstring) for k in range(len(IntCols)): Ioutstring = Ioutstring + IntCols[k] + sep Ioutstring = Ioutstring.strip(sep) Ioutstring = Ioutstring + end + '\n' fI.write(Ioutstring) if crit_file: for k in range(len(CritKeys)): Croutstring = Croutstring + CritKeys[k] + sep Croutstring = Croutstring.strip(sep) Croutstring = Croutstring + end + '\n' cr.write(Croutstring) if spec_file: for k in range(len(SpecCols)): Spoutstring = Spoutstring + SpecCols[k] + sep Spoutstring = Spoutstring.strip(sep) Spoutstring = Spoutstring + end + "\n" fsp.write(Spoutstring) if latex: # put in a horizontal line in latex file f.write('\hline\n') sf.write('\hline\n') fI.write('\hline\n') if crit_file: cr.write('\hline\n') if spec_file: fsp.write('\hline\n') # do criteria if crit_file: for crit in Crits: Croutstring = "" for key in CritKeys: Croutstring = Croutstring + crit[key] + sep Croutstring = Croutstring.strip(sep) + end cr.write(Croutstring + '\n') # do directions # get all results with VGPs VGPs = pmag.get_dictitem(Sites, 'vgp_lat', '', 'F') VGPs = pmag.get_dictitem(VGPs, 'data_type', 'i', 'T') # get site level stuff for site in VGPs: if len(site['er_site_names'].split(":")) == 1: if 'er_sample_names' not in list(site.keys()): site['er_sample_names'] = '' if 'pole_comp_name' not in list(site.keys()): site['pole_comp_name'] = "A" if 'average_nn' not in list(site.keys()) and 'average_n' in list(site.keys()): site['average_nn'] = site['average_n'] if 'average_n_lines' not in list(site.keys()): site['average_n_lines'] = site['average_nn'] if 'average_n_planes' not in list(site.keys()): site['average_n_planes'] = "" Soutstring, Doutstring = "", "" for key in SiteKeys: if key in list(site.keys()): Soutstring = Soutstring + site[key] + sep Soutstring = Soutstring.strip(sep) + end sf.write(Soutstring + '\n') for key in DirKeys: if key in list(site.keys()): Doutstring = Doutstring + site[key] + sep Doutstring = Doutstring.strip(sep) + end f.write(Doutstring + '\n') # now do intensities VADMs = pmag.get_dictitem(Sites, 'vadm', '', 'F') VADMs = pmag.get_dictitem(VADMs, 'data_type', 'i', 'T') for site in VADMs: # do results level stuff if site not in VGPs: Soutstring = "" for key in SiteKeys: if key in list(site.keys()): Soutstring = Soutstring + site[key] + sep else: Soutstring = Soutstring + " " + sep Soutstring = Soutstring.strip(sep) + end sf.write(Soutstring + '\n') if len(site['er_site_names'].split(":")) == 1 and site['data_type'] == 'i': if 'average_int_sigma_perc' not in list(site.keys()): site['average_int_sigma_perc'] = "0" if site["average_int_sigma"] == "": site["average_int_sigma"] = "0" if site["average_int_sigma_perc"] == "": site["average_int_sigma_perc"] = "0" if site["vadm"] == "": site["vadm"] = "0" if site["vadm_sigma"] == "": site["vadm_sigma"] = "0" for key in list(site.keys()): # reformat vadms, 
intensities if key in Micro: site[key] = '%7.1f' % (float(site[key]) * 1e6) if key in Zeta: site[key] = '%7.1f' % (float(site[key]) * 1e-21) outstring = "" for key in IntKeys: if key not in list(site.keys()): site[key] = "" outstring = outstring + site[key] + sep outstring = outstring.strip(sep) + end + '\n' fI.write(outstring) # VDMs=pmag.get_dictitem(Sites,'vdm','','F') # get non-blank VDMs # for site in VDMs: # do results level stuff # if len(site['er_site_names'].split(":"))==1: # if 'average_int_sigma_perc' not in site.keys():site['average_int_sigma_perc']="0" # if site["average_int_sigma"]=="":site["average_int_sigma"]="0" # if site["average_int_sigma_perc"]=="":site["average_int_sigma_perc"]="0" # if site["vadm"]=="":site["vadm"]="0" # if site["vadm_sigma"]=="":site["vadm_sigma"]="0" # for key in site.keys(): # reformat vadms, intensities # if key in Micro: site[key]='%7.1f'%(float(site[key])*1e6) # if key in Zeta: site[key]='%7.1f'%(float(site[key])*1e-21) # outstring="" # for key in IntKeys: # outstring=outstring+site[key]+sep # fI.write(outstring.strip(sep)+'\n') if spec_file: SpecsInts = pmag.get_dictitem(Specs, 'specimen_int', '', 'F') for spec in SpecsInts: spec['trange'] = '%i' % (int(float(spec['measurement_step_min']) - 273)) + \ '-' + '%i' % (int(float(spec['measurement_step_max']) - 273)) meths = spec['magic_method_codes'].split(':') corrections = '' for meth in meths: if 'DA' in meth: corrections = corrections + meth[3:] + ':' corrections = corrections.strip(':') if corrections.strip() == "": corrections = "None" spec['corrections'] = corrections outstring = "" for key in SpecKeys: if key in Micro: spec[key] = '%7.1f' % (float(spec[key]) * 1e6) if key in Zeta: spec[key] = '%7.1f' % (float(spec[key]) * 1e-21) outstring = outstring + spec[key] + sep fsp.write(outstring.strip(sep) + end + '\n') # if latex: # write out the tail stuff f.write('\hline\n') sf.write('\hline\n') fI.write('\hline\n') f.write('\end{longtable}\n') sf.write('\end{longtable}\n') fI.write('\end{longtable}\n') f.write('\end{document}\n') sf.write('\end{document}\n') fI.write('\end{document}\n') if spec_file: fsp.write('\hline\n') fsp.write('\end{longtable}\n') fsp.write('\end{document}\n') if crit_file: cr.write('\hline\n') cr.write('\end{longtable}\n') cr.write('\end{document}\n') f.close() sf.close() fI.close() print('data saved in: ', outfile, Ioutfile, Soutfile) outfiles = [outfile, Ioutfile, Soutfile] if spec_file: fsp.close() print('specimen data saved in: ', Specout) outfiles.append(Specout) if crit_file: cr.close() print('Selection criteria saved in: ', Critout) outfiles.append(Critout) return True, outfiles
Generate tab-delimited output file(s) with result data. Save output files and return True if successful.

Possible output files: Directions, Intensities, SiteNfo, Criteria, Specimens

Optional Parameters (defaults are used if not specified)
----------
res_file : name of pmag_results file (default is "pmag_results.txt")
crit_file : name of criteria file (default is "pmag_criteria.txt")
spec_file : name of specimen file (default is "pmag_specimens.txt")
age_file : name of age file (default is "er_ages.txt")
latex : boolean argument to output in LaTeX (default is False)
grade : boolean argument to include specimen grades (default is False)
WD : path to directory that contains input files and takes output (default is current directory, '.')
def source(self): """Source.""" if "source" not in self.attrs.keys(): self.attrs["source"] = "None" value = self.attrs["source"] return value if not value == "None" else None
Source.
def OnRowSize(self, event): """Row size event handler""" row = event.GetRowOrCol() tab = self.grid.current_table rowsize = self.grid.GetRowSize(row) / self.grid.grid_renderer.zoom # Detect for resizing group of rows rows = self.grid.GetSelectedRows() if len(rows) == 0: rows = [row, ] # Detect for selection of rows spanning all columns selection = self.grid.selection num_cols = self.grid.code_array.shape[1]-1 for box in zip(selection.block_tl, selection.block_br): leftmost_col = box[0][1] rightmost_col = box[1][1] if leftmost_col == 0 and rightmost_col == num_cols: rows += range(box[0][0], box[1][0]+1) # All row resizing is undone in one click with undo.group(_("Resize Rows")): for row in rows: self.grid.code_array.set_row_height(row, tab, rowsize) zoomed_rowsize = rowsize * self.grid.grid_renderer.zoom self.grid.SetRowSize(row, zoomed_rowsize) # Mark content as changed post_command_event(self.grid.main_window, self.grid.ContentChangedMsg) event.Skip() self.grid.ForceRefresh()
Row size event handler
def GetBaseFiles(self, diff): """Helper that calls GetBase file for each file in the patch. Returns: A dictionary that maps from filename to GetBaseFile's tuple. Filenames are retrieved based on lines that start with "Index:" or "Property changes on:". """ files = {} for line in diff.splitlines(True): if line.startswith('Index:') or line.startswith('Property changes on:'): unused, filename = line.split(':', 1) # On Windows if a file has property changes its filename uses '\' # instead of '/'. filename = to_slash(filename.strip()) files[filename] = self.GetBaseFile(filename) return files
Helper that calls GetBase file for each file in the patch. Returns: A dictionary that maps from filename to GetBaseFile's tuple. Filenames are retrieved based on lines that start with "Index:" or "Property changes on:".
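A small illustration of the per-line filename extraction, where to_slash is assumed to swap backslashes for forward slashes, as the comment implies:

line = 'Index: src\\win\\main.py\n'
unused, filename = line.split(':', 1)
filename = filename.strip().replace('\\', '/')   # what to_slash presumably does
# filename == 'src/win/main.py'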
def hse_output(pdb_file, file_type): """ The solvent exposure of an amino acid residue is important for analyzing, understanding and predicting aspects of protein structure and function [73]. A residue's solvent exposure can be classified as four categories: exposed, partly exposed, buried and deeply buried residues. Hamelryck et al. [73] established a new 2D measure that provides a different view of solvent exposure, i.e. half-sphere exposure (HSE). By conceptually dividing the sphere of a residue into two halves- HSE-up and HSE-down, HSE provides a more detailed description of an amino acid residue's spatial neighborhood. HSE is calculated by the hsexpo module implemented in the BioPython package [74] from a PDB file. http://onlinelibrary.wiley.com/doi/10.1002/prot.20379/abstract Args: pdb_file: Returns: """ # Get the first model my_structure = StructureIO(pdb_file) model = my_structure.first_model # Calculate HSEalpha exp_ca = HSExposureCA(model) # Calculate HSEbeta exp_cb = HSExposureCB(model) # Calculate classical coordination number exp_fs = ExposureCN(model) return
The solvent exposure of an amino acid residue is important for analyzing, understanding and predicting aspects of protein structure and function [73]. A residue's solvent exposure can be classified as four categories: exposed, partly exposed, buried and deeply buried residues. Hamelryck et al. [73] established a new 2D measure that provides a different view of solvent exposure, i.e. half-sphere exposure (HSE). By conceptually dividing the sphere of a residue into two halves, HSE-up and HSE-down, HSE provides a more detailed description of an amino acid residue's spatial neighborhood. HSE is calculated by the hsexpo module implemented in the BioPython package [74] from a PDB file.

http://onlinelibrary.wiley.com/doi/10.1002/prot.20379/abstract

Args:
    pdb_file: Path to the PDB structure file to analyze.

Returns:
    None
def fix_shapes(self): """ Fixes the shape of the data fields on edges. Left edges should be column vectors, and top edges should be row vectors, for example. """ for i in xrange(self.n_chunks): for side in ['left', 'right', 'top', 'bottom']: edge = getattr(self, side).ravel()[i] if side in ['left', 'right']: shp = [edge.todo.size, 1] else: shp = [1, edge.todo.size] edge.done = edge.done.reshape(shp) edge.data = edge.data.reshape(shp) edge.todo = edge.todo.reshape(shp)
Fixes the shape of the data fields on edges. Left edges should be column vectors, and top edges should be row vectors, for example.
def send_registered_email(self, user, user_email, request_email_confirmation): """Send the 'user has registered' notification email.""" # Verify config settings if not self.user_manager.USER_ENABLE_EMAIL: return if not self.user_manager.USER_SEND_REGISTERED_EMAIL: return # The registered email is sent to a specific user_email.email or user.email email = user_email.email if user_email else user.email # Add a request to confirm email if needed if request_email_confirmation: # Generate a confirm_email_link token = self.user_manager.generate_token(user_email.id if user_email else user.id) confirm_email_link = url_for('user.confirm_email', token=token, _external=True) else: confirm_email_link = None # Render email from templates and send it via the configured EmailAdapter self._render_and_send_email( email, user, self.user_manager.USER_REGISTERED_EMAIL_TEMPLATE, confirm_email_link=confirm_email_link, )
Send the 'user has registered' notification email.
def _iterate(self, url, params, api_entity): """ Args: url: params: api_entity: Return: """ params['resultLimit'] = self.result_limit should_iterate = True result_start = 0 while should_iterate: # params['resultOffset'] = result_offset params['resultStart'] = result_start r = self.tcex.session.get(url, params=params) if not self.success(r): err = r.text or r.reason self.tcex.handle_error(950, [r.status_code, err, r.url]) data = r.json().get('data').get(api_entity) if len(data) < self.result_limit: should_iterate = False result_start += self.result_limit for result in data: yield result
Args:
    url: The API endpoint URL to page through.
    params: Dict of query parameters; resultLimit and resultStart are set for each page.
    api_entity: Key under the response's 'data' object whose items are yielded.

Return:
    Generator yielding one result dict at a time.
def _show_annotation_box(self, event): """Update an existing box or create an annotation box for an event.""" ax = event.artist.axes # Get the pre-created annotation box for the axes or create a new one. if self.display != 'multiple': annotation = self.annotations[ax] elif event.mouseevent in self.annotations: # Avoid creating multiple datacursors for the same click event # when several artists are selected. annotation = self.annotations[event.mouseevent] else: annotation = self.annotate(ax, **self._annotation_kwargs) self.annotations[event.mouseevent] = annotation if self.display == 'single': # Hide any other annotation boxes... for ann in self.annotations.values(): ann.set_visible(False) self.update(event, annotation)
Update an existing box or create an annotation box for an event.
def send_json_message(address, message, **kwargs): """ a shortcut for message sending """ data = { 'message': message, } if not kwargs.get('subject_id'): data['subject_id'] = address data.update(kwargs) hxdispatcher.send(address, data)
a shortcut for message sending
def orc(self, path):
    """Loads an ORC file stream, returning the result as a :class:`DataFrame`.

    .. note:: Evolving.

    >>> orc_sdf = spark.readStream.schema(sdf_schema).orc(tempfile.mkdtemp())
    >>> orc_sdf.isStreaming
    True
    >>> orc_sdf.schema == sdf_schema
    True
    """
    if isinstance(path, basestring):
        return self._df(self._jreader.orc(path))
    else:
        raise TypeError("path can be only a single string")
Loads an ORC file stream, returning the result as a :class:`DataFrame`.

.. note:: Evolving.

>>> orc_sdf = spark.readStream.schema(sdf_schema).orc(tempfile.mkdtemp())
>>> orc_sdf.isStreaming
True
>>> orc_sdf.schema == sdf_schema
True
def model_installed(name): """Check if spaCy language model is installed. From https://github.com/explosion/spaCy/blob/master/spacy/util.py :param name: :return: """ data_path = util.get_data_path() if not data_path or not data_path.exists(): raise IOError(f"Can't find spaCy data path: {data_path}") if name in {d.name for d in data_path.iterdir()}: return True if Spacy.is_package(name): # installed as package return True if Path(name).exists(): # path to model data directory return True return False
Check if spaCy language model is installed. From https://github.com/explosion/spaCy/blob/master/spacy/util.py :param name: :return:
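A hedged usage sketch: gate a pipeline load behind the check so a missing model fails early with a clear message (the model name here is just an example):

import spacy

if model_installed('en_core_web_sm'):
    nlp = spacy.load('en_core_web_sm')
else:
    raise RuntimeError('spaCy model en_core_web_sm is not installed')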
def _update_dPrxy(self): """Update `dPrxy`.""" super(ExpCM_fitprefs, self)._update_dPrxy() if 'zeta' in self.freeparams: tildeFrxyQxy = self.tildeFrxy * self.Qxy j = 0 zetaxterm = scipy.ndarray((self.nsites, N_CODON, N_CODON), dtype='float') zetayterm = scipy.ndarray((self.nsites, N_CODON, N_CODON), dtype='float') for r in range(self.nsites): for i in range(N_AA - 1): zetari = self.zeta[j] zetaxterm.fill(0) zetayterm.fill(0) zetaxterm[r][self._aa_for_x > i] = -1.0 / zetari zetaxterm[r][self._aa_for_x == i] = -1.0 / (zetari - 1.0) zetayterm[r][self._aa_for_y > i] = 1.0 / zetari zetayterm[r][self._aa_for_y == i] = 1.0 / (zetari - 1.0) self.dPrxy['zeta'][j] = tildeFrxyQxy * (zetayterm + zetaxterm) _fill_diagonals(self.dPrxy['zeta'][j], self._diag_indices) j += 1
Update `dPrxy`.
def noise_get_fbm(
    n: tcod.noise.Noise,
    f: Sequence[float],
    oc: float,
    typ: int = NOISE_DEFAULT,
) -> float:
    """Return the fractal Brownian motion sampled from the ``f`` coordinate.

    Args:
        n (Noise): A Noise instance.
        f (Sequence[float]): The point to sample the noise from.
        oc (float): The number of octaves to sum. Should be more than 1.
        typ (int): The noise algorithm to use.

    Returns:
        float: The sampled noise value.
    """
    return float(
        lib.TCOD_noise_get_fbm_ex(n.noise_c, ffi.new("float[4]", f), oc, typ)
    )
Return the fractal Brownian motion sampled from the ``f`` coordinate.

Args:
    n (Noise): A Noise instance.
    f (Sequence[float]): The point to sample the noise from.
    oc (float): The number of octaves to sum. Should be more than 1.
    typ (int): The noise algorithm to use.

Returns:
    float: The sampled noise value.
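What fBm means here, as a pure-Python illustration: sum progressively higher-frequency, lower-amplitude layers of a base noise function. libtcod's C implementation differs in its constants and normalization, so this is only a sketch:

def fbm(noise_fn, x, octaves=4.0, lacunarity=2.0, gain=0.5):
    # noise_fn: any scalar noise function, e.g. a 1D Perlin sampler.
    total, freq, amp = 0.0, 1.0, 1.0
    for _ in range(int(octaves)):
        total += amp * noise_fn(x * freq)
        freq *= lacunarity   # each octave doubles the frequency by default
        amp *= gain          # ...and halves the contribution
    return total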
def safe_write(filename, blob): """ A two-step write. :param filename: full path :param blob: binary data :return: None """ temp_file = filename + '.saving' with open(temp_file, 'bw') as f: f.write(blob) os.rename(temp_file, filename)
A two-step write. :param filename: full path :param blob: binary data :return: None
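Usage is a single call; the temporary file lives next to the target, so the final os.rename stays on one filesystem, where POSIX renames are atomic:

safe_write('/tmp/state.bin', b'\x00\x01\x02')

Note that on Windows os.rename fails if the destination already exists; os.replace would be the drop-in portable alternative.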
def stop_change(self): """Stop changing light level manually""" self.logger.info("Dimmer %s stop_change", self.device_id) self.hub.direct_command(self.device_id, '18', '00') success = self.hub.check_success(self.device_id, '18', '00') if success: self.logger.info("Dimmer %s stop_change: Light stopped changing successfully", self.device_id) self.hub.clear_device_command_cache(self.device_id) else: self.logger.error("Dimmer %s stop_change: Light did not stop", self.device_id) return success
Stop changing light level manually
def _parse_string_host(host_str): """ Parse host string into a dictionary host :param host_str: :return: """ host_str = EsParser._fix_host_prefix(host_str) parsed_url = urlparse(host_str) host = {HostParsing.HOST: parsed_url.hostname} if parsed_url.port: host[HostParsing.PORT] = parsed_url.port if parsed_url.scheme == HostParsing.HTTPS: host[HostParsing.PORT] = parsed_url.port or EsParser.SSL_DEFAULT_PORT host[HostParsing.USE_SSL] = True host[HostParsing.SCHEME] = HostParsing.HTTPS elif parsed_url.scheme: host[HostParsing.SCHEME] = parsed_url.scheme if parsed_url.username or parsed_url.password: host[HostParsing.HTTP_AUTH] = '%s:%s' % (parsed_url.username, parsed_url.password) if parsed_url.path and parsed_url.path != '/': host[HostParsing.URL_PREFIX] = parsed_url.path return host
Parse host string into a dictionary host :param host_str: :return:
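An illustrative call; the key names below mirror the HostParsing constants by intent, but their literal values are assumptions:

host = _parse_string_host('https://user:secret@search.example.com:9200/es')
# Expected shape, per the branches above:
# {'host': 'search.example.com', 'port': 9200, 'use_ssl': True,
#  'scheme': 'https', 'http_auth': 'user:secret', 'url_prefix': '/es'}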
def items(self): """ Request URL and parse response. Yield a ``Torrent`` for every torrent on page. """ request = get(str(self.url), headers={'User-Agent' : "Magic Browser","origin_req_host" : "thepiratebay.se"}) root = html.fromstring(request.text) items = [self._build_torrent(row) for row in self._get_torrent_rows(root)] for item in items: yield item
Request URL and parse response. Yield a ``Torrent`` for every torrent on page.
def text_color(self, value): """ Setter for **self.__text_color** attribute. :param value: Attribute value. :type value: int or QColor """ if value is not None: assert type(value) in (Qt.GlobalColor, QColor), \ "'{0}' attribute: '{1}' type is not 'int' or 'QColor'!".format("text_color", value) self.__text_color = value
Setter for **self.__text_color** attribute. :param value: Attribute value. :type value: int or QColor
def ed25519_public_key_to_string(key):
    """Convert an ed25519 public key to a base64-encoded string.

    Args:
        key (Ed25519PublicKey): the key to convert.

    Returns:
        str: the key representation as a str

    """
    return base64.b64encode(key.public_bytes(
        encoding=serialization.Encoding.Raw,
        format=serialization.PublicFormat.Raw,
    ), None).decode('utf-8')
Convert an ed25519 public key to a base64-encoded string.

Args:
    key (Ed25519PublicKey): the key to convert.

Returns:
    str: the key representation as a str
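A round-trip sketch using the cryptography package's ed25519 types, which match the Ed25519PublicKey the docstring names:

import base64
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey

private_key = Ed25519PrivateKey.generate()
encoded = ed25519_public_key_to_string(private_key.public_key())
assert len(base64.b64decode(encoded)) == 32   # raw ed25519 public keys are 32 bytes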
def to_gremlin(self): """Return a unicode object with the Gremlin representation of this block.""" self.validate() if len(self.start_class) == 1: # The official Gremlin documentation claims that this approach # is generally faster than the one below, since it makes using indexes easier. # http://gremlindocs.spmallette.documentup.com/#filter/has start_class = list(self.start_class)[0] return u'g.V({}, {})'.format('\'@class\'', safe_quoted_string(start_class)) else: start_classes_list = ','.join(safe_quoted_string(x) for x in self.start_class) return u'g.V.has(\'@class\', T.in, [{}])'.format(start_classes_list)
Return a unicode object with the Gremlin representation of this block.
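For concreteness, the two output shapes this emits, assuming safe_quoted_string wraps its argument in single quotes:

# start_class == {'Animal'}:
#   g.V('@class', 'Animal')
# start_class == {'Animal', 'Plant'}:
#   g.V.has('@class', T.in, ['Animal','Plant'])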
def check_forbidden_filename(filename, destiny_os=os.name,
                             restricted_names=restricted_names):
    '''
    Get whether the given filename is forbidden for the current OS or
    filesystem.

    :param filename: filename to check
    :param destiny_os: destination operative system
    :param restricted_names: iterable of filenames restricted on any OS
    :return: whether it is forbidden on the given OS (or filesystem) or not
    :rtype: bool
    '''
    return (
        filename in restricted_names or
        destiny_os == 'nt' and
        filename.split('.', 1)[0].upper() in nt_device_names
    )
Get whether the given filename is forbidden for the current OS or filesystem.

:param filename: filename to check
:param destiny_os: destination operative system
:param restricted_names: iterable of filenames restricted on any OS
:return: whether it is forbidden on the given OS (or filesystem) or not
:rtype: bool
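Two illustrative calls, assuming nt_device_names contains the classic DOS device names such as CON and NUL:

check_forbidden_filename('CON.txt', destiny_os='nt')       # True: 'CON' is a device name
check_forbidden_filename('notes.txt', destiny_os='posix')  # False, unless listed in restricted_names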
def assign_yourself(self): """ Assigning the workflow to itself. The selected job is checked to see if there is an assigned role. If it does not have a role assigned to it, it takes the job to itself and displays a message that the process is successful. If there is a role assigned to it, it does not do any operation and the message is displayed on the screen. .. code-block:: python # request: { 'task_inv_key': string, } """ task_invitation = TaskInvitation.objects.get(self.task_invitation_key) wfi = task_invitation.instance if not wfi.current_actor.exist: wfi.current_actor = self.current.role wfi.save() [inv.delete() for inv in TaskInvitation.objects.filter(instance=wfi) if not inv == task_invitation] title = _(u"Successful") msg = _(u"You have successfully assigned the job to yourself.") else: title = _(u"Unsuccessful") msg = _(u"Unfortunately, this job is already taken by someone else.") self.current.msg_box(title=title, msg=msg)
Assign the workflow task to yourself.

The selected job is checked to see whether a role is already assigned to it. If it does not have a role assigned, the current user takes the job and a message is displayed saying the operation succeeded. If a role is already assigned, no operation is performed and a corresponding message is displayed on the screen.

.. code-block:: python

    # request:
    {
        'task_inv_key': string,
    }
def eaSimpleConverge(population, toolbox, cxpb, mutpb, ngen, stats=None, halloffame=None, callback=None, verbose=True): """This algorithm reproduce the simplest evolutionary algorithm as presented in chapter 7 of [Back2000]_. Modified to allow checking if there is no change for ngen, as a simple rule for convergence. Interface is similar to eaSimple(). However, in eaSimple, ngen is total number of iterations; in eaSimpleConverge, we terminate only when the best is NOT updated for ngen iterations. """ # Evaluate the individuals with an invalid fitness invalid_ind = [ind for ind in population if not ind.fitness.valid] fitnesses = toolbox.map(toolbox.evaluate, invalid_ind) for ind, fit in zip(invalid_ind, fitnesses): ind.fitness.values = fit if halloffame is not None: halloffame.update(population) record = stats.compile(population) if stats else {} # Begin the generational process gen = 1 best = (0,) while True: # Select the next generation individuals offspring = toolbox.select(population, len(population)) # Vary the pool of individuals offspring = varAnd(offspring, toolbox, cxpb, mutpb) # Evaluate the individuals with an invalid fitness invalid_ind = [ind for ind in offspring if not ind.fitness.valid] fitnesses = toolbox.map(toolbox.evaluate, invalid_ind) for ind, fit in zip(invalid_ind, fitnesses): ind.fitness.values = fit # Update the hall of fame with the generated individuals if halloffame is not None: halloffame.update(offspring) if callback is not None: callback(halloffame[0], gen) # Replace the current population by the offspring population[:] = offspring # Append the current generation statistics to the logbook record = stats.compile(population) if stats else {} current_best = record['max'] if gen % 20 == 0 and verbose: print("Current iteration {0}: max_score={1}". format(gen, current_best), file=sys.stderr) if current_best > best: best = current_best updated = gen gen += 1 if gen - updated > ngen: break return population
This algorithm reproduces the simplest evolutionary algorithm as presented in chapter 7 of [Back2000]_. Modified to allow checking if there is no change for ngen generations, as a simple rule for convergence. The interface is similar to eaSimple(). However, in eaSimple, ngen is the total number of iterations; in eaSimpleConverge, we terminate only when the best is NOT updated for ngen iterations.
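A minimal DEAP harness that exercises the routine, assuming varAnd is imported from deap.algorithms in the same module; the toy problem (maximize the sum of ten floats) is mine, not from the source:

import random
from deap import base, creator, tools

creator.create('FitnessMax', base.Fitness, weights=(1.0,))
creator.create('Individual', list, fitness=creator.FitnessMax)

toolbox = base.Toolbox()
toolbox.register('attr', random.random)
toolbox.register('individual', tools.initRepeat, creator.Individual, toolbox.attr, 10)
toolbox.register('population', tools.initRepeat, list, toolbox.individual)
toolbox.register('evaluate', lambda ind: (sum(ind),))
toolbox.register('mate', tools.cxTwoPoint)
toolbox.register('mutate', tools.mutGaussian, mu=0, sigma=0.2, indpb=0.1)
toolbox.register('select', tools.selTournament, tournsize=3)

# record['max'] in the loop above comes from this registered statistic.
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register('max', max)

pop = eaSimpleConverge(toolbox.population(n=50), toolbox,
                       cxpb=0.5, mutpb=0.2, ngen=100, stats=stats)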
def signature(self, name, file_name, file_type, file_content, owner=None, **kwargs):
    """
    Create the Signature TI object.

    Args:
        name: The name of the signature.
        file_name: The name of the signature file.
        file_type: The type of the signature file.
        file_content: The contents of the signature.
        owner: The owner of the signature.
        **kwargs: Additional keyword arguments.

    Return:
        A Signature TI object.
    """
    return Signature(self.tcex, name, file_name, file_type, file_content, owner=owner, **kwargs)
Create the Signature TI object.

Args:
    name: The name of the signature.
    file_name: The name of the signature file.
    file_type: The type of the signature file.
    file_content: The contents of the signature.
    owner: The owner of the signature.
    **kwargs: Additional keyword arguments.

Return:
    A Signature TI object.