def add_system(self, system):
    '''
    Add system to the world. All systems will be processed on
    World.process()

    system is of type System
    '''
    if system not in self._systems:
        system.set_world(self)
        self._systems.append(system)
    else:
        raise DuplicateSystemError(system)
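A minimal usage sketch; `World` and `MovementSystem` (a `System` subclass) are hypothetical stand-ins for the host framework's types:

# Hypothetical usage: MovementSystem is assumed to derive from System.
world = World()
movement = MovementSystem()
world.add_system(movement)   # registered; will run on world.process()
world.process()
# Adding the same system instance again raises DuplicateSystemError.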
def alias_repository(self, repository_id=None, alias_id=None):
    """Adds an ``Id`` to a ``Repository`` for the purpose of creating compatibility.

    The primary ``Id`` of the ``Repository`` is determined by the
    provider. The new ``Id`` is an alias to the primary ``Id``. If the
    alias is a pointer to another repository, it is reassigned to the
    given repository ``Id``.

    arg:    repository_id (osid.id.Id): the ``Id`` of a ``Repository``
    arg:    alias_id (osid.id.Id): the alias ``Id``
    raise:  AlreadyExists - ``alias_id`` is in use as a primary ``Id``
    raise:  NotFound - ``repository_id`` not found
    raise:  NullArgument - ``repository_id`` or ``alias_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from awsosid template for -
    # osid.resource.BinAdminSession.alias_bin_template
    if not self._can('alias'):
        raise PermissionDenied()
    # Pass alias_id through to the provider; the original dropped it,
    # which contradicts the documented contract above.
    return self._provider_session.alias_repository(repository_id, alias_id)
def createRandomObjectDescriptions(numObjects,
                                   numLocationsPerObject,
                                   featurePool=("A", "B", "C")):
    """
    Returns {"Object 1": [(0, "C"), (1, "B"), (2, "C"), ...],
             "Object 2": [(0, "C"), (1, "A"), (2, "B"), ...]}
    """
    # Python 3: xrange -> range, and zip() must be materialized into a list.
    return dict(("Object %d" % i,
                 list(zip(range(numLocationsPerObject),
                          [random.choice(featurePool)
                           for _ in range(numLocationsPerObject)])))
                for i in range(1, numObjects + 1))
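Example call (features are drawn at random from the pool, so the exact letters vary from run to run):

import random

random.seed(0)  # only to make the illustration repeatable
descriptions = createRandomObjectDescriptions(2, 3)
# {'Object 1': [(0, <feature>), (1, <feature>), (2, <feature>)],
#  'Object 2': [(0, <feature>), (1, <feature>), (2, <feature>)]}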
def simBirth(self, which_agents):
    '''
    Makes new consumers for the given indices.  Initialized variables include
    aNrm and pLvl, as well as time variables t_age and t_cycle.  Normalized
    assets and permanent income levels are drawn from lognormal distributions
    given by aNrmInitMean and aNrmInitStd (etc).

    Parameters
    ----------
    which_agents : np.array(Bool)
        Boolean array of size self.AgentCount indicating which agents should be "born".

    Returns
    -------
    None
    '''
    # Get and store states for newly born agents
    N = np.sum(which_agents)  # Number of new consumers to make
    self.aNrmNow[which_agents] = drawLognormal(N, mu=self.aNrmInitMean,
                                               sigma=self.aNrmInitStd,
                                               seed=self.RNG.randint(0, 2**31 - 1))
    # Account for newer cohorts having higher permanent income
    pLvlInitMeanNow = self.pLvlInitMean + np.log(self.PlvlAggNow)
    self.pLvlNow[which_agents] = drawLognormal(N, mu=pLvlInitMeanNow,
                                               sigma=self.pLvlInitStd,
                                               seed=self.RNG.randint(0, 2**31 - 1))
    self.t_age[which_agents] = 0    # How many periods since each agent was born
    self.t_cycle[which_agents] = 0  # Which period of the cycle each agent is currently in
    return None
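As a rough sketch of what the `drawLognormal` helper is assumed to do (exponentiate a normal draw with the given log-mean and log-std), in plain NumPy:

import numpy as np

rng = np.random.RandomState(seed=42)   # hypothetical seed
N, mu, sigma = 5, 0.0, 0.1             # hypothetical parameters
draws = np.exp(mu + sigma * rng.standard_normal(N))  # lognormal sample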
def generate_openmp_enabled_py(packagename, srcdir='.', disable_openmp=None):
    """
    Generate ``package.openmp_enabled.is_openmp_enabled``, which can then be used
    to determine, post build, whether the package was built with or without
    OpenMP support.
    """
    if packagename.lower() == 'astropy':
        packagetitle = 'Astropy'
    else:
        packagetitle = packagename

    epoch = int(os.environ.get('SOURCE_DATE_EPOCH', time.time()))
    timestamp = datetime.datetime.utcfromtimestamp(epoch)

    if disable_openmp is not None:
        import builtins
        builtins._ASTROPY_DISABLE_SETUP_WITH_OPENMP_ = disable_openmp
    if _ASTROPY_DISABLE_SETUP_WITH_OPENMP_:
        log.info("OpenMP support has been explicitly disabled.")
    openmp_support = False if _ASTROPY_DISABLE_SETUP_WITH_OPENMP_ else is_openmp_supported()

    src = _IS_OPENMP_ENABLED_SRC.format(packagetitle=packagetitle,
                                        timestamp=timestamp,
                                        return_bool=openmp_support)

    package_srcdir = os.path.join(srcdir, *packagename.split('.'))
    is_openmp_enabled_py = os.path.join(package_srcdir, 'openmp_enabled.py')
    with open(is_openmp_enabled_py, 'w') as f:
        f.write(src)
def interpolate_to_isosurface(level_var, interp_var, level, **kwargs):
    r"""Linear interpolation of a variable to a given vertical level from given values.

    This function assumes that highest vertical level (lowest pressure) is
    zeroth index. A classic use of this function would be to compute the
    potential temperature on the dynamic tropopause (2 PVU surface).

    Parameters
    ----------
    level_var: array_like (P, M, N)
        Level values in 3D grid on common vertical coordinate (e.g., PV values
        on isobaric levels). Assumes height dimension is highest to lowest in
        atmosphere.
    interp_var: array_like (P, M, N)
        Variable on 3D grid with same vertical coordinate as level_var to
        interpolate to given level (e.g., potential temperature on isobaric levels)
    level: int or float
        Desired interpolated level (e.g., 2 PVU surface)

    Other Parameters
    ----------------
    bottom_up_search : bool, optional
        Controls whether to search for levels bottom-up, or top-down.
        Defaults to True, which is bottom-up search.

    Returns
    -------
    interp_level: (M, N) ndarray
        The interpolated variable (e.g., potential temperature) on the desired
        level (e.g., 2 PVU surface)

    Notes
    -----
    This function implements a linear interpolation to estimate values on a
    given surface. The prototypical example is interpolation of potential
    temperature to the dynamic tropopause (e.g., 2 PVU surface)

    """
    # Change when Python 2.7 no longer supported
    # Pull out keyword arguments
    bottom_up_search = kwargs.pop('bottom_up_search', True)

    # Find index values above and below desired interpolated surface value
    above, below, good = metpy.calc.find_bounding_indices(level_var, [level], axis=0,
                                                          from_below=bottom_up_search)

    # Linear interpolation of variable to interpolated surface value
    interp_level = (((level - level_var[above]) / (level_var[below] - level_var[above]))
                    * (interp_var[below] - interp_var[above])) + interp_var[above]

    # Handle missing values and instances where no values for surface exist above and below
    interp_level[~good] = np.nan
    minvar = (np.min(level_var, axis=0) >= level)
    maxvar = (np.max(level_var, axis=0) <= level)
    interp_level[0][minvar] = interp_var[-1][minvar]
    interp_level[0][maxvar] = interp_var[0][maxvar]
    return interp_level.squeeze()
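The core of the function is one linear-interpolation step between the two bounding levels; a self-contained sketch of that formula on a single vertical column, in plain NumPy (independent of the MetPy helper used above):

import numpy as np

# One column: PV-like level values and potential temperature on the same levels,
# highest level first, as the function above assumes.
level_var = np.array([6.0, 4.0, 2.5, 1.0])
interp_var = np.array([380., 350., 330., 310.])
level = 2.0  # e.g. the 2 PVU surface

above = np.searchsorted(-level_var, -level) - 1  # bounding index above the surface
below = above + 1
interp_level = ((level - level_var[above]) / (level_var[below] - level_var[above])
                * (interp_var[below] - interp_var[above])) + interp_var[above]
# -> 323.33..., linearly weighted between 330 and 310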
def getlist(self, key):
    """
    Returns a Storage value as a list. If the value is a list it will be
    returned as-is. If object is None, an empty list will be returned.
    Otherwise, `[value]` will be returned.

    Example output for a query string of `?x=abc&y=abc&y=def`::

        >>> request = Storage()
        >>> request.vars = Storage()
        >>> request.vars.x = 'abc'
        >>> request.vars.y = ['abc', 'def']
        >>> request.vars.getlist('x')
        ['abc']
        >>> request.vars.getlist('y')
        ['abc', 'def']
        >>> request.vars.getlist('z')
        []

    """
    value = self.get(key, [])
    if value is None:
        # The docstring promises an empty list for None, so do not
        # return None as-is.
        return []
    elif isinstance(value, (list, tuple)):
        return value
    else:
        return [value]
def start(self):
    """Launch this postgres server. If it's already running, do nothing.

    If the backing storage directory isn't configured, raise
    NotInitializedError.

    This method is optional. If you're running in an environment where the
    DBMS is provided as part of the basic infrastructure, you probably want
    to skip this step!
    """
    log.info('Starting PostgreSQL at %s:%s', self.host, self.port)
    if not self.base_pathname:
        tmpl = ('Invalid base_pathname: %r. Did you forget to call '
                '.initdb()?')
        raise NotInitializedError(tmpl % self.base_pathname)
    conf_file = os.path.join(self.base_pathname, 'postgresql.conf')
    if not os.path.exists(conf_file):
        tmpl = 'No config file at: %r. Did you forget to call .initdb()?'
        raise NotInitializedError(tmpl % self.base_pathname)
    if not self.is_running():
        version = self.get_version()
        if version and version >= (9, 3):
            socketop = 'unix_socket_directories'
        else:
            socketop = 'unix_socket_directory'
        postgres_options = [
            # When running not as root, postgres might try to put files
            # where they're not writable (see
            # https://paste.yougov.net/YKdgi). So set the socket_dir.
            '-c', '{}={}'.format(socketop, self.base_pathname),
            '-h', self.host,
            '-i',  # enable TCP/IP connections
            '-p', self.port,
        ]
        subprocess.check_call([
            PostgresFinder.find_root() / 'pg_ctl',
            'start',
            '-D', self.base_pathname,
            '-l', os.path.join(self.base_pathname, 'postgresql.log'),
            '-o', subprocess.list2cmdline(postgres_options),
        ])
    # Postgres may launch, then abort if it's unhappy with some parameter.
    # This post-launch test helps us decide.
    if not self.is_running():
        tmpl = ('%s aborted immediately after launch, check '
                'postgresql.log in storage dir')
        raise RuntimeError(tmpl % self)
def get_process_curses_data(self, p, first, args):
    """Get curses data to display for a process.

    - p is the process to display
    - first is a tag=True if the process is the first on the list
    """
    ret = [self.curse_new_line()]
    # CPU
    if 'cpu_percent' in p and p['cpu_percent'] is not None and p['cpu_percent'] != '':
        if args.disable_irix and self.nb_log_core != 0:
            msg = self.layout_stat['cpu'].format(p['cpu_percent'] / float(self.nb_log_core))
        else:
            msg = self.layout_stat['cpu'].format(p['cpu_percent'])
        alert = self.get_alert(p['cpu_percent'],
                               highlight_zero=False,
                               is_max=(p['cpu_percent'] == self.max_values['cpu_percent']),
                               header="cpu")
        ret.append(self.curse_add_line(msg, alert))
    else:
        msg = self.layout_header['cpu'].format('?')
        ret.append(self.curse_add_line(msg))
    # MEM
    if 'memory_percent' in p and p['memory_percent'] is not None and p['memory_percent'] != '':
        msg = self.layout_stat['mem'].format(p['memory_percent'])
        alert = self.get_alert(p['memory_percent'],
                               highlight_zero=False,
                               is_max=(p['memory_percent'] == self.max_values['memory_percent']),
                               header="mem")
        ret.append(self.curse_add_line(msg, alert))
    else:
        msg = self.layout_header['mem'].format('?')
        ret.append(self.curse_add_line(msg))
    # VMS/RSS
    if 'memory_info' in p and p['memory_info'] is not None and p['memory_info'] != '':
        # VMS
        msg = self.layout_stat['virt'].format(self.auto_unit(p['memory_info'][1],
                                                             low_precision=False))
        ret.append(self.curse_add_line(msg, optional=True))
        # RSS
        msg = self.layout_stat['res'].format(self.auto_unit(p['memory_info'][0],
                                                            low_precision=False))
        ret.append(self.curse_add_line(msg, optional=True))
    else:
        msg = self.layout_header['virt'].format('?')
        ret.append(self.curse_add_line(msg))
        msg = self.layout_header['res'].format('?')
        ret.append(self.curse_add_line(msg))
    # PID
    msg = self.layout_stat['pid'].format(p['pid'], width=self.__max_pid_size())
    ret.append(self.curse_add_line(msg))
    # USER
    if 'username' in p:
        # docker internal users are displayed as ints only, therefore str()
        # Correct issue #886 on Windows OS
        msg = self.layout_stat['user'].format(str(p['username'])[:9])
        ret.append(self.curse_add_line(msg))
    else:
        msg = self.layout_header['user'].format('?')
        ret.append(self.curse_add_line(msg))
    # TIME+
    try:
        # Sum user and system time
        user_system_time = p['cpu_times'][0] + p['cpu_times'][1]
    except (OverflowError, TypeError) as e:
        # Catch OverflowError on some Amazon EC2 server
        # See https://github.com/nicolargo/glances/issues/87
        # Also catch TypeError on macOS
        # See: https://github.com/nicolargo/glances/issues/622
        # logger.debug("Cannot get TIME+ ({})".format(e))
        msg = self.layout_header['time'].format('?')
        ret.append(self.curse_add_line(msg, optional=True))
    else:
        hours, minutes, seconds = seconds_to_hms(user_system_time)
        if hours > 99:
            msg = '{:<7}h'.format(hours)
        elif 0 < hours < 100:
            msg = '{}h{}:{}'.format(hours, minutes, seconds)
        else:
            msg = '{}:{}'.format(minutes, seconds)
        msg = self.layout_stat['time'].format(msg)
        if hours > 0:
            ret.append(self.curse_add_line(msg, decoration='CPU_TIME', optional=True))
        else:
            ret.append(self.curse_add_line(msg, optional=True))
    # THREAD
    if 'num_threads' in p:
        num_threads = p['num_threads']
        if num_threads is None:
            num_threads = '?'
        msg = self.layout_stat['thread'].format(num_threads)
        ret.append(self.curse_add_line(msg))
    else:
        msg = self.layout_header['thread'].format('?')
        ret.append(self.curse_add_line(msg))
    # NICE
    if 'nice' in p:
        nice = p['nice']
        if nice is None:
            nice = '?'
        msg = self.layout_stat['nice'].format(nice)
        ret.append(self.curse_add_line(msg, decoration=self.get_nice_alert(nice)))
    else:
        msg = self.layout_header['nice'].format('?')
        ret.append(self.curse_add_line(msg))
    # STATUS
    if 'status' in p:
        status = p['status']
        msg = self.layout_stat['status'].format(status)
        if status == 'R':
            ret.append(self.curse_add_line(msg, decoration='STATUS'))
        else:
            ret.append(self.curse_add_line(msg))
    else:
        msg = self.layout_header['status'].format('?')
        ret.append(self.curse_add_line(msg))
    # IO read/write
    if 'io_counters' in p and p['io_counters'][4] == 1 and p['time_since_update'] != 0:
        # Display rate if stats is available and io_tag ([4]) == 1
        # IO read
        io_rs = int((p['io_counters'][0] - p['io_counters'][2]) / p['time_since_update'])
        if io_rs == 0:
            msg = self.layout_stat['ior'].format("0")
        else:
            msg = self.layout_stat['ior'].format(self.auto_unit(io_rs, low_precision=True))
        ret.append(self.curse_add_line(msg, optional=True, additional=True))
        # IO write
        io_ws = int((p['io_counters'][1] - p['io_counters'][3]) / p['time_since_update'])
        if io_ws == 0:
            msg = self.layout_stat['iow'].format("0")
        else:
            msg = self.layout_stat['iow'].format(self.auto_unit(io_ws, low_precision=True))
        ret.append(self.curse_add_line(msg, optional=True, additional=True))
    else:
        msg = self.layout_header['ior'].format("?")
        ret.append(self.curse_add_line(msg, optional=True, additional=True))
        msg = self.layout_header['iow'].format("?")
        ret.append(self.curse_add_line(msg, optional=True, additional=True))
    # Command line
    # If no command line for the process is available, fallback to
    # the bare process name instead
    if 'cmdline' in p:
        cmdline = p['cmdline']
    else:
        cmdline = '?'
    try:
        if cmdline:
            path, cmd, arguments = split_cmdline(cmdline)
            if os.path.isdir(path) and not args.process_short_name:
                msg = self.layout_stat['command'].format(path) + os.sep
                ret.append(self.curse_add_line(msg, splittable=True))
                ret.append(self.curse_add_line(cmd, decoration='PROCESS', splittable=True))
            else:
                msg = self.layout_stat['command'].format(cmd)
                ret.append(self.curse_add_line(msg, decoration='PROCESS', splittable=True))
            if arguments:
                msg = ' ' + self.layout_stat['command'].format(arguments)
                ret.append(self.curse_add_line(msg, splittable=True))
        else:
            msg = self.layout_stat['name'].format(p['name'])
            ret.append(self.curse_add_line(msg, splittable=True))
    except (TypeError, UnicodeEncodeError) as e:
        # Avoid crash after running fine for several hours #1335
        logger.debug("Can not decode command line '{}' ({})".format(cmdline, e))
        ret.append(self.curse_add_line('', splittable=True))
    # Add extended stats but only for the top processes
    if first and 'extended_stats' in p and args.enable_process_extended:
        # Left padding
        xpad = ' ' * 13
        # First line is CPU affinity
        if 'cpu_affinity' in p and p['cpu_affinity'] is not None:
            ret.append(self.curse_new_line())
            msg = xpad + 'CPU affinity: ' + str(len(p['cpu_affinity'])) + ' cores'
            ret.append(self.curse_add_line(msg, splittable=True))
        # Second line is memory info
        if 'memory_info' in p and p['memory_info'] is not None:
            ret.append(self.curse_new_line())
            msg = '{}Memory info: {}'.format(xpad, p['memory_info'])
            if 'memory_swap' in p and p['memory_swap'] is not None:
                msg += ' swap ' + self.auto_unit(p['memory_swap'], low_precision=False)
            ret.append(self.curse_add_line(msg, splittable=True))
        # Third line is for open files/network sessions
        msg = ''
        if 'num_threads' in p and p['num_threads'] is not None:
            msg += str(p['num_threads']) + ' threads '
        if 'num_fds' in p and p['num_fds'] is not None:
            msg += str(p['num_fds']) + ' files '
        if 'num_handles' in p and p['num_handles'] is not None:
            msg += str(p['num_handles']) + ' handles '
        if 'tcp' in p and p['tcp'] is not None:
            msg += str(p['tcp']) + ' TCP '
        if 'udp' in p and p['udp'] is not None:
            msg += str(p['udp']) + ' UDP'
        if msg != '':
            ret.append(self.curse_new_line())
            msg = xpad + 'Open: ' + msg
            ret.append(self.curse_add_line(msg, splittable=True))
        # Fourth line is IO nice level (only Linux and Windows OS)
        if 'ionice' in p and p['ionice'] is not None and hasattr(p['ionice'], 'ioclass'):
            ret.append(self.curse_new_line())
            msg = xpad + 'IO nice: '
            k = 'Class is '
            v = p['ionice'].ioclass
            # Linux: The scheduling class. 0 for none, 1 for real time,
            # 2 for best-effort, 3 for idle.
            # Windows: On Windows only ioclass is used and it can be set to
            # 2 (normal), 1 (low) or 0 (very low).
            if WINDOWS:
                if v == 0:
                    msg += k + 'Very Low'
                elif v == 1:
                    msg += k + 'Low'
                elif v == 2:
                    msg += 'No specific I/O priority'
                else:
                    msg += k + str(v)
            else:
                if v == 0:
                    msg += 'No specific I/O priority'
                elif v == 1:
                    msg += k + 'Real Time'
                elif v == 2:
                    msg += k + 'Best Effort'
                elif v == 3:
                    msg += k + 'IDLE'
                else:
                    msg += k + str(v)
            # value is a number which goes from 0 to 7.
            # The higher the value, the lower the I/O priority of the process.
            if hasattr(p['ionice'], 'value') and p['ionice'].value != 0:
                msg += ' (value %s/7)' % str(p['ionice'].value)
            ret.append(self.curse_add_line(msg, splittable=True))

    return ret
def get(self, key, failobj=None, exact=0):
    """Raises exception if key is ambiguous"""
    if not exact:
        key = self.getfullkey(key, new=1)
    return self.data.get(key, failobj)
def parse(self, limit=None):
    """
    Override Source.parse()
    Parses version and interaction information from CTD

    Args:
        :param limit (int, optional) limit the number of rows processed
    Returns:
        :return None
    """
    if limit is not None:
        LOG.info("Only parsing first %d rows", limit)

    LOG.info("Parsing files...")
    # pub_map = dict()
    # file_path = '/'.join((self.rawdir,
    #                       self.static_files['publications']['file']))
    # if os.path.exists(file_path) is True:
    #     pub_map = self._parse_publication_file(
    #         self.static_files['publications']['file']
    #     )

    if self.test_only:
        self.test_mode = True

    self.geno = Genotype(self.graph)
    self.pathway = Pathway(self.graph)

    self._parse_ctd_file(
        limit, self.files['chemical_disease_interactions']['file'])
    self._parse_ctd_file(limit, self.files['gene_pathway']['file'])
    self._parse_ctd_file(limit, self.files['gene_disease']['file'])
    self._parse_curated_chem_disease(limit)

    LOG.info("Done parsing files.")
    return
def hydrate_target(hydrated_struct):
    """Construct a HydratedTarget from a TargetAdaptor and hydrated versions
    of its adapted fields."""
    # The docstring must precede any statement to count as a docstring; the
    # original had it after the first assignment.
    target_adaptor = hydrated_struct.value
    # Hydrate the fields of the adaptor and re-construct it.
    hydrated_fields = yield [Get(HydratedField, HydrateableField, fa)
                             for fa in target_adaptor.field_adaptors]
    kwargs = target_adaptor.kwargs()
    for field in hydrated_fields:
        kwargs[field.name] = field.value
    yield HydratedTarget(target_adaptor.address,
                         TargetAdaptor(**kwargs),
                         tuple(target_adaptor.dependencies))
def eject_media(self):
    """Ejects Virtual Media.

    :raises: SushyError, on an error from iLO.
    """
    try:
        super(VirtualMedia, self).eject_media()
    except sushy_exceptions.SushyError:
        target_uri = self._get_action_element('eject').target_uri
        self._conn.post(target_uri, data={})
def RepackTemplate(self,
                   template_path,
                   output_dir,
                   upload=False,
                   token=None,
                   sign=False,
                   context=None,
                   signed_template=False):
  """Repack binaries based on the configuration.

  We repack all templates in the templates directory. We expect to find
  only functioning templates, all other files should be removed. Each
  template contains a build.yaml that specifies how it was built and how
  it should be repacked.

  Args:
    template_path: template path string
    output_dir: Output files will be put in this directory.
    upload: If specified we also upload the repacked binary into the
      datastore.
    token: Token to use when uploading to the datastore.
    sign: If true, we want to digitally sign the installer.
    context: Array of context strings
    signed_template: If true, the libraries in the template are already
      signed. This is only used for windows when repacking the template
      multiple times.

  Returns:
    A list of output installers generated.
  """
  orig_config = config.CONFIG
  repack_config = RepackConfig()
  print("Repacking template: %s" % template_path)
  config.CONFIG = repack_config.GetConfigFromTemplate(template_path)

  result_path = None
  try:
    repack_context = config.CONFIG["Template.build_context"]
    if context:
      repack_context.extend(context)

    output_path = os.path.join(
        output_dir,
        config.CONFIG.Get(
            "ClientRepacker.output_filename", context=repack_context))

    print("Using context: %s and labels: %s" %
          (repack_context,
           config.CONFIG.Get("Client.labels", context=repack_context)))

    try:
      signer = None
      if sign:
        signer = self.GetSigner(repack_context)
      builder_obj = self.GetRepacker(context=repack_context, signer=signer)
      builder_obj.signed_template = signed_template
      result_path = builder_obj.MakeDeployableBinary(template_path,
                                                     output_path)
    except Exception:  # pylint: disable=broad-except
      logging.exception("Repacking template %s failed:", template_path)

    if result_path:
      print("Repacked into %s" % result_path)
      if upload:
        # We delay import here so we don't have to import the entire server
        # codebase and do full server init if we're just building and
        # repacking clients. This codepath is used by config_updater
        # initialize
        # pylint: disable=g-import-not-at-top
        from grr_response_server import maintenance_utils
        # pylint: enable=g-import-not-at-top
        client_platform = config.CONFIG.Get(
            "Client.platform", context=repack_context)
        repack_basename = config.CONFIG.Get(
            "ClientRepacker.output_basename", context=repack_context)
        repack_extension = config.CONFIG.Get(
            "ClientBuilder.output_extension", context=repack_context)
        repack_filename = repack_basename + repack_extension
        # TODO(user): Use signed_binary_utils.GetAFF4ExecutablesRoot()
        # here instead of hardcoding the root once repacking logic has been
        # moved to grr-response-server.
        binary_urn = rdfvalue.RDFURN("aff4:/config/executables").Add(
            client_platform).Add("installers").Add(repack_filename)
        maintenance_utils.UploadSignedConfigBlob(
            open(result_path, "rb").read(100 * 1024 * 1024),
            binary_urn,
            client_context=repack_context,
            token=token)
    else:
      print("Failed to repack %s." % template_path)
  finally:
    config.CONFIG = orig_config

  return result_path
def is_choked_turbulent_l(dP, P1, Psat, FF, FL=None, FLP=None, FP=None):
    r'''Calculates if a liquid flow in IEC 60534 calculations is critical or
    not, for use in IEC 60534 liquid valve sizing calculations.
    Either FL may be provided or FLP and FP, depending on the calculation
    process.

    .. math::
        \Delta P > F_L^2(P_1 - F_F P_{sat})

    .. math::
        \Delta P >= \left(\frac{F_{LP}}{F_P}\right)^2(P_1 - F_F P_{sat})

    Parameters
    ----------
    dP : float
        Differential pressure across the valve, with reducer/expanders [Pa]
    P1 : float
        Pressure of the fluid before the valve and reducers/expanders [Pa]
    Psat : float
        Saturation pressure of the fluid at inlet temperature [Pa]
    FF : float
        Liquid critical pressure ratio factor [-]
    FL : float, optional
        Liquid pressure recovery factor of a control valve without attached
        fittings [-]
    FLP : float, optional
        Combined liquid pressure recovery factor with piping geometry factor,
        for a control valve with attached fittings [-]
    FP : float, optional
        Piping geometry factor [-]

    Returns
    -------
    choked : bool
        Whether or not the flow is choked [-]

    Examples
    --------
    >>> is_choked_turbulent_l(460.0, 680.0, 70.1, 0.94, 0.9)
    False
    >>> is_choked_turbulent_l(460.0, 680.0, 70.1, 0.94, 0.6)
    True

    References
    ----------
    .. [1] IEC 60534-2-1 / ISA-75.01.01-2007
    '''
    if FLP and FP:
        return dP >= (FLP/FP)**2*(P1 - FF*Psat)
    elif FL:
        return dP >= FL**2*(P1 - FF*Psat)
    else:
        raise Exception('Either (FLP and FP) or FL is needed')
def colored_pygments_excepthook(type_, value, tb):
    """
    References:
        https://stackoverflow.com/questions/14775916/color-exceptions-python

    CommandLine:
        python -m utool.util_inject --test-colored_pygments_excepthook
    """
    tbtext = ''.join(traceback.format_exception(type_, value, tb))
    try:
        from utool import util_str
        formatted_text = util_str.highlight_text(tbtext, lexer_name='pytb',
                                                 stripall=True)
    except Exception:
        # FIXME: silent error; fall back to the default excepthook
        formatted_text = tbtext
        return sys.__excepthook__(type_, value, tb)
    #import utool as ut
    #if ut.SUPER_STRICT:
    #    raise
    sys.stderr.write(formatted_text)
def read_rels(archive):
    """Read relationships for a workbook"""
    xml_source = archive.read(ARC_WORKBOOK_RELS)
    tree = fromstring(xml_source)
    for element in safe_iterator(tree, '{%s}Relationship' % PKG_REL_NS):
        rId = element.get('Id')
        pth = element.get("Target")
        typ = element.get('Type')
        # normalise path
        if pth.startswith("/xl"):
            pth = pth.replace("/xl", "xl")
        elif not pth.startswith("xl") and not pth.startswith(".."):
            pth = "xl/" + pth
        yield rId, {'path': pth, 'type': typ}
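A self-contained sketch of the path-normalisation rule above on a hand-written relationships part (the namespace is the standard OPC relationships namespace; the Type values are shortened placeholders, not real schema URIs):

from xml.etree.ElementTree import fromstring

PKG_REL_NS = 'http://schemas.openxmlformats.org/package/2006/relationships'
xml_source = (
    '<Relationships xmlns="%s">'
    '<Relationship Id="rId1" Target="/xl/worksheets/sheet1.xml" Type="worksheet"/>'
    '<Relationship Id="rId2" Target="theme/theme1.xml" Type="theme"/>'
    '</Relationships>' % PKG_REL_NS)
for element in fromstring(xml_source).iter('{%s}Relationship' % PKG_REL_NS):
    pth = element.get('Target')
    if pth.startswith('/xl'):
        pth = pth.replace('/xl', 'xl')   # rId1 -> 'xl/worksheets/sheet1.xml'
    elif not pth.startswith('xl') and not pth.startswith('..'):
        pth = 'xl/' + pth                # rId2 -> 'xl/theme/theme1.xml'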
def removeApplicationManifest(self, pchApplicationManifestFullPath):
    """Removes an application manifest from the list to load when building
    the list of installed applications."""
    fn = self.function_table.removeApplicationManifest
    result = fn(pchApplicationManifestFullPath)
    return result
def messages(request, year=None, month=None, day=None,
             template="gnotty/messages.html"):
    """
    Show messages for the given query or day.
    """
    query = request.REQUEST.get("q")
    prev_url, next_url = None, None
    messages = IRCMessage.objects.all()
    if hide_joins_and_leaves(request):
        messages = messages.filter(join_or_leave=False)
    if query:
        search = Q(message__icontains=query) | Q(nickname__icontains=query)
        messages = messages.filter(search).order_by("-message_time")
    elif year and month and day:
        messages = messages.filter(message_time__year=year,
                                   message_time__month=month,
                                   message_time__day=day)
        day_delta = timedelta(days=1)
        this_date = date(int(year), int(month), int(day))
        prev_date = this_date - day_delta
        next_date = this_date + day_delta
        prev_url = reverse("gnotty_day", args=prev_date.timetuple()[:3])
        next_url = reverse("gnotty_day", args=next_date.timetuple()[:3])
    else:
        return redirect("gnotty_year", year=datetime.now().year)
    context = dict(settings)
    context["messages"] = messages
    context["prev_url"] = prev_url
    context["next_url"] = next_url
    return render(request, template, context)
def ChangeUserStatus(self, Status):
    """Changes the online status for the current user.

    :Parameters:
      Status : `enums`.cus*
        New online status for the user.

    :note: This function waits until the online status changes. Alternatively,
           use the `CurrentUserStatus` property to perform an immediate change
           of status.
    """
    if self.CurrentUserStatus.upper() == Status.upper():
        return
    self._ChangeUserStatus_Event = threading.Event()
    self._ChangeUserStatus_Status = Status.upper()
    self.RegisterEventHandler('UserStatus', self._ChangeUserStatus_UserStatus)
    self.CurrentUserStatus = Status
    self._ChangeUserStatus_Event.wait()
    self.UnregisterEventHandler('UserStatus', self._ChangeUserStatus_UserStatus)
    del self._ChangeUserStatus_Event, self._ChangeUserStatus_Status
def unlock_key(key_name, stash, passphrase, backend):
    """Unlock a key to allow it to be modified, deleted or purged

    `KEY_NAME` is the name of the key to unlock
    """
    stash = _get_stash(backend, stash, passphrase)

    try:
        click.echo('Unlocking key...')
        stash.unlock(key_name=key_name)
        click.echo('Key unlocked successfully')
    except GhostError as ex:
        sys.exit(ex)
def encodeSplines(x, n_bases=10, spline_order=3, start=None, end=None, warn=True):
    """**Deprecated**. Function version of the transformer class `EncodeSplines`.

    Get B-spline base-function expansion

    # Details
        First, the knots for B-spline basis functions are placed
        equidistantly on the [start, end] range (inferred from the data
        if None). Next, the b_n(x) value is computed for each x and each
        n (spline-index) with `scipy.interpolate.splev`.

    # Arguments
        x: a numpy array of positions with 2 dimensions
        n_bases int: Number of spline bases.
        spline_order: 2 for quadratic, 3 for cubic splines
        start, end: range of values. If None, they are inferred from the data
            as minimum and maximum value.
        warn: Show warnings.

    # Returns
        `np.ndarray` of shape `(x.shape[0], x.shape[1], n_bases)`
    """
    # TODO - make it general...
    if len(x.shape) == 1:
        x = x.reshape((-1, 1))

    if start is None:
        start = np.nanmin(x)
    else:
        if x.min() < start:
            if warn:
                print("WARNING, x.min() < start for some elements. "
                      "Truncating them to start: x[x < start] = start")
            x = _trunc(x, minval=start)
    if end is None:
        end = np.nanmax(x)
    else:
        if x.max() > end:
            if warn:
                print("WARNING, x.max() > end for some elements. "
                      "Truncating them to end: x[x > end] = end")
            x = _trunc(x, maxval=end)

    bs = BSpline(start, end, n_bases=n_bases, spline_order=spline_order)

    # concatenate x to long
    assert len(x.shape) == 2
    n_rows = x.shape[0]
    n_cols = x.shape[1]

    x_long = x.reshape((-1,))
    x_feat = bs.predict(x_long, add_intercept=False)  # shape = (n_rows * n_cols, n_bases)
    x_final = x_feat.reshape((n_rows, n_cols, n_bases))
    return x_final
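The knot placement the docstring describes can be sketched directly with scipy (an illustration of the mechanism, not the package's own BSpline class): place equidistant knots on [start, end], pad the ends to full multiplicity, and evaluate the n-th basis function by handing splev a unit coefficient vector.

import numpy as np
from scipy.interpolate import splev

n_bases, spline_order = 10, 3
start, end = 0.0, 1.0
# Equidistant knots, end-padded as a clamped spline of this degree requires.
inner = np.linspace(start, end, n_bases - spline_order + 1)
knots = np.concatenate([[start] * spline_order, inner, [end] * spline_order])
x = np.linspace(start, end, 5)
# b_n(x): a unit coefficient vector picks out the n-th basis function.
basis = np.array([splev(x, (knots, np.eye(n_bases)[n], spline_order))
                  for n in range(n_bases)]).T
# basis.shape == (5, 10); each row sums to 1 (partition of unity).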
def dimap_V(D, I):
    """
    FUNCTION TO MAP DECLINATION, INCLINATIONS INTO EQUAL AREA PROJECTION, X,Y

    Usage:     dimap_V(D, I)
        D and I are both numpy arrays
    """
    # GET CARTESIAN COMPONENTS OF INPUT DIRECTION
    DI = np.array([D, I]).transpose()
    X = dir2cart(DI).transpose()
    # CALCULATE THE X,Y COORDINATES FOR THE EQUAL AREA PROJECTION
    # from Collinson 1983
    R = np.sqrt(1. - abs(X[2])) / (np.sqrt(X[0]**2 + X[1]**2))
    XY = np.array([X[1] * R, X[0] * R]).transpose()
    # RETURN XY[X,Y]
    return XY
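A hedged usage sketch (assumes `dimap_V` and its `dir2cart` dependency are importable from the same module):

import numpy as np

D = np.array([0., 90.])    # declinations in degrees
I = np.array([45., 10.])   # inclinations in degrees
XY = dimap_V(D, I)         # shape (2, 2): one [X, Y] pair per direction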
def load_model(path):
    """
    Load a saved H2O model from disk. (Note that ensemble binary models
    can now be loaded using this method.)

    :param path: the full path of the H2O Model to be imported.

    :returns: an :class:`H2OEstimator` object

    :examples:
        >>> path = h2o.save_model(my_model, dir=my_path)
        >>> h2o.load_model(path)
    """
    assert_is_type(path, str)
    res = api("POST /99/Models.bin/%s" % "", data={"dir": path})
    return get_model(res["models"][0]["model_id"]["name"])
def _escalation_rules_to_string(escalation_rules):
    'convert escalation_rules dict to a string for comparison'
    result = ''
    for rule in escalation_rules:
        result += 'escalation_delay_in_minutes: {0} '.format(
            rule['escalation_delay_in_minutes'])
        for target in rule['targets']:
            result += '{0}:{1} '.format(target['type'], target['id'])
    return result
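For instance, with one hypothetical PagerDuty-style rule (the ID is made up), the helper flattens it to a single comparable string:

rules = [{'escalation_delay_in_minutes': 30,
          'targets': [{'type': 'user', 'id': 'PXYZ123'}]}]
_escalation_rules_to_string(rules)
# -> 'escalation_delay_in_minutes: 30 user:PXYZ123 '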
def merge(self, workdir, ddb_files, out_ddb, description, delete_source_ddbs=True):
    """Merge DDB file, return the absolute path of the new database in workdir."""
    # We work with absolute paths.
    ddb_files = [os.path.abspath(s) for s in list_strings(ddb_files)]
    if not os.path.isabs(out_ddb):
        out_ddb = os.path.join(os.path.abspath(workdir), os.path.basename(out_ddb))

    if self.verbose:
        print("Will merge %d files into output DDB %s" % (len(ddb_files), out_ddb))
        for i, f in enumerate(ddb_files):
            print(" [%d] %s" % (i, f))

    # Handle the case of a single file since mrgddb uses 1 to denote GS files!
    if len(ddb_files) == 1:
        with open(ddb_files[0], "r") as inh, open(out_ddb, "w") as out:
            for line in inh:
                out.write(line)
        return out_ddb

    self.stdin_fname, self.stdout_fname, self.stderr_fname = \
        map(os.path.join, 3 * [os.path.abspath(workdir)],
            ["mrgddb.stdin", "mrgddb.stdout", "mrgddb.stderr"])

    inp = StringIO()
    inp.write(out_ddb + "\n")              # Name of the output file.
    inp.write(str(description) + "\n")     # Description.
    inp.write(str(len(ddb_files)) + "\n")  # Number of input DDBs.

    # Names of the DDB files.
    for fname in ddb_files:
        inp.write(fname + "\n")

    self.stdin_data = [s for s in inp.getvalue()]
    with open(self.stdin_fname, "wt") as fh:
        fh.writelines(self.stdin_data)
        # Force OS to write data to disk.
        fh.flush()
        os.fsync(fh.fileno())

    retcode = self.execute(workdir, exec_args=['--nostrict'])

    if retcode == 0 and delete_source_ddbs:
        # Remove ddb files.
        for f in ddb_files:
            try:
                os.remove(f)
            except IOError:
                pass

    return out_ddb
def initialize_zones(self):
    """initialize receiver zones"""
    zone_list = self.location_info.get('zone_list', {'main': True})

    for zone_id in zone_list:
        if zone_list[zone_id]:  # Location setup is valid
            self.zones[zone_id] = Zone(self, zone_id=zone_id)
        else:  # Location setup is not valid
            _LOGGER.debug("Ignoring zone: %s", zone_id)
def get_root():
    """Get the project root directory.

    We require that all commands are run from the project root, i.e. the
    directory that contains setup.py, setup.cfg, and versioneer.py .
    """
    root = os.path.realpath(os.path.abspath(os.getcwd()))
    setup_py = os.path.join(root, "setup.py")
    versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        # allow 'python path/to/setup.py COMMAND'
        root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
        setup_py = os.path.join(root, "setup.py")
        versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        err = ("Versioneer was unable to run the project root directory. "
               "Versioneer requires setup.py to be executed from "
               "its immediate directory (like 'python setup.py COMMAND'), "
               "or in a way that lets it use sys.argv[0] to find the root "
               "(like 'python path/to/setup.py COMMAND').")
        raise VersioneerBadRootError(err)
    try:
        # Certain runtime workflows (setup.py install/develop in a setuptools
        # tree) execute all dependencies in a single python process, so
        # "versioneer" may be imported multiple times, and python's shared
        # module-import table will cache the first one. So we can't use
        # os.path.dirname(__file__), as that will find whichever
        # versioneer.py was first imported, even in later projects.
        me = os.path.realpath(os.path.abspath(__file__))
        if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]:
            print("Warning: build in %s is using versioneer.py from %s"
                  % (os.path.dirname(me), versioneer_py))
    except NameError:
        pass
    return root
def valueReadPreprocessor(valueString, replaceParamsFile=None):
    """
    Apply global pre-processing to values during reading throughout the project.

    Args:
        valueString (str): String representing the value to be preprocessed.
        replaceParamsFile (gsshapy.orm.ReplaceParamFile, optional): Instance of
            the replace param file. Required if replacement variables are
            included in the project.

    Returns:
        str: Processed value as a string
    """
    if type(valueString) is bool:
        log.warning("Only numerical variable types can be handled by the "
                    "valueReadPreprocessor function.")
        return valueString

    # Default
    processedValue = valueString

    # Check for replacement variables
    if replaceParamsFile is not None and valueString is not None:
        if '[' in valueString or ']' in valueString:
            # Set default value
            processedValue = '{0}'.format(REPLACE_NO_VALUE)

            # Find the matching parameter and return the negative of the id
            for targetParam in replaceParamsFile.targetParameters:
                if targetParam.targetVariable == valueString:
                    processedValue = '{0}'.format(-1 * targetParam.id)
                    break

    return processedValue
def deletescript(self, name):
    """Delete a script from the server

    See MANAGESIEVE specifications, section 2.10

    :param name: script's name
    :rtype: boolean
    """
    code, data = self.__send_command(
        "DELETESCRIPT", [name.encode("utf-8")])
    if code == "OK":
        return True
    return False
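A hedged usage sketch; `client` and the script name are made up. Per the MANAGESIEVE specification (RFC 5804), the call issues a DELETESCRIPT command and succeeds only on an OK response:

# Hypothetical managesieve client instance and script name.
if client.deletescript('vacation'):   # sends: DELETESCRIPT "vacation"
    print('script removed')
else:
    print('server returned NO (e.g. script active or missing)')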
def parse_serialdiff(sd_dict):
    "helper for translate_check"
    if isinstance(sd_dict, list):
        if len(sd_dict) != 2 or sd_dict[0] != 'checkstale':
            raise NotImplementedError(sd_dict[0], len(sd_dict))
        return CheckStale(sd_dict[1])
    if isinstance(sd_dict['deltas'], list):
        # i.e. for VHString the whole deltas field is a single string
        # warning below: Delta.text will be a list sometimes. always?
        sd_dict['deltas'] = [diff.Delta(d['slice']['a'], d['slice']['b'], d['replace'])
                             for d in sd_dict['deltas']]
    return SerialDiff(**sd_dict)
def get_log_entry_ids_by_log(self, log_id):
    """Gets the list of ``LogEntry`` ``Ids`` associated with a ``Log``.

    arg:    log_id (osid.id.Id): ``Id`` of a ``Log``
    return: (osid.id.IdList) - list of related logEntry ``Ids``
    raise:  NotFound - ``log_id`` is not found
    raise:  NullArgument - ``log_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceBinSession.get_resource_ids_by_bin
    id_list = []
    # The original referenced an undefined ``log_ids``; use the ``log_id``
    # argument instead.
    for log_entry in self.get_log_entries_by_log(log_id):
        id_list.append(log_entry.get_id())
    return IdList(id_list)
def raw(node):
    """
    Add some raw html (possibly as a block)
    """
    o = nodes.raw(node.literal, node.literal, format='html')
    if node.sourcepos is not None:
        o.line = node.sourcepos[0][0]
    for n in MarkDown(node):
        o += n
    return o
def _read_header(filename):
    """Read the text header for each file

    Parameters
    ----------
    filename : Path
        path to single filename with the header

    Returns
    -------
    dict
        header
    """
    with filename.open('rb') as f:
        h = f.read(HDR_LENGTH).decode()

    header = {}
    for line in h.split('\n'):
        if '=' in line:
            key, value = line.split(' = ')
            key = key.strip()[7:]
            value = value.strip()[:-1]
            header[key] = value
    return header
def custom_config(request):
    """
    Save user-specific configuration property.

    POST parameters (JSON keys):
        app_name: application name for which the configuration property is
            valid (e.g., proso_models)
        key: name of the property (e.g., predictive_model.class)
        value: value of the property (number, string, boolean, ..., e.g,
            proso.models.prediction.PriorCurrentPredictiveModel)
        condition_key (optional): name of the condition which is used to
            filter the property (e.g., practice_filter)
        condition_value (optional): value for the condition filtering the
            property (e.g., [["context/world"],["category/state"]])
    """
    if request.method == 'POST':
        config_dict = json_body(request.body.decode('utf-8'))
        CustomConfig.objects.try_create(
            config_dict['app_name'],
            config_dict['key'],
            config_dict['value'],
            request.user.id,
            config_dict.get('condition_key') if config_dict.get('condition_key') else None,
            urllib.parse.unquote(config_dict.get('condition_value'))
            if config_dict.get('condition_value') else None
        )
        return config(request)
    else:
        return render_json(request, {}, template='common_custom_config.html',
                           help_text=custom_config.__doc__)
def reg_load(self, reg, value):
    """
    Load a value into a register. The value can be a string or binary (in
    which case the value is passed to :meth:`alloc_data`), another
    :class:`Register`, an :class:`Offset` or :class:`Buffer`, an integer
    immediate, a ``list`` or ``tuple`` or a syscall invocation.

    Arguments:
        reg(pwnypack.shellcode.types.Register): The register to load the
            value into.
        value: The value to load into the register.

    Returns:
        list: A list of mnemonics that will load value into reg.
    """
    if isinstance(value, (six.text_type, six.binary_type)):
        value = self.alloc_data(value)

    if value is None:
        return self.reg_load_imm(reg, 0)

    elif isinstance(value, Register):
        if reg is not value:
            return self.reg_load_reg(reg, value)
        else:
            return []

    elif isinstance(value, Offset):
        if value:
            return self.reg_load_offset(reg, value)
        else:
            return self.reg_load(reg, self.OFFSET_REG)

    elif isinstance(value, Buffer):
        return self.reg_load_offset(reg, sum(len(v) for v in six.iterkeys(self.data)) + value.offset)

    elif isinstance(value, six.integer_types):
        reg_width = self.REGISTER_WIDTH[reg]
        if value < -2 ** (reg_width - 1):
            raise ValueError('%d does not fit %s' % (value, reg))
        elif value >= 2 ** reg_width:
            raise ValueError('%d does not fit %s' % (value, reg))
        return self.reg_load_imm(reg, value)

    elif isinstance(value, (list, tuple)):
        return self.reg_load_array(reg, value)

    elif isinstance(value, SyscallInvoke):
        return self.syscall(value) + self.reg_load(reg, self.SYSCALL_RET_REG)

    else:
        raise TypeError('Invalid argument type "%s"' % repr(value))
def get_waveform_filter_norm(approximant, psd, length, delta_f, f_lower):
    """ Return the normalization vector for the approximant
    """
    if approximant in _filter_norms:
        return _filter_norms[approximant](psd, length, delta_f, f_lower)
    else:
        return None
def handle_message(self, ch, method, properties, body):
    """
    this is a pika.basic_consumer callback
    handles client inputs, runs appropriate workflows and views

    Args:
        ch: amqp channel
        method: amqp method
        properties:
        body: message body
    """
    input = {}
    headers = {}
    try:
        self.sessid = method.routing_key

        input = json_decode(body)
        data = input['data']

        # since this comes as "path" we dont know if it's view or workflow yet
        # TODO: just a workaround till we modify ui to
        if 'path' in data:
            if data['path'] in VIEW_METHODS:
                data['view'] = data['path']
            else:
                data['wf'] = data['path']
        session = Session(self.sessid)

        headers = {'remote_ip': input['_zops_remote_ip'],
                   'source': input['_zops_source']}

        if 'wf' in data:
            output = self._handle_workflow(session, data, headers)
        elif 'job' in data:
            self._handle_job(session, data, headers)
            return
        else:
            output = self._handle_view(session, data, headers)

    except HTTPError as e:
        import sys
        if hasattr(sys, '_called_from_test'):
            raise
        output = {"cmd": "error", "error": self._prepare_error_msg(e.message),
                  "code": e.code}
        log.exception("Http error occurred")
    except:
        self.current = Current(session=session, input=data)
        self.current.headers = headers
        import sys
        if hasattr(sys, '_called_from_test'):
            raise
        err = traceback.format_exc()
        output = {"cmd": "error", "error": self._prepare_error_msg(err), "code": 500}
        log.exception("Worker error occurred with message body:\n%s" % body)

    if 'callbackID' in input:
        output['callbackID'] = input['callbackID']

    log.info("OUTPUT for %s: %s" % (self.sessid, output))
    output['reply_timestamp'] = time()
    self.send_output(output)
def make_accessors(self, columns):
    """ Accessors can be numeric keys for sequence row data, string keys
    for mapping row data, or a callable function.  For numeric and string
    accessors they can be inside a 2 element tuple where the 2nd value is
    the default value;  Similar to dict.get(lookup, default). """
    accessors = list(self.accessors_def or range(columns))
    for i, x in enumerate(accessors):
        if not callable(x):
            if isinstance(x, collections.abc.Sequence) and \
               not isinstance(x, str):
                key, default = x
            else:
                key = x
                default = ''

            def acc(row, key=key, default=default):
                try:
                    return row[key]
                except (KeyError, IndexError):
                    return default

            accessors[i] = acc
    return accessors
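To make the closure's contract concrete, here is a standalone sketch of the accessor generated for a hypothetical ('age', 'n/a') spec:

def acc(row, key='age', default='n/a'):
    try:
        return row[key]
    except (KeyError, IndexError):
        return default

acc({'name': 'ada', 'age': 36})           # -> 36
acc({'name': 'ada'})                      # -> 'n/a' (missing mapping key)
acc(('only', 'two'), key=5, default='?')  # -> '?' (sequence index out of range)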
def get_id(self, natural_key, enhancement=None):
    """
    Returns the technical ID for a natural key or None if the given natural
    key is not valid.

    :param T natural_key: The natural key.
    :param T enhancement: Enhancement data of the dimension row.

    :rtype: int|None
    """
    # If the natural key is known return the technical ID immediately.
    if natural_key in self._map:
        return self._map[natural_key]

    # The natural key is not in the map of this dimension. Call a stored
    # procedure for translating the natural key to a technical key.
    self.pre_call_stored_procedure()
    success = False
    try:
        key = self.call_stored_procedure(natural_key, enhancement)
        success = True
    finally:
        self.post_call_stored_procedure(success)

    # Add the translation for natural key to technical ID to the map.
    self._map[natural_key] = key

    return key
def write(pkg_file, pkg_rels, parts):
    """
    Write a physical package (.pptx file) to *pkg_file* containing
    *pkg_rels* and *parts* and a content types stream based on the
    content types of the parts.
    """
    phys_writer = PhysPkgWriter(pkg_file)
    PackageWriter._write_content_types_stream(phys_writer, parts)
    PackageWriter._write_pkg_rels(phys_writer, pkg_rels)
    PackageWriter._write_parts(phys_writer, parts)
    phys_writer.close()
def spine_to_terminal_wedge(mol):
    """Arrange stereo wedge direction from spine to terminal atom
    """
    for i, a in mol.atoms_iter():
        if mol.neighbor_count(i) == 1:
            ni, nb = list(mol.neighbors(i).items())[0]
            if nb.order == 1 and nb.type in (1, 2) \
                    and ni > i != nb.is_lower_first:
                nb.is_lower_first = not nb.is_lower_first
                nb.type = {1: 2, 2: 1}[nb.type]
def create(self, sid=values.unset, phone_number=values.unset,
           is_reserved=values.unset):
    """
    Create a new PhoneNumberInstance

    :param unicode sid: The SID of a Twilio IncomingPhoneNumber resource
    :param unicode phone_number: The phone number in E.164 format
    :param bool is_reserved: Whether the new phone number should be reserved

    :returns: Newly created PhoneNumberInstance
    :rtype: twilio.rest.proxy.v1.service.phone_number.PhoneNumberInstance
    """
    data = values.of({'Sid': sid, 'PhoneNumber': phone_number,
                      'IsReserved': is_reserved, })

    payload = self._version.create(
        'POST',
        self._uri,
        data=data,
    )

    return PhoneNumberInstance(self._version, payload,
                               service_sid=self._solution['service_sid'], )
def compare(self, otherdigest, ishex=False):
    """Compute difference in bits between own digest and another.
    returns -127 to 128; 128 is the same, -127 is different"""
    bits = 0
    myd = self.digest()
    if ishex:
        # convert to 32-tuple of unsigned two-byte INTs
        otherdigest = tuple([int(otherdigest[i:i + 2], 16)
                             for i in range(0, 63, 2)])
    for i in range(32):
        bits += POPC[255 & myd[i] ^ otherdigest[i]]
    return 128 - bits
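A hedged usage sketch: the `Nilsimsa` constructor name is assumed; only `digest()` and `compare()` come from the code above. Because each differing bit subtracts from 128, identical inputs score the maximum:

# Assumed constructor; compare() and digest() are the methods shown above.
a = Nilsimsa(b'the quick brown fox')
b = Nilsimsa(b'the quick brown fox')
assert a.compare(b.digest()) == 128  # no differing bits between the digests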
def api_version_elb_backend(*args, **kwargs):
    """
    ELB and ELBV2 (Classic and Application load balancers) use the same
    hostname and url space. To differentiate them we must read the
    `Version` parameter out of the url-encoded request body.

    TODO: There has _got_ to be a better way to do this. Please help us
    think of one.
    """
    request = args[0]

    if hasattr(request, 'values'):
        # boto3
        version = request.values.get('Version')
    elif isinstance(request, AWSPreparedRequest):
        # boto in-memory
        version = parse_qs(request.body).get('Version')[0]
    else:
        # boto in server mode
        request.parse_request()
        version = request.querystring.get('Version')[0]

    if '2012-06-01' == version:
        return ELBResponse.dispatch(*args, **kwargs)
    elif '2015-12-01' == version:
        return ELBV2Response.dispatch(*args, **kwargs)
    else:
        raise Exception("Unknown ELB API version: {}".format(version))
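The `parse_qs` branch is the least obvious; standalone, it behaves like this:

from urllib.parse import parse_qs

body = 'Action=DescribeLoadBalancers&Version=2015-12-01'
version = parse_qs(body).get('Version')[0]
# version == '2015-12-01' -> dispatched to ELBV2Response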
def delete_resource(self, resource_id): """Deletes a ``Resource``. arg: resource_id (osid.id.Id): the ``Id`` of the ``Resource`` to remove raise: NotFound - ``resource_id`` not found raise: NullArgument - ``resource_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceAdminSession.delete_resource_template collection = JSONClientValidated('resource', collection='Resource', runtime=self._runtime) if not isinstance(resource_id, ABCId): raise errors.InvalidArgument('the argument is not a valid OSID Id') resource_map = collection.find_one( dict({'_id': ObjectId(resource_id.get_identifier())}, **self._view_filter())) objects.Resource(osid_object_map=resource_map, runtime=self._runtime, proxy=self._proxy)._delete() collection.delete_one({'_id': ObjectId(resource_id.get_identifier())})
Deletes a ``Resource``. arg: resource_id (osid.id.Id): the ``Id`` of the ``Resource`` to remove raise: NotFound - ``resource_id`` not found raise: NullArgument - ``resource_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def correct(self, images, bgImages=None, exposure_time=None, light_spectrum=None, threshold=0.1, keep_size=True, date=None, deblur=False, denoise=False): ''' exposure_time [s] date -> string e.g. '30. Nov 15' to get a calibration on from date -> {'dark current':'30. Nov 15', 'flat field':'15. Nov 15', 'lens':'14. Nov 15', 'noise':'01. Nov 15'} ''' print('CORRECT CAMERA ...') if isinstance(date, string_types) or date is None: date = {'dark current': date, 'flat field': date, 'lens': date, 'noise': date, 'psf': date} if light_spectrum is None: try: light_spectrum = self.coeffs['light spectra'][0] except IndexError: pass # do we have multiple images? if (type(images) in (list, tuple) or (isinstance(images, np.ndarray) and images.ndim == 3 and images.shape[-1] not in (3, 4) # is color )): if len(images) > 1: # 0. NOISE n = self.coeffs['noise'] if self.noise_level_function is None and len(n): n = _getFromDate(n, date['noise'])[2] self.noise_level_function = lambda x: NoiseLevelFunction.boundedFunction( x, *n) print('... remove single-time-effects from images ') # 1. STE REMOVAL ONLY IF >=2 IMAGES ARE GIVEN: ste = SingleTimeEffectDetection(images, nStd=4, noise_level_function=self.noise_level_function) image = ste.noSTE if self.noise_level_function is None: self.noise_level_function = ste.noise_level_function else: image = np.asfarray(imread(images[0], dtype=np.float)) else: image = np.asfarray(imread(images, dtype=np.float)) self._checkShape(image) self.last_light_spectrum = light_spectrum self.last_img = image # 2. BACKGROUND REMOVAL try: self._correctDarkCurrent(image, exposure_time, bgImages, date['dark current']) except Exception as errm: print('Error: %s' % errm) # 3. VIGNETTING/SENSITIVITY CORRECTION: try: self._correctVignetting(image, light_spectrum, date['flat field']) except Exception as errm: print('Error: %s' % errm) # 4. REPLACE DEFECTIVE PX WITH MEDIAN FILTERED VALUE if threshold > 0: print('... remove artefacts') try: image = self._correctArtefacts(image, threshold) except Exception as errm: print('Error: %s' % errm) # 5. DEBLUR if deblur: print('... remove blur') try: image = self._correctBlur(image, light_spectrum, date['psf']) except Exception as errm: print('Error: %s' % errm) # 6. LENS CORRECTION: try: image = self._correctLens(image, light_spectrum, date['lens'], keep_size) except TypeError: print('Error: no lens calibration found') except Exception as errm: print('Error: %s' % errm) # 7. DENOISE if denoise: print('... denoise ... this might take some time') image = self._correctNoise(image) print('DONE') return image
exposure_time [s] date -> string e.g. '30. Nov 15' to get a calibration on from date -> {'dark current':'30. Nov 15', 'flat field':'15. Nov 15', 'lens':'14. Nov 15', 'noise':'01. Nov 15'}
def MessageToJson(message, include_fields=None): """Convert the given message to JSON.""" result = _ProtoJsonApiTools.Get().encode_message(message) return _IncludeFields(result, message, include_fields)
Convert the given message to JSON.
def from_filename(cls, filename, sync_from_start=True): """ Create a `Lexer` from a filename. """ # Inline imports: the Pygments dependency is optional! from pygments.util import ClassNotFound from pygments.lexers import get_lexer_for_filename try: pygments_lexer = get_lexer_for_filename(filename) except ClassNotFound: return SimpleLexer() else: return cls(pygments_lexer.__class__, sync_from_start=sync_from_start)
Create a `Lexer` from a filename.
def get_default_config_help(self): """ Returns the help text for the configuration options for this handler """ config = super(StatsdHandler, self).get_default_config_help() config.update({ 'host': '', 'port': '', 'batch': '', }) return config
Returns the help text for the configuration options for this handler
def update(self): """Update value based on :math:`HV=BBV/BNV`. Required Parameters: |BBV| |BNV| Examples: >>> from hydpy.models.lstream import * >>> parameterstep('1d') >>> bbv(left=10., right=40.) >>> bnv(left=10., right=20.) >>> derived.hv.update() >>> derived.hv hv(left=1.0, right=2.0) >>> bbv(left=10., right=0.) >>> bnv(left=0., right=20.) >>> derived.hv.update() >>> derived.hv hv(0.0) """ con = self.subpars.pars.control self(0.) for idx in range(2): if (con.bbv[idx] > 0.) and (con.bnv[idx] > 0.): self.values[idx] = con.bbv[idx]/con.bnv[idx]
Update value based on :math:`HV=BBV/BNV`. Required Parameters: |BBV| |BNV| Examples: >>> from hydpy.models.lstream import * >>> parameterstep('1d') >>> bbv(left=10., right=40.) >>> bnv(left=10., right=20.) >>> derived.hv.update() >>> derived.hv hv(left=1.0, right=2.0) >>> bbv(left=10., right=0.) >>> bnv(left=0., right=20.) >>> derived.hv.update() >>> derived.hv hv(0.0)
def get_web_auth_session_key(self, url, token=""): """ Retrieves the session key of a web authorization process by its URL. """ session_key, _username = self.get_web_auth_session_key_username(url, token) return session_key
Retrieves the session key of a web authorization process by its URL.
def make_grid(xx, yy): """ Returns two n-by-n matrices. The first one contains all the x values and the second all the y values of a cartesian product between `xx` and `yy`. """ n = len(xx) xx, yy = np.meshgrid(xx, yy) grid = np.array([xx.ravel(), yy.ravel()]).T x = grid[:, 0].reshape(n, n) y = grid[:, 1].reshape(n, n) return x, y
Returns two n-by-n matrices. The first one contains all the x values and the second all the y values of a cartesian product between `xx` and `yy`.
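A small worked example of the meshgrid step the function relies on (assumes numpy, as in the function body):

import numpy as np

xx, yy = np.meshgrid([0., 1.], [10., 20.])
# xx repeats the x values along rows, yy repeats the y values down columns:
# xx == [[0., 1.], [0., 1.]]   yy == [[10., 10.], [20., 20.]]
grid = np.array([xx.ravel(), yy.ravel()]).T
# rows of grid enumerate the cartesian product: (0,10), (1,10), (0,20), (1,20)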
def get_login_information(self, code=None): """Return Clef user info after exchanging code for OAuth token.""" # do the handshake to get token access_token = self._get_access_token(code) # make request with token to get user details return self._get_user_info(access_token)
Return Clef user info after exchanging code for OAuth token.
def make_osm_query(query): """ Make a request to OSM and return the parsed JSON. Parameters ---------- query : str A string in the Overpass QL format. Returns ------- data : dict """ osm_url = 'http://www.overpass-api.de/api/interpreter' req = requests.get(osm_url, params={'data': query}) req.raise_for_status() return req.json()
Make a request to OSM and return the parsed JSON. Parameters ---------- query : str A string in the Overpass QL format. Returns ------- data : dict
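A hedged usage sketch; the query string is Overpass QL, and the bounding box below is made up for illustration:

query = '[out:json];node["amenity"="cafe"](52.50,13.39,52.52,13.41);out;'
data = make_osm_query(query)
# Overpass places matched objects under the 'elements' key
cafes = data['elements']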
def read_stream (stream): """Python 3 compat note: we're assuming `stream` gives bytes not unicode.""" section = None key = None data = None for fullline in stream: line = fullline.split ('#', 1)[0] m = sectionre.match (line) if m is not None: # New section if section is not None: if key is not None: section.set_one (key, data.strip ().decode ('utf8')) key = data = None yield section section = Holder () section.section = m.group (1) continue if len (line.strip ()) == 0: if key is not None: section.set_one (key, data.strip ().decode ('utf8')) key = data = None continue m = escre.match (fullline) if m is not None: if section is None: raise InifileError ('key seen without section!') if key is not None: section.set_one (key, data.strip ().decode ('utf8')) key = m.group (1) data = m.group (2).replace (r'\"', '"').replace (r'\n', '\n').replace (r'\\', '\\') section.set_one (key, data.decode ('utf8')) key = data = None continue m = keyre.match (line) if m is not None: if section is None: raise InifileError ('key seen without section!') if key is not None: section.set_one (key, data.strip ().decode ('utf8')) key = m.group (1) data = m.group (2) if not len (data): data = ' ' elif not data[-1].isspace (): data += ' ' continue if line[0].isspace () and key is not None: data += line.strip () + ' ' continue raise InifileError ('unparsable line: ' + line[:-1]) if section is not None: if key is not None: section.set_one (key, data.strip ().decode ('utf8')) yield section
Python 3 compat note: we're assuming `stream` gives bytes not unicode.
def interpolate2dStructuredPointSpreadIDW(grid, mask, kernel=15, power=2, maxIter=1e5, copy=True): ''' same as interpolate2dStructuredIDW but using the point spread method this is faster if there are bigger connected masked areas and the border length is smaller replace all values in [grid] indicated by [mask] with the inverse distance weighted interpolation of all values within px+-kernel [power] -> distance weighting factor: 1/distance**[power] [copy] -> False: a bit faster, but modifies 'grid' and 'mask' ''' assert grid.shape == mask.shape, 'grid and mask shape are different' border = np.zeros(shape=mask.shape, dtype=np.bool) if copy: # copy mask as well because if will be modified later: mask = mask.copy() grid = grid.copy() return _calc(grid, mask, border, kernel, power, maxIter)
same as interpolate2dStructuredIDW but using the point spread method this is faster if there are bigger connected masked areas and the border length is smaller replace all values in [grid] indicated by [mask] with the inverse distance weighted interpolation of all values within px+-kernel [power] -> distance weighting factor: 1/distance**[power] [copy] -> False: a bit faster, but modifies 'grid' and 'mask'
def psq2(d1, d2): """Compute the PSQ2 measure. Args: d1 (np.ndarray): The first distribution. d2 (np.ndarray): The second distribution. """ d1, d2 = flatten(d1), flatten(d2) def f(p): return sum((p ** 2) * np.nan_to_num(np.log(p * len(p)))) return abs(f(d1) - f(d2))
Compute the PSQ2 measure. Args: d1 (np.ndarray): The first distribution. d2 (np.ndarray): The second distribution.
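One worked case of f(p) = sum(p**2 * log(p * len(p))): for a uniform distribution each p_i * n equals 1, every log term vanishes, and f(p) is exactly zero.

import numpy as np

p = np.ones(4) / 4                          # uniform distribution
f = np.sum((p ** 2) * np.log(p * len(p)))   # log(1) == 0 for every term
assert f == 0.0                             # so psq2(uniform, uniform) == 0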
def add_proxy_auth(possible_proxy_url, proxy_auth): """ Add a username and password to a proxy URL, if the input value is a proxy URL. :param str possible_proxy_url: Proxy URL or ``DIRECT``. :param requests.auth.HTTPProxyAuth proxy_auth: Proxy authentication info. :returns: Proxy URL with auth info added, or ``DIRECT``. :rtype: str """ if possible_proxy_url == 'DIRECT': return possible_proxy_url parsed = urlparse(possible_proxy_url) return '{0}://{1}:{2}@{3}'.format(parsed.scheme, proxy_auth.username, proxy_auth.password, parsed.netloc)
Add a username and password to a proxy URL, if the input value is a proxy URL. :param str possible_proxy_url: Proxy URL or ``DIRECT``. :param requests.auth.HTTPProxyAuth proxy_auth: Proxy authentication info. :returns: Proxy URL with auth info added, or ``DIRECT``. :rtype: str
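Usage sketch with requests' HTTPProxyAuth; the proxy host below is hypothetical:

from requests.auth import HTTPProxyAuth

auth = HTTPProxyAuth('user', 's3cret')
add_proxy_auth('http://proxy.example.com:8080', auth)
# -> 'http://user:s3cret@proxy.example.com:8080'
add_proxy_auth('DIRECT', auth)
# -> 'DIRECT' (passed through unchanged)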
def owner(self): r""" Return the name of the owner of this file or directory. This follows symbolic links. On Windows, this returns a name of the form ur'DOMAIN\User Name'. On Windows, a group can own a file or directory. """ if os.name == 'nt': if win32security is None: raise Exception("path.owner requires win32all to be installed") desc = win32security.GetFileSecurity( self, win32security.OWNER_SECURITY_INFORMATION) sid = desc.GetSecurityDescriptorOwner() account, domain, typecode = win32security.LookupAccountSid(None, sid) return domain + u'\\' + account else: if pwd is None: raise NotImplementedError("path.owner is not implemented on this platform.") st = self.stat() return pwd.getpwuid(st.st_uid).pw_name
r""" Return the name of the owner of this file or directory. This follows symbolic links. On Windows, this returns a name of the form ur'DOMAIN\User Name'. On Windows, a group can own a file or directory.
def netconf_session_end_termination_reason(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") netconf_session_end = ET.SubElement(config, "netconf-session-end", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-notifications") termination_reason = ET.SubElement(netconf_session_end, "termination-reason") termination_reason.text = kwargs.pop('termination_reason') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def main(): '''Main entry point for the bioinfo CLI.''' args = docopt(__doc__, version=__version__) if 'bam_coverage' in args: bam_coverage(args['<reference>'], args['<alignments>'], int(args['<minmatch>']), min_mapq=int(args['--mapq']), min_len=float(args['--minlen']))
Main entry point for the bioinfo CLI.
def makeLUTfromCTF(sclist, N=None): """ Use a Color Transfer Function to generate colors in a vtk lookup table. See `here <http://www.vtk.org/doc/nightly/html/classvtkColorTransferFunction.html>`_. :param list sclist: a list in the form ``[(scalar1, [r,g,b]), (scalar2, 'blue'), ...]``. :return: the lookup table object ``vtkLookupTable``. This can be fed into ``colorMap``. """ ctf = vtk.vtkColorTransferFunction() ctf.SetColorSpaceToDiverging() for sc in sclist: scalar, col = sc r, g, b = getColor(col) ctf.AddRGBPoint(scalar, r, g, b) if N is None: N = len(sclist) lut = vtk.vtkLookupTable() lut.SetNumberOfTableValues(N) lut.Build() for i in range(N): rgb = list(ctf.GetColor(float(i) / N)) + [1] lut.SetTableValue(i, rgb) return lut
Use a Color Transfer Function to generate colors in a vtk lookup table. See `here <http://www.vtk.org/doc/nightly/html/classvtkColorTransferFunction.html>`_. :param list sclist: a list in the form ``[(scalar1, [r,g,b]), (scalar2, 'blue'), ...]``. :return: the lookup table object ``vtkLookupTable``. This can be fed into ``colorMap``.
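A usage sketch, assuming getColor accepts the color names shown:

# map scalars 0..1 through a blue-white-red diverging table with 256 entries
lut = makeLUTfromCTF([(0.0, 'blue'), (0.5, 'white'), (1.0, 'red')], N=256)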
def blast_seqs_to_pdb(self, seq_ident_cutoff=0, evalue=0.0001, all_genes=False, display_link=False, outdir=None, force_rerun=False): """BLAST each representative protein sequence to the PDB. Saves raw BLAST results (XML files). Args: seq_ident_cutoff (float, optional): Cutoff results based on percent coverage (in decimal form) evalue (float, optional): Cutoff for the E-value - filters for significant hits. 0.001 is liberal, 0.0001 is stringent (default). all_genes (bool): If all genes should be BLASTed, or only those without any structures currently mapped display_link (bool, optional): Set to True if links to the HTML results should be displayed outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories were not created initially force_rerun (bool, optional): If existing BLAST results should not be used, set to True. Default is False """ counter = 0 for g in tqdm(self.genes_with_a_representative_sequence): # If all_genes=False, BLAST only genes without a uniprot -> pdb mapping if g.protein.num_structures_experimental > 0 and not all_genes and not force_rerun: log.debug('{}: skipping BLAST, {} experimental structures already mapped ' 'and all_genes flag is False'.format(g.id, g.protein.num_structures_experimental)) continue # BLAST the sequence to the PDB new_pdbs = g.protein.blast_representative_sequence_to_pdb(seq_ident_cutoff=seq_ident_cutoff, evalue=evalue, display_link=display_link, outdir=outdir, force_rerun=force_rerun) if new_pdbs: counter += 1 log.debug('{}: {} PDBs BLASTed'.format(g.id, len(new_pdbs))) else: log.debug('{}: no BLAST results'.format(g.id)) log.info('Completed sequence --> PDB BLAST. See the "df_pdb_blast" attribute for a summary dataframe.') log.info('{}: number of genes with additional structures added from BLAST'.format(counter))
BLAST each representative protein sequence to the PDB. Saves raw BLAST results (XML files). Args: seq_ident_cutoff (float, optional): Cutoff results based on percent coverage (in decimal form) evalue (float, optional): Cutoff for the E-value - filters for significant hits. 0.001 is liberal, 0.0001 is stringent (default). all_genes (bool): If all genes should be BLASTed, or only those without any structures currently mapped display_link (bool, optional): Set to True if links to the HTML results should be displayed outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories were not created initially force_rerun (bool, optional): If existing BLAST results should not be used, set to True. Default is False
def get_mean_and_stddevs(self, sctx, rctx, dctx, imt, stddev_types): """ See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values. """ # Get original mean and standard deviations mean, stddevs = super().get_mean_and_stddevs( sctx, rctx, dctx, imt, stddev_types) cff = SInterCan15Mid.SITE_COEFFS[imt] mean += np.log(cff['mf']) return mean, stddevs
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
def reset_generation(self, trigger): """Re-arms the analog output according to current settings :param trigger: name of the trigger terminal. ``None`` value means generation begins immediately on run :type trigger: str """ self.tone_lock.acquire() npts = self.stim.size try: self.aotask = AOTaskFinite(self.aochan, self.fs, npts, trigsrc=trigger) self.aotask.write(self.stim) if self.attenuator is not None: self.attenuator.SetAtten(self.atten) else: # print "ERROR: attenuation not set!" pass # raise self.ngenerated +=1 if self.stim_changed: new_gen = self.stim else: new_gen = None self.stim_changed = False except: print u'ERROR! TERMINATE!' self.tone_lock.release() raise self.tone_lock.release() return new_gen
Re-arms the analog output according to current settings :param trigger: name of the trigger terminal. ``None`` value means generation begins immediately on run :type trigger: str
def visit_BinOp(self, node: AST, dfltChaining: bool = True) -> str: """Return `node`s operator and operands as inlined expression.""" op = node.op with self.op_man(op): if isinstance(op, ast.Pow): # Pow chains right-to-left src = self.visit(op).join((self.visit(node.left, dfltChaining=False), self.visit(node.right))) else: src = self.visit(op).join((self.visit(node.left), self.visit(node.right, dfltChaining=False))) return self.wrap_expr(src, dfltChaining)
Return `node`s operator and operands as inlined expression.
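The Pow special case exists because ** is right-associative; a quick standard-library check of how the AST nests:

import ast

tree = ast.parse('2 ** 3 ** 2', mode='eval')
# parsed as 2 ** (3 ** 2), so the nested BinOp sits on the right
assert isinstance(tree.body.op, ast.Pow)
assert isinstance(tree.body.right, ast.BinOp)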
def create(self, fail_on_found=False, force_on_exists=False, **kwargs): """Create a notification template. All required configuration-related fields (required according to notification_type) must be provided. There are two types of notification template creation: isolatedly creating a new notification template and creating a new notification template under a job template. Here the two types are discriminated by whether to provide --job-template option. --status option controls more specific, job-run-status-related association. Fields in the resource's `identity` tuple are used for a lookup; if a match is found, then no-op (unless `force_on_exists` is set) but do not fail (unless `fail_on_found` is set). =====API DOCS===== Create an object. :param fail_on_found: Flag that if set, the operation fails if an object matching the unique criteria already exists. :type fail_on_found: bool :param force_on_exists: Flag that if set, then if a match is found on unique fields, other fields will be updated to the provided values.; If unset, a match causes the request to be a no-op. :type force_on_exists: bool :param `**kwargs`: Keyword arguments which, all together, will be used as POST body to create the resource object. :returns: A dictionary combining the JSON output of the created resource, as well as two extra fields: "changed", a flag indicating if the resource is created successfully; "id", an integer which is the primary key of the created object. :rtype: dict =====API DOCS===== """ config_item = self._separate(kwargs) jt_id = kwargs.pop('job_template', None) status = kwargs.pop('status', 'any') old_endpoint = self.endpoint if jt_id is not None: jt = get_resource('job_template') jt.get(pk=jt_id) try: nt_id = self.get(**copy.deepcopy(kwargs))['id'] except exc.NotFound: pass else: if fail_on_found: raise exc.TowerCLIError('Notification template already ' 'exists and fail-on-found is ' 'switched on. Please use' ' "associate_notification" method' ' of job_template instead.') else: debug.log('Notification template already exists, ' 'associating with job template.', header='details') return jt.associate_notification_template( jt_id, nt_id, status=status) self.endpoint = '/job_templates/%d/notification_templates_%s/' %\ (jt_id, status) self._configuration(kwargs, config_item) result = super(Resource, self).create(**kwargs) self.endpoint = old_endpoint return result
Create a notification template. All required configuration-related fields (required according to notification_type) must be provided. There are two types of notification template creation: isolatedly creating a new notification template and creating a new notification template under a job template. Here the two types are discriminated by whether to provide --job-template option. --status option controls more specific, job-run-status-related association. Fields in the resource's `identity` tuple are used for a lookup; if a match is found, then no-op (unless `force_on_exists` is set) but do not fail (unless `fail_on_found` is set). =====API DOCS===== Create an object. :param fail_on_found: Flag that if set, the operation fails if an object matching the unique criteria already exists. :type fail_on_found: bool :param force_on_exists: Flag that if set, then if a match is found on unique fields, other fields will be updated to the provided values.; If unset, a match causes the request to be a no-op. :type force_on_exists: bool :param `**kwargs`: Keyword arguments which, all together, will be used as POST body to create the resource object. :returns: A dictionary combining the JSON output of the created resource, as well as two extra fields: "changed", a flag indicating if the resource is created successfully; "id", an integer which is the primary key of the created object. :rtype: dict =====API DOCS=====
def _get_link_status_code(link, allow_redirects=False, timeout=5): """ Get the status code of a link. If the timeout is exceeded, will return a 404. For a list of available status codes, see: https://en.wikipedia.org/wiki/List_of_HTTP_status_codes """ status_code = None try: response = requests.get( link, allow_redirects=allow_redirects, timeout=timeout) status_code = response.status_code except Exception: status_code = 404 return status_code
Get the status code of a link. If the timeout is exceeded, will return a 404. For a list of available status codes, see: https://en.wikipedia.org/wiki/List_of_HTTP_status_codes
def toDict(self): """ Get information about a title alignment as a dictionary. @return: A C{dict} representation of the title alignment. """ return { 'hsps': [hsp.toDict() for hsp in self.hsps], 'read': self.read.toDict(), }
Get information about a title alignment as a dictionary. @return: A C{dict} representation of the title alignment.
def _compute(self): """ The main method of the class, which computes an MCS given its over-approximation. The over-approximation is defined by a model for the hard part of the formula obtained in :func:`compute`. The method is essentially a simple loop going over all literals unsatisfied by the previous model, i.e. the literals of ``self.setd``, and checking which literals can be satisfied. This process can be seen as a refinement of the over-approximation of the MCS. The algorithm follows the pseudo-code of the LBX algorithm presented in [1]_. Additionally, if :class:`LBX` was constructed with the requirement to make "clause :math:`D`" calls, the method calls :func:`do_cld_check` at every iteration of the loop using the literals of ``self.setd`` not yet checked, as the contents of "clause :math:`D`". """ # unless clause D checks are used, test one literal at a time # and add it either to satisfied or backbone assumptions i = 0 while i < len(self.setd): if self.ucld: self.do_cld_check(self.setd[i:]) i = 0 if self.setd: # it may be empty after the clause D check if self.oracle.solve(assumptions=self.ss_assumps + self.bb_assumps + [self.setd[i]]): # filtering satisfied clauses self._filter_satisfied() else: # current literal is backbone self.bb_assumps.append(-self.setd[i]) i += 1
The main method of the class, which computes an MCS given its over-approximation. The over-approximation is defined by a model for the hard part of the formula obtained in :func:`compute`. The method is essentially a simple loop going over all literals unsatisfied by the previous model, i.e. the literals of ``self.setd``, and checking which literals can be satisfied. This process can be seen as a refinement of the over-approximation of the MCS. The algorithm follows the pseudo-code of the LBX algorithm presented in [1]_. Additionally, if :class:`LBX` was constructed with the requirement to make "clause :math:`D`" calls, the method calls :func:`do_cld_check` at every iteration of the loop using the literals of ``self.setd`` not yet checked, as the contents of "clause :math:`D`".
def delete_user(self, user_email): '''**Description** Deletes a user from Sysdig Monitor. **Arguments** - **user_email**: the email address of the user that will be deleted from Sysdig Monitor **Example** `examples/user_team_mgmt.py <https://github.com/draios/python-sdc-client/blob/master/examples/user_team_mgmt.py>`_ ''' res = self.get_user_ids([user_email]) if res[0] == False: return res userid = res[1][0] res = requests.delete(self.url + '/api/users/' + str(userid), headers=self.hdrs, verify=self.ssl_verify) if not self._checkResponse(res): return [False, self.lasterr] return [True, None]
**Description** Deletes a user from Sysdig Monitor. **Arguments** - **user_email**: the email address of the user that will be deleted from Sysdig Monitor **Example** `examples/user_team_mgmt.py <https://github.com/draios/python-sdc-client/blob/master/examples/user_team_mgmt.py>`_
def get_content_metadata(self, enterprise_customer): """ Return all content metadata contained in the catalogs associated with the EnterpriseCustomer. Arguments: enterprise_customer (EnterpriseCustomer): The EnterpriseCustomer to return content metadata for. Returns: list: List of dicts containing content metadata. """ content_metadata = OrderedDict() # TODO: This if block can be removed when we get rid of discovery service-based catalogs. if enterprise_customer.catalog: response = self._load_data( self.ENTERPRISE_CUSTOMER_ENDPOINT, detail_resource='courses', resource_id=str(enterprise_customer.uuid), traverse_pagination=True, ) for course in response['results']: for course_run in course['course_runs']: course_run['content_type'] = 'courserun' # Make this look like a search endpoint result. content_metadata[course_run['key']] = course_run for enterprise_customer_catalog in enterprise_customer.enterprise_customer_catalogs.all(): response = self._load_data( self.ENTERPRISE_CUSTOMER_CATALOGS_ENDPOINT, resource_id=str(enterprise_customer_catalog.uuid), traverse_pagination=True, querystring={'page_size': 1000}, ) for item in response['results']: content_id = utils.get_content_metadata_item_id(item) content_metadata[content_id] = item return content_metadata.values()
Return all content metadata contained in the catalogs associated with the EnterpriseCustomer. Arguments: enterprise_customer (EnterpriseCustomer): The EnterpriseCustomer to return content metadata for. Returns: list: List of dicts containing content metadata.
def get_failing_line(xml_string, exc_msg): """ Extract the failing line from the XML string, as indicated by the line/column information in the exception message. Returns a tuple (lineno, colno, new_pos, line), where lineno and colno and marker_pos may be None. """ max_before = 500 # max characters before reported position max_after = 500 # max characters after reported position max_unknown = 1000 # max characters when position cannot be determined assert isinstance(xml_string, six.binary_type) m = re.search(r':(\d+):(\d+):', exc_msg) if not m: xml_string, _ = truncate_line(xml_string, 1, 0, max_unknown - 1) return None, None, None, xml_string lineno = int(m.group(1)) colno = int(m.group(2)) if not xml_string.endswith(b'\n'): xml_string += b'\n' xml_lines = xml_string.splitlines() if len(xml_lines) < lineno: # This really should not happen; it means the line parsing went wrong # or SAX reported incorrect lines. We do not particularly care for # this case and simply truncate the string. xml_string, _ = truncate_line(xml_string, 1, 0, max_unknown - 1) return None, None, None, xml_string line = xml_lines[lineno - 1] line, new_pos = truncate_line(line, colno, max_before, max_after) return lineno, colno, new_pos, line
Extract the failing line from the XML string, as indicated by the line/column information in the exception message. Returns a tuple (lineno, colno, new_pos, line), where lineno and colno and marker_pos may be None.
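The line/column extraction relies on the ':line:column:' convention of SAX parser error messages; a minimal illustration (the message text is made up):

import re

exc_msg = 'example.xml:3:17: mismatched tag'
m = re.search(r':(\d+):(\d+):', exc_msg)
lineno, colno = int(m.group(1)), int(m.group(2))   # -> 3, 17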
def get_evcodes(self, inc_set=None, exc_set=None): """Get evidence codes for all categories except 'No biological data' (ND)""" codes = self.get_evcodes_all(inc_set, exc_set) codes.discard('ND') return codes
Get evidence codes for all categories except 'No biological data' (ND)
def get_unit_property_names(self, unit_id=None): '''Get a list of property names for a given unit, or for all units if unit_id is None Parameters ---------- unit_id: int The unit id for which the property names will be returned If None (default), will return property names for all units Returns ---------- property_names The list of property names from the specified unit(s) ''' if unit_id is None: property_names = [] for unit_id in self.get_unit_ids(): curr_property_names = self.get_unit_property_names(unit_id) for curr_property_name in curr_property_names: property_names.append(curr_property_name) property_names = sorted(list(set(property_names))) return property_names if isinstance(unit_id, (int, np.integer)): if unit_id in self.get_unit_ids(): if unit_id not in self._unit_properties: self._unit_properties[unit_id] = {} property_names = sorted(self._unit_properties[unit_id].keys()) return property_names else: raise ValueError(str(unit_id) + " is not a valid unit_id") else: raise ValueError(str(unit_id) + " must be an int")
Get a list of property names for a given unit, or for all units if unit_id is None Parameters ---------- unit_id: int The unit id for which the property names will be returned If None (default), will return property names for all units Returns ---------- property_names The list of property names from the specified unit(s)
def _convert_to_bytes(type_name, value): """Convert a typed value to a binary array""" int_types = {'uint8_t': 'B', 'int8_t': 'b', 'uint16_t': 'H', 'int16_t': 'h', 'uint32_t': 'L', 'int32_t': 'l'} type_name = type_name.lower() if type_name not in int_types and type_name not in ['string', 'binary']: raise ArgumentError('Type must be a known integer type, string or binary', known_integers=int_types.keys(), actual_type=type_name) if type_name == 'string': # value should be passed as a string bytevalue = bytes(value) elif type_name == 'binary': bytevalue = bytes(value) else: bytevalue = struct.pack("<%s" % int_types[type_name], value) return bytevalue
Convert a typed value to a binary array
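The struct format codes map directly to the C type names; for example, with the little-endian '<' prefix used above:

import struct

struct.pack('<H', 1000)   # uint16_t -> b'\xe8\x03'
struct.pack('<l', -1)     # int32_t  -> b'\xff\xff\xff\xff'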
def grep(expression, file, flags=0, invert=False): """ Search a file and return a list of all lines that match a regular expression. :param str expression: The regex to search for. :param file: The file to search in. :type file: str, file :param int flags: The regex flags to use when searching. :param bool invert: Select non matching lines instead. :return: All the matching lines. :rtype: list """ # requirements = re if isinstance(file, str): file = open(file) lines = [] for line in file: if bool(re.search(expression, line, flags=flags)) ^ invert: lines.append(line) return lines
Search a file and return a list of all lines that match a regular expression. :param str expression: The regex to search for. :param file: The file to search in. :type file: str, file :param int flags: The regex flags to use when searching. :param bool invert: Select non matching lines instead. :return: All the matching lines. :rtype: list
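Usage sketch; any iterable of lines works, e.g. an in-memory file:

import io

grep(r'^b', io.StringIO('alpha\nbeta\ngamma\n'))
# -> ['beta\n']; with invert=True the other two lines come back instead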
def _escape_sequence(self, char): """ Handle characters seen when in an escape sequence. Most non-vt52 commands start with a left-bracket after the escape and then a stream of parameters and a command. """ num = ord(char) if char == "[": self.state = "escape-lb" elif char == "(": self.state = "charset-g0" elif char == ")": self.state = "charset-g1" elif num in self.escape: self.dispatch(self.escape[num]) self.state = "stream" elif self.fail_on_unknown_esc: raise StreamProcessError("Unexpected character '%c' == '0x%02x'" % (char, ord(char)))
Handle characters seen when in an escape sequence. Most non-vt52 commands start with a left-bracket after the escape and then a stream of parameters and a command.
def plot_campaign_outline(self, campaign=0, facecolor="#666666", text=None): """Plot the outline of a campaign as a contiguous gray patch. Parameters ---------- campaign : int K2 Campaign number. facecolor : str Color of the patch. """ # The outline is composed of two filled rectangles, # defined by the first coordinate of the corner of four channels each fov = getKeplerFov(campaign) corners = fov.getCoordsOfChannelCorners() for rectangle in [[4, 75, 84, 11], [15, 56, 71, 32]]: ra_outline, dec_outline = [], [] for channel in rectangle: idx = np.where(corners[::, 2] == channel) ra_outline.append(corners[idx, 3][0][0]) dec_outline.append(corners[idx, 4][0][0]) ra = np.array(ra_outline + ra_outline[:1]) dec = np.array(dec_outline + dec_outline[:1]) if campaign == 1002: # Overlaps the meridian ra[ra > 180] -= 360 myfill = self.ax.fill(ra, dec, facecolor=facecolor, zorder=151, lw=0) # Print the campaign number on top of the outline if text is None: text = "{}".format(campaign) ra_center, dec_center, _ = fov.getBoresight() if campaign == 6: dec_center -= 2 elif campaign == 12: ra_center += 0.5 dec_center -= 1.7 elif campaign == 13: dec_center -= 1.5 elif campaign == 16: dec_center += 1.5 elif campaign == 18: dec_center -= 1.5 elif campaign == 19: dec_center += 1.7 elif campaign == 20: dec_center += 1.5 offsets = {5: (40, -20), 16: (-20, 40), 18: (-15, -50)} if campaign in [5]: pl.annotate(text, xy=(ra_center, dec_center), xycoords='data', ha='center', xytext=offsets[campaign], textcoords='offset points', size=18, zorder=0, color=facecolor, arrowprops=dict(arrowstyle="-", ec=facecolor, lw=2)) else: self.ax.text(ra_center, dec_center, text, fontsize=18, color="white", ha="center", va="center", zorder=155) return myfill
Plot the outline of a campaign as a contiguous gray patch. Parameters ---------- campaign : int K2 Campaign number. facecolor : str Color of the patch.
def asbaseline(self, pos): """Convert a position measure into a baseline measure. No actual baseline is calculated, since operations can be done on positions, with subtractions to obtain baselines at a later stage. :param pos: a position measure :returns: a baseline measure """ if not is_measure(pos) or pos['type'] not in ['position', 'baseline']: raise TypeError('Argument is not a position/baseline measure') if pos['type'] == 'position': loc = self.measure(pos, 'itrf') loc['type'] = 'baseline' return self.measure(loc, 'j2000') return pos
Convert a position measure into a baseline measure. No actual baseline is calculated, since operations can be done on positions, with subtractions to obtain baselines at a later stage. :param pos: a position measure :returns: a baseline measure
def get_permissions(self): """ :returns: list of dicts, or an empty list if there are no permissions. """ path = Client.urls['all_permissions'] conns = self._call(path, 'GET') return conns
:returns: list of dicts, or an empty list if there are no permissions.
def get_memory_usage(pid=None, timeout=1): '''get_memory_usage returns a dictionary of resident set size (rss) and virtual memory size (vms) for a process of interest, sampled for as long as the process is running :param pid: the pid of the process to monitor (falls back to an environment variable via get_pid) :param timeout: the sampling interval in seconds Example: sleep 3 & exec python -m memory "$!" ''' rss = [] vms = [] # If no pid is provided, look for environment variable pid = get_pid(pid) process = psutil.Process(pid) # Sample memory usage over time while the process is running while process.status() == 'running': mem = process.memory_info() rss.append(mem.rss) vms.append(mem.vms) time.sleep(timeout) # http://pythonhosted.org/psutil/#psutil.Process.memory_info result = {"rss": rss, "vms": vms} print(result) return result
get_memory_usage returns a dictionary of resident set size (rss) and virtual memory size (vms) for a process of interest, sampled for as long as the process is running :param pid: the pid of the process to monitor (falls back to an environment variable via get_pid) :param timeout: the sampling interval in seconds Example: sleep 3 & exec python -m memory "$!"
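The underlying psutil call, for reference; memory_info() reports sizes in bytes:

import os
import psutil

mem = psutil.Process(os.getpid()).memory_info()
print(mem.rss, mem.vms)   # resident set size and virtual memory size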
def _add_entry(self, formdata=None, data=unset_value, index=None): ''' Fill the form with previous data if necessary to handle partial update ''' if formdata: prefix = '-'.join((self.name, str(index))) basekey = '-'.join((prefix, '{0}')) idkey = basekey.format('id') if prefix in formdata: formdata[idkey] = formdata.pop(prefix) if hasattr(self.nested_model, 'id') and idkey in formdata: id = self.nested_model.id.to_python(formdata[idkey]) data = get_by(self.initial_data, 'id', id) initial = flatten_json(self.nested_form, data.to_mongo(), prefix) for key, value in initial.items(): if key not in formdata: formdata[key] = value else: data = None return super(NestedModelList, self)._add_entry(formdata, data, index)
Fill the form with previous data if necessary to handle partial update
def bunzip2(filename): """Uncompress `filename` in place""" log.debug("Uncompressing %s", filename) tmpfile = "%s.tmp" % filename os.rename(filename, tmpfile) b = bz2.BZ2File(tmpfile) f = open(filename, "wb") while True: block = b.read(512 * 1024) if not block: break f.write(block) f.close() b.close() shutil.copystat(tmpfile, filename) shutil.copymode(tmpfile, filename) os.unlink(tmpfile)
Uncompress `filename` in place
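A round-trip check of the in-place decompression (paths are hypothetical; assumes the module's log is configured):

import bz2
import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), 'payload.bz2')
with bz2.BZ2File(path, 'wb') as f:
    f.write(b'hello' * 100)
bunzip2(path)                        # same filename, now uncompressed
with open(path, 'rb') as f:
    assert f.read() == b'hello' * 100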
def commit(self, **kwargs): r"""Store changes of the current record instance in the database. #. Send a signal :data:`invenio_records.signals.before_record_update` with the current record to be committed as parameter. #. Validate the current record data. #. Commit the current record in the database. #. Send a signal :data:`invenio_records.signals.after_record_update` with the committed record as parameter. :Keyword Arguments: * **format_checker** -- An instance of the class :class:`jsonschema.FormatChecker`, which contains validation rules for formats. See :func:`~invenio_records.api.RecordBase.validate` for more details. * **validator** -- A :class:`jsonschema.IValidator` class that will be used to validate the record. See :func:`~invenio_records.api.RecordBase.validate` for more details. :returns: The :class:`Record` instance. """ if self.model is None or self.model.json is None: raise MissingModelError() with db.session.begin_nested(): before_record_update.send( current_app._get_current_object(), record=self ) self.validate(**kwargs) self.model.json = dict(self) flag_modified(self.model, 'json') db.session.merge(self.model) after_record_update.send( current_app._get_current_object(), record=self ) return self
r"""Store changes of the current record instance in the database. #. Send a signal :data:`invenio_records.signals.before_record_update` with the current record to be committed as parameter. #. Validate the current record data. #. Commit the current record in the database. #. Send a signal :data:`invenio_records.signals.after_record_update` with the committed record as parameter. :Keyword Arguments: * **format_checker** -- An instance of the class :class:`jsonschema.FormatChecker`, which contains validation rules for formats. See :func:`~invenio_records.api.RecordBase.validate` for more details. * **validator** -- A :class:`jsonschema.IValidator` class that will be used to validate the record. See :func:`~invenio_records.api.RecordBase.validate` for more details. :returns: The :class:`Record` instance.
def open_new_window(self, switch_to=True): """ Opens a new browser tab/window and switches to it by default. """ self.driver.execute_script("window.open('');") time.sleep(0.01) if switch_to: self.switch_to_window(len(self.driver.window_handles) - 1)
Opens a new browser tab/window and switches to it by default.
def _add_app_menu(self): """ Create a default Cocoa menu that shows 'Services', 'Hide', 'Hide Others', 'Show All', and 'Quit'. Will append the application name to some menu items if it's available. """ # Set the main menu for the application mainMenu = AppKit.NSMenu.alloc().init() self.app.setMainMenu_(mainMenu) # Create an application menu and make it a submenu of the main menu mainAppMenuItem = AppKit.NSMenuItem.alloc().init() mainMenu.addItem_(mainAppMenuItem) appMenu = AppKit.NSMenu.alloc().init() mainAppMenuItem.setSubmenu_(appMenu) appMenu.addItemWithTitle_action_keyEquivalent_(self._append_app_name(localization["cocoa.menu.about"]), "orderFrontStandardAboutPanel:", "") appMenu.addItem_(AppKit.NSMenuItem.separatorItem()) # Set the 'Services' menu for the app and create an app menu item appServicesMenu = AppKit.NSMenu.alloc().init() self.app.setServicesMenu_(appServicesMenu) servicesMenuItem = appMenu.addItemWithTitle_action_keyEquivalent_(localization["cocoa.menu.services"], nil, "") servicesMenuItem.setSubmenu_(appServicesMenu) appMenu.addItem_(AppKit.NSMenuItem.separatorItem()) # Append the 'Hide', 'Hide Others', and 'Show All' menu items appMenu.addItemWithTitle_action_keyEquivalent_(self._append_app_name(localization["cocoa.menu.hide"]), "hide:", "h") hideOthersMenuItem = appMenu.addItemWithTitle_action_keyEquivalent_(localization["cocoa.menu.hideOthers"], "hideOtherApplications:", "h") hideOthersMenuItem.setKeyEquivalentModifierMask_(AppKit.NSAlternateKeyMask | AppKit.NSCommandKeyMask) appMenu.addItemWithTitle_action_keyEquivalent_(localization["cocoa.menu.showAll"], "unhideAllApplications:", "") appMenu.addItem_(AppKit.NSMenuItem.separatorItem()) # Append a 'Quit' menu item appMenu.addItemWithTitle_action_keyEquivalent_(self._append_app_name(localization["cocoa.menu.quit"]), "terminate:", "q")
Create a default Cocoa menu that shows 'Services', 'Hide', 'Hide Others', 'Show All', and 'Quit'. Will append the application name to some menu items if it's available.
def ingest(self): """*Import the IFS catalogue into the sherlock-catalogues database* The method first generates a list of python dictionaries from the IFS datafile, imports this list of dictionaries into a database table and then generates the HTMIDs for that table. **Usage:** See class docstring for usage """ self.log.debug('starting the ``get`` method') self.primaryIdColumnName = "primaryId" self.raColName = "raDeg" self.declColName = "decDeg" self.dbTableName = "tcs_cat_ifs_stream" self.databaseInsertbatchSize = 500 dictList = self._create_dictionary_of_IFS() tableName = self.dbTableName createStatement = """ CREATE TABLE `%(tableName)s` ( `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter', `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP, `decDeg` double DEFAULT NULL, `name` varchar(100) COLLATE utf8_unicode_ci DEFAULT NULL, `raDeg` double DEFAULT NULL, `z` double DEFAULT NULL, `htm16ID` bigint(20) DEFAULT NULL, `htm10ID` bigint(20) DEFAULT NULL, `htm13ID` bigint(20) DEFAULT NULL, `dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP, `updated` varchar(45) DEFAULT '0', PRIMARY KEY (`primaryId`), UNIQUE KEY `radeg_decdeg` (`raDeg`,`decDeg`), KEY `idx_htm16ID` (`htm16ID`), KEY `idx_htm10ID` (`htm10ID`), KEY `idx_htm13ID` (`htm13ID`) ) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci; """ % locals() self.add_data_to_database_table( dictList=dictList, createStatement=createStatement ) self.log.debug('completed the ``get`` method') return None
*Import the IFS catalogue into the sherlock-catalogues database* The method first generates a list of python dictionaries from the IFS datafile, imports this list of dictionaries into a database table and then generates the HTMIDs for that table. **Usage:** See class docstring for usage
def _get_minutes_from_last_update(self, time): """ How many minutes have passed from the last update to the given time """ time_from_last_update = time - self.last_update_time return int(time_from_last_update.total_seconds() / 60)
How many minutes have passed from the last update to the given time
def course_modal(context, course=None): """ Django template tag that returns course information to display in a modal. You may pass in a particular course if you like. Otherwise, the modal will look for course context within the parent context. Usage: {% course_modal %} {% course_modal course %} """ if course: context.update({ 'course_image_uri': course.get('course_image_uri', ''), 'course_title': course.get('course_title', ''), 'course_level_type': course.get('course_level_type', ''), 'course_short_description': course.get('course_short_description', ''), 'course_effort': course.get('course_effort', ''), 'course_full_description': course.get('course_full_description', ''), 'expected_learning_items': course.get('expected_learning_items', []), 'staff': course.get('staff', []), 'premium_modes': course.get('premium_modes', []), }) return context
Django template tag that returns course information to display in a modal. You may pass in a particular course if you like. Otherwise, the modal will look for course context within the parent context. Usage: {% course_modal %} {% course_modal course %}
def display(self, data, x=None, y=None, xlabel=None, ylabel=None, style=None, nlevels=None, levels=None, contour_labels=None, store_data=True, col=0, unzoom=True, auto_contrast=False, contrast_level=0, **kws): """ generic display, using imshow (default) or contour """ if style is not None: self.conf.style = style self.axes.cla() conf = self.conf conf.log_scale = False conf.rot, conf.flip_ud, conf.flip_lr = False, False, False conf.highlight_areas = [] if 1 in data.shape: data = data.squeeze() self.data_shape = data.shape self.data_range = [0, data.shape[1], 0, data.shape[0]] conf.contrast_level = contrast_level if auto_contrast: conf.contrast_level = 1 if x is not None: self.xdata = np.array(x) if self.xdata.shape[0] != data.shape[1]: self.xdata = None if y is not None: self.ydata = np.array(y) if self.ydata.shape[0] != data.shape[0]: self.ydata = None if xlabel is not None: self.xlab = xlabel if ylabel is not None: self.ylab = ylabel if store_data: self.conf.data = data cmap = self.conf.cmap[col] if self.conf.style == 'contour': if levels is None: levels = self.conf.ncontour_levels else: self.conf.ncontour_levels = levels if nlevels is None: nlevels = self.conf.ncontour_levels = 9 nlevels = max(2, nlevels) clevels = np.linspace(data.min(), data.max(), nlevels+1) self.conf.contour_levels = clevels self.conf.image = self.axes.contourf(data, cmap=self.conf.cmap[col], levels=clevels) self.conf.contour = self.axes.contour(data, cmap=self.conf.cmap[col], levels=clevels) cmap_name = self.conf.cmap[col].name xname = 'gray' try: if cmap_name == 'gray_r': xname = 'Reds_r' elif cmap_name == 'gray': xname = 'Reds' elif cmap_name.endswith('_r'): xname = 'gray_r' except: pass self.conf.contour.set_cmap(getattr(colormap, xname)) if contour_labels is None: contour_labels = self.conf.contour_labels if contour_labels: self.axes.clabel(self.conf.contour, fontsize=10, inline=1) if hasattr(self.contour_callback , '__call__'): self.contour_callback(levels=clevels) else: if data.max() == data.min(): img = data else: img = (data - data.min()) /(1.0*data.max() - data.min()) self.conf.image = self.axes.imshow(img, cmap=self.conf.cmap[col], interpolation=self.conf.interp) self.axes.set_axis_off() if unzoom: self.unzoom_all() if hasattr(self.data_callback, '__call__'): self.data_callback(data, x=x, y=y, **kws) self.conf.indices = None self.indices_thread = Thread(target=self.calc_indices, args=(data.shape, )) self.indices_thread.start()
generic display, using imshow (default) or contour
def highlight(self, **kwargs): """ kwargs: style: css highlight_time: int; default: .3 """ self.debug_log("Highlighting element") style = kwargs.get('style') highlight_time = kwargs.get('highlight_time', .3) driver = self._element._parent try: original_style = self._element.get_attribute('style') driver.execute_script( "arguments[0].setAttribute('style', arguments[1]);", self._element, style ) except Exception as e: self.error_log("highlight exception: %s" % str(e)) sleep(highlight_time) try: driver.execute_script( "arguments[0].setAttribute('style', arguments[1]);", self._element, original_style ) except Exception as e: self.error_log("highlight exception: %s" % str(e)) return True
kwargs: style: css highlight_time: int; default: .3
def persist(self, storageLevel=StorageLevel.MEMORY_AND_DISK): """Sets the storage level to persist the contents of the :class:`DataFrame` across operations after the first time it is computed. This can only be used to assign a new storage level if the :class:`DataFrame` does not have a storage level set yet. If no storage level is specified defaults to (C{MEMORY_AND_DISK}). .. note:: The default storage level has changed to C{MEMORY_AND_DISK} to match Scala in 2.0. """ self.is_cached = True javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel) self._jdf.persist(javaStorageLevel) return self
Sets the storage level to persist the contents of the :class:`DataFrame` across operations after the first time it is computed. This can only be used to assign a new storage level if the :class:`DataFrame` does not have a storage level set yet. If no storage level is specified defaults to (C{MEMORY_AND_DISK}). .. note:: The default storage level has changed to C{MEMORY_AND_DISK} to match Scala in 2.0.
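Typical call pattern, as a sketch (requires a running Spark session):

from pyspark import StorageLevel
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.range(10)
df.persist(StorageLevel.MEMORY_AND_DISK)   # mark for caching
df.count()                                 # first action materializes the cache
df.unpersist()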
def preview(src_path): ''' Generates a preview of src_path as PNG. :returns: A list of preview paths, one for each page. ''' previews = [] for page in list_artboards(src_path): previews.append(page.export()) for artboard in page.artboards: previews.append(artboard.export()) return previews
Generates a preview of src_path as PNG. :returns: A list of preview paths, one for each page.
def barmatch2(data, tups, cutters, longbar, matchdict, fnum): """ cleaner barmatch func... """ ## how many reads to store before writing to disk waitchunk = int(1e6) ## pid name for this engine epid = os.getpid() ## counters for total reads, those with cutsite, and those that matched filestat = np.zeros(3, dtype=np.int) ## store observed sample matches samplehits = {} ## dictionaries to store first and second reads until writing to file dsort1 = {} dsort2 = {} ## dictionary for all bars matched in sample dbars = {} ## fill for sample names for sname in data.barcodes: if "-technical-replicate-" in sname: sname = sname.rsplit("-technical-replicate", 1)[0] samplehits[sname] = 0 dsort1[sname] = [] dsort2[sname] = [] dbars[sname] = set() ## store observed bars barhits = {} for barc in matchdict: barhits[barc] = 0 ## store others misses = {} misses['_'] = 0 ## build func for finding barcode getbarcode = get_barcode_func(data, longbar) ## get quart iterator of reads if tups[0].endswith(".gz"): ofunc = gzip.open else: ofunc = open ## create iterators ofile1 = ofunc(tups[0], 'r') fr1 = iter(ofile1) quart1 = itertools.izip(fr1, fr1, fr1, fr1) if tups[1]: ofile2 = ofunc(tups[1], 'r') fr2 = iter(ofile2) quart2 = itertools.izip(fr2, fr2, fr2, fr2) quarts = itertools.izip(quart1, quart2) else: quarts = itertools.izip(quart1, iter(int, 1)) ## go until end of the file while 1: try: read1, read2 = quarts.next() read1 = list(read1) filestat[0] += 1 except StopIteration: break barcode = "" ## Get barcode_R2 and check for matching sample name if '3rad' in data.paramsdict["datatype"]: ## Here we're just reusing the findbcode function ## for R2, and reconfiguring the longbar tuple to have the ## maxlen for the R2 barcode ## Parse barcode. Use the parsing function selected above. barcode1 = find3radbcode(cutters=cutters, longbar=longbar, read1=read1) barcode2 = find3radbcode(cutters=cutters, longbar=(longbar[2], longbar[1]), read1=read2) barcode = barcode1 + "+" + barcode2 else: ## Parse barcode. Uses the parsing function selected above. barcode = getbarcode(cutters, read1, longbar) ## find if it matches sname_match = matchdict.get(barcode) if sname_match: #sample_index[filestat[0]-1] = snames.index(sname_match) + 1 ## record who matched dbars[sname_match].add(barcode) filestat[1] += 1 filestat[2] += 1 samplehits[sname_match] += 1 ## barhits was pre-filled with every barcode in matchdict, so a ## single increment suffices here barhits[barcode] += 1 ## trim off barcode lenbar = len(barcode) if '3rad' in data.paramsdict["datatype"]: ## Iff 3rad trim the len of the first barcode lenbar = len(barcode1) if data.paramsdict["datatype"] == '2brad': overlen = len(cutters[0][0]) + lenbar + 1 read1[1] = read1[1][:-overlen] + "\n" read1[3] = read1[3][:-overlen] + "\n" else: read1[1] = read1[1][lenbar:] read1[3] = read1[3][lenbar:] ## Trim barcode off R2 and append. Only 3rad datatype ## pays the cpu cost of splitting R2 if '3rad' in data.paramsdict["datatype"]: read2 = list(read2) read2[1] = read2[1][len(barcode2):] read2[3] = read2[3][len(barcode2):] ## append to dsort dsort1[sname_match].append("".join(read1)) if 'pair' in data.paramsdict["datatype"]: dsort2[sname_match].append("".join(read2)) else: misses["_"] += 1 if barcode: filestat[1] += 1 ## how can we make it so all of the engines aren't trying to write to ## ~100-200 files all at the same time? This is the I/O limit we hit.. ## write out every `waitchunk` reads to keep memory low. It is fine on ## HPC which can write in parallel, but regular systems might crash if not filestat[0] % waitchunk: ## write the accumulated reads to file writetofile(data, dsort1, 1, epid) if 'pair' in data.paramsdict["datatype"]: writetofile(data, dsort2, 2, epid) ## clear out dsorts for sname in data.barcodes: if "-technical-replicate-" in sname: sname = sname.rsplit("-technical-replicate", 1)[0] dsort1[sname] = [] dsort2[sname] = [] ## reset longlist #longlist = np.zeros(waitchunk, dtype=np.uint32) ## close open files ofile1.close() if tups[1]: ofile2.close() ## write the remaining reads to file writetofile(data, dsort1, 1, epid) if 'pair' in data.paramsdict["datatype"]: writetofile(data, dsort2, 2, epid) ## return stats in saved pickle b/c return_queue is too small ## and the size of the match dictionary can become quite large samplestats = [samplehits, barhits, misses, dbars] outname = os.path.join(data.dirs.fastqs, "tmp_{}_{}.p".format(epid, fnum)) with open(outname, 'w') as wout: pickle.dump([filestat, samplestats], wout) return outname
cleaner barmatch func...
def write(self, country_code, frames, scaling_factors=None): """Write the OHLCV data for one country to the HDF5 file. Parameters ---------- country_code : str The ISO 3166 alpha-2 country code for this country. frames : dict[str, pd.DataFrame] A dict mapping each OHLCV field to a dataframe with a row for each date and a column for each sid. The dataframes need to have the same index and columns. scaling_factors : dict[str, float], optional A dict mapping each OHLCV field to a scaling factor, which is applied (as a multiplier) to the values of field to efficiently store them as uint32, while maintaining desired precision. These factors are written to the file as metadata, which is consumed by the reader to adjust back to the original float values. Default is None, in which case DEFAULT_SCALING_FACTORS is used. """ if scaling_factors is None: scaling_factors = DEFAULT_SCALING_FACTORS with self.h5_file(mode='a') as h5_file: # ensure that the file version has been written h5_file.attrs['version'] = VERSION country_group = h5_file.create_group(country_code) data_group = country_group.create_group(DATA) index_group = country_group.create_group(INDEX) lifetimes_group = country_group.create_group(LIFETIMES) # Note that this functions validates that all of the frames # share the same days and sids. days, sids = days_and_sids_for_frames(list(frames.values())) # Write sid and date indices. index_group.create_dataset(SID, data=sids) # h5py does not support datetimes, so they need to be stored # as integers. index_group.create_dataset(DAY, data=days.astype(np.int64)) log.debug( 'Wrote {} group to file {}', index_group.name, self._filename, ) # Write start and end dates for each sid. start_date_ixs, end_date_ixs = compute_asset_lifetimes(frames) lifetimes_group.create_dataset(START_DATE, data=start_date_ixs) lifetimes_group.create_dataset(END_DATE, data=end_date_ixs) if len(sids): chunks = (len(sids), min(self._date_chunk_size, len(days))) else: # h5py crashes if we provide chunks for empty data. chunks = None for field in FIELDS: frame = frames[field] # Sort rows by increasing sid, and columns by increasing date. frame.sort_index(inplace=True) frame.sort_index(axis='columns', inplace=True) data = coerce_to_uint32( frame.T.fillna(0).values, scaling_factors[field], ) dataset = data_group.create_dataset( field, compression='lzf', shuffle=True, data=data, chunks=chunks, ) dataset.attrs[SCALING_FACTOR] = scaling_factors[field] log.debug( 'Writing dataset {} to file {}', dataset.name, self._filename )
Write the OHLCV data for one country to the HDF5 file. Parameters ---------- country_code : str The ISO 3166 alpha-2 country code for this country. frames : dict[str, pd.DataFrame] A dict mapping each OHLCV field to a dataframe with a row for each date and a column for each sid. The dataframes need to have the same index and columns. scaling_factors : dict[str, float], optional A dict mapping each OHLCV field to a scaling factor, which is applied (as a multiplier) to the values of field to efficiently store them as uint32, while maintaining desired precision. These factors are written to the file as metadata, which is consumed by the reader to adjust back to the original float values. Default is None, in which case DEFAULT_SCALING_FACTORS is used.
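For orientation, a hedged usage sketch of the writer documented above. The class name HDF5DailyBarWriter, its import path, and its constructor arguments are assumptions (verify them against your version of the library); the frames layout follows the docstring: one DataFrame per OHLCV field, all sharing a date index and sid columns.

import numpy as np
import pandas as pd
from zipline.data.hdf5_daily_bars import HDF5DailyBarWriter  # assumed import path

days = pd.date_range('2020-01-02', periods=3)
sids = [1, 2]
frames = {
    field: pd.DataFrame(np.random.rand(len(days), len(sids)),
                        index=days, columns=sids)
    for field in ('open', 'high', 'low', 'close', 'volume')
}

writer = HDF5DailyBarWriter('daily_bars.h5', date_chunk_size=30)  # assumed signature
writer.write('US', frames)  # scaling_factors=None -> DEFAULT_SCALING_FACTORS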
def create(self, target, configuration_url=values.unset,
           configuration_method=values.unset,
           configuration_filters=values.unset,
           configuration_triggers=values.unset,
           configuration_flow_sid=values.unset,
           configuration_retry_count=values.unset,
           configuration_replay_after=values.unset,
           configuration_buffer_messages=values.unset,
           configuration_buffer_window=values.unset):
    """
    Create a new WebhookInstance

    :param WebhookInstance.Target target: The target of this webhook.
    :param unicode configuration_url: The absolute url the webhook request should be sent to.
    :param WebhookInstance.Method configuration_method: The HTTP method to be used when sending a webhook request.
    :param unicode configuration_filters: The list of events that fire a webhook for this Session.
    :param unicode configuration_triggers: The list of keywords that fire a webhook for this Session.
    :param unicode configuration_flow_sid: The Studio flow sid to which the webhook should be sent.
    :param unicode configuration_retry_count: The number of retries in case of webhook request failures.
    :param unicode configuration_replay_after: The message index at which replay starts; that message and its successors are replayed.
    :param bool configuration_buffer_messages: Whether buffering should be applied to messages.
    :param unicode configuration_buffer_window: The period over which messages are buffered.

    :returns: Newly created WebhookInstance
    :rtype: twilio.rest.messaging.v1.session.webhook.WebhookInstance
    """
    data = values.of({
        'Target': target,
        'Configuration.Url': configuration_url,
        'Configuration.Method': configuration_method,
        'Configuration.Filters': serialize.map(configuration_filters, lambda e: e),
        'Configuration.Triggers': serialize.map(configuration_triggers, lambda e: e),
        'Configuration.FlowSid': configuration_flow_sid,
        'Configuration.RetryCount': configuration_retry_count,
        'Configuration.ReplayAfter': configuration_replay_after,
        'Configuration.BufferMessages': configuration_buffer_messages,
        'Configuration.BufferWindow': configuration_buffer_window,
    })

    payload = self._version.create(
        'POST',
        self._uri,
        data=data,
    )

    return WebhookInstance(self._version, payload, session_sid=self._solution['session_sid'], )
Create a new WebhookInstance

:param WebhookInstance.Target target: The target of this webhook.
:param unicode configuration_url: The absolute url the webhook request should be sent to.
:param WebhookInstance.Method configuration_method: The HTTP method to be used when sending a webhook request.
:param unicode configuration_filters: The list of events that fire a webhook for this Session.
:param unicode configuration_triggers: The list of keywords that fire a webhook for this Session.
:param unicode configuration_flow_sid: The Studio flow sid to which the webhook should be sent.
:param unicode configuration_retry_count: The number of retries in case of webhook request failures.
:param unicode configuration_replay_after: The message index at which replay starts; that message and its successors are replayed.
:param bool configuration_buffer_messages: Whether buffering should be applied to messages.
:param unicode configuration_buffer_window: The period over which messages are buffered.

:returns: Newly created WebhookInstance
:rtype: twilio.rest.messaging.v1.session.webhook.WebhookInstance
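A hypothetical call-site sketch for the endpoint above, using the twilio-python client; the account SID, auth token, session SID, and URL below are placeholders, not values from the original source.

from twilio.rest import Client

client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')

# Navigate to the session's webhook list and create a webhook that
# posts session events to the given URL.
webhook = client.messaging.v1 \
    .sessions('ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
    .webhooks \
    .create(target='webhook',
            configuration_url='https://example.com/hook',
            configuration_method='POST')

print(webhook.sid)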
def process_tick(self, tup):
    """Called every slide_interval
    """
    curtime = int(time.time())
    window_info = WindowContext(curtime - self.window_duration, curtime)
    # Batch up the tuples currently in the window; a distinct loop
    # variable avoids shadowing the tick tuple argument `tup`.
    tuple_batch = [tpl for (tpl, tm) in self.current_tuples]
    self.processWindow(window_info, tuple_batch)
    self._expire(curtime)
Called every slide_interval
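process_tick above batches the window's tuples and hands them to processWindow, which a concrete bolt supplies. A minimal subclass sketch, assuming heronpy's SlidingWindowBolt base class and import path (check both against your heronpy version):

from heronpy.api.bolt.window_bolt import SlidingWindowBolt  # assumed path

class CountWindowBolt(SlidingWindowBolt):
    def processWindow(self, window_info, tuples):
        # window_info carries the window's start and end times;
        # tuples is the batch assembled by process_tick above.
        self.emit([len(tuples)])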
def inverse(self):
    """
    Return the inverse of the graph.

    @rtype:  graph
    @return: Complement graph for the graph.
    """
    inv = self.__class__()
    inv.add_nodes(self.nodes())
    inv.complete()
    for each in self.edges():
        if inv.has_edge(each):
            inv.del_edge(each)
    return inv
Return the inverse of the graph. @rtype: graph @return: Complement graph for the graph.
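A short demonstration of the complement operation above, assuming the python-graph (pygraph) package; the import path is an assumption.

from pygraph.classes.graph import graph

gr = graph()
gr.add_nodes(['a', 'b', 'c'])
gr.add_edge(('a', 'b'))

inv = gr.inverse()
# 'a'-'b' was the only edge, so the complement holds exactly the
# other two possible edges.
assert not inv.has_edge(('a', 'b'))
assert inv.has_edge(('a', 'c')) and inv.has_edge(('b', 'c'))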