positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def bayes_risk(self, expparams):
    r"""
    Calculates the Bayes risk for hypothetical experiments, assuming the
    quadratic loss function defined by the current model's scale matrix
    (see :attr:`qinfer.abstract_model.Simulatable.Q`).

    :param expparams: The experiments at which to compute the risk.
    :type expparams: :class:`~numpy.ndarray` of dtype given by the current
        model's :attr:`~qinfer.abstract_model.Simulatable.expparams_dtype`
        property, and of shape ``(1,)``
    :return np.ndarray: The Bayes risk for the current posterior distribution
        at each hypothetical experiment in ``expparams``, therefore
        has shape ``(expparams.size,)``
    """

    # for models whose outcome number changes with experiment, we
    # take the easy way out and for-loop over experiments
    n_eps = expparams.size
    if n_eps > 1 and not self.model.is_n_outcomes_constant:
        risk = np.empty(n_eps)
        for idx in range(n_eps):
            # Recurse one experiment at a time so each call sees a
            # constant outcome count.
            risk[idx] = self.bayes_risk(expparams[idx, np.newaxis])
        return risk

    # outcomes for the first experiment
    os = self.model.domain(expparams[0,np.newaxis])[0].values

    # compute the hypothetical weights, likelihoods and normalizations for
    # every possible outcome and expparam
    # the likelihood over outcomes should sum to 1, so don't compute for last outcome
    w_hyp, L, N = self.hypothetical_update(
        os[:-1],
        expparams,
        return_normalization=True,
        return_likelihood=True
    )

    # Recover the last outcome's (unnormalized) hypothetical weights from
    # the completeness of the likelihood: sum_o L(o) == 1.
    w_hyp_last_outcome = (1 - L.sum(axis=0)) * self.particle_weights[np.newaxis, :]
    N = np.concatenate([N[:,:,0], np.sum(w_hyp_last_outcome[np.newaxis,:,:], axis=2)], axis=0)
    w_hyp_last_outcome = w_hyp_last_outcome / N[-1,:,np.newaxis]
    w_hyp = np.concatenate([w_hyp, w_hyp_last_outcome[np.newaxis,:,:]], axis=0)

    # w_hyp.shape == (n_out, n_eps, n_particles)
    # N.shape == (n_out, n_eps)

    # compute the hypothetical means and variances given outcomes and exparams
    # mu_hyp.shape == (n_out, n_eps, n_models)
    # var_hyp.shape == (n_out, n_eps)
    mu_hyp = np.dot(w_hyp, self.particle_locations)
    var_hyp = np.sum(
        w_hyp *
        np.sum(self.model.Q * (
            self.particle_locations[np.newaxis,np.newaxis,:,:] -
            mu_hyp[:,:,np.newaxis,:]
        ) ** 2, axis=3),
        axis=2
    )

    # the risk of a given expparam can be calculated as the mean posterior
    # variance weighted over all possible outcomes
    return np.sum(N * var_hyp, axis=0)
r""" Calculates the Bayes risk for hypothetical experiments, assuming the quadratic loss function defined by the current model's scale matrix (see :attr:`qinfer.abstract_model.Simulatable.Q`). :param expparams: The experiments at which to compute the risk. :type expparams: :class:`~numpy.ndarray` of dtype given by the current model's :attr:`~qinfer.abstract_model.Simulatable.expparams_dtype` property, and of shape ``(1,)`` :return np.ndarray: The Bayes risk for the current posterior distribution at each hypothetical experiment in ``expparams``, therefore has shape ``(expparams.size,)``
def is_all_field_none(self):
    """
    Check whether every tracked field of this object is unset.

    :rtype: bool
    """
    # The object counts as empty only when every one of these
    # attributes is still None.
    return all(
        field is None
        for field in (
            self._monetary_account_id,
            self._alias,
            self._counterparty_alias,
            self._amount_guaranteed,
            self._amount_requested,
            self._issuer,
            self._issuer_authentication_url,
            self._status,
            self._error_message,
            self._transaction_identifier,
        )
    )
:rtype: bool
def getChanges(self, request):
    """
    Responds only to POST webhook events from GitLab and starts the build
    process.

    :arguments:
        request
            the http request object

    NOTE(review): this body uses ``yield`` on a deferred and ends with a
    plain ``return (changes, 'git')``, so it is presumably decorated with
    ``@defer.inlineCallbacks`` outside this view -- confirm.
    """
    # Optional shared-secret check: self.options may carry a 'secret'
    # that must match the X-Gitlab-Token header.
    expected_secret = isinstance(self.options, dict) and self.options.get('secret')
    if expected_secret:
        received_secret = request.getHeader(_HEADER_GITLAB_TOKEN)
        received_secret = bytes2unicode(received_secret)

        p = Properties()
        p.master = self.master
        # The configured secret may be a renderable (e.g. a Secret
        # interpolation), so render it before comparing.
        expected_secret_value = yield p.render(expected_secret)

        if received_secret != expected_secret_value:
            raise ValueError("Invalid secret")
    try:
        content = request.content.read()
        payload = json.loads(bytes2unicode(content))
    except Exception as e:
        raise ValueError("Error loading JSON: " + str(e))
    event_type = request.getHeader(_HEADER_EVENT)
    event_type = bytes2unicode(event_type)
    # newer version of gitlab have a object_kind parameter,
    # which allows not to use the http header
    event_type = payload.get('object_kind', event_type)
    # Optional ?codebase= query argument, attached to every change.
    codebase = request.args.get(b'codebase', [None])[0]
    codebase = bytes2unicode(codebase)
    if event_type in ("push", "tag_push", "Push Hook"):
        user = payload['user_name']
        repo = payload['repository']['name']
        repo_url = payload['repository']['url']
        changes = self._process_change(
            payload, user, repo, repo_url, event_type, codebase=codebase)
    elif event_type == 'merge_request':
        changes = self._process_merge_request_change(
            payload, event_type, codebase=codebase)
    else:
        # Unknown event types are acknowledged but produce no changes.
        changes = []
    if changes:
        log.msg("Received {} changes from {} gitlab event".format(
            len(changes), event_type))
    return (changes, 'git')
Reponds only to POST events and starts the build process :arguments: request the http request object
def _guess_vc(self): """ Locate Visual C for 2017 """ if self.vc_ver <= 14.0: return default = r'VC\Tools\MSVC' guess_vc = os.path.join(self.VSInstallDir, default) # Subdir with VC exact version as name try: vc_exact_ver = os.listdir(guess_vc)[-1] return os.path.join(guess_vc, vc_exact_ver) except (OSError, IOError, IndexError): pass
Locate Visual C for 2017
def get(self):
    """
    Build a WorkflowCumulativeStatisticsContext for this workflow.

    :returns: twilio.rest.taskrouter.v1.workspace.workflow.workflow_cumulative_statistics.WorkflowCumulativeStatisticsContext
    :rtype: twilio.rest.taskrouter.v1.workspace.workflow.workflow_cumulative_statistics.WorkflowCumulativeStatisticsContext
    """
    solution = self._solution
    return WorkflowCumulativeStatisticsContext(
        self._version,
        workspace_sid=solution['workspace_sid'],
        workflow_sid=solution['workflow_sid'],
    )
Constructs a WorkflowCumulativeStatisticsContext :returns: twilio.rest.taskrouter.v1.workspace.workflow.workflow_cumulative_statistics.WorkflowCumulativeStatisticsContext :rtype: twilio.rest.taskrouter.v1.workspace.workflow.workflow_cumulative_statistics.WorkflowCumulativeStatisticsContext
def get_node(conn, name):
    '''
    Return the libcloud node whose name matches ``name`` (None if absent).
    '''
    for node in conn.list_nodes():
        if node.name != name:
            continue
        # Cache the node's data for the active cloud provider before
        # handing it back.
        __utils__['cloud.cache_node'](
            salt.utils.data.simple_types_filter(node.__dict__),
            __active_provider_name__,
            __opts__
        )
        return node
Return a libcloud node for the named VM
def _load_settings_from_source(self, source):
    """
    Loads the relevant settings from the specified ``source``.

    This is a generator: it yields ``(name, settings_dict)`` pairs, one
    per settings source discovered (a single ``source`` can expand into
    several, e.g. a dict that also references a settings URI).
    """
    if not source:
        # Falsy sources (None, '', {}) yield nothing.
        pass

    elif source == 'env_settings_uri':
        # Sentinel string: look for settings-URI keys in the environment.
        for env_settings_uri_key in self.env_settings_uri_keys:
            env_settings_uri = self._search_environ(env_settings_uri_key)
            if env_settings_uri:
                logger.debug('Found {} in the environment.'.format(env_settings_uri_key))
                yield env_settings_uri, self._load_settings_from_uri(env_settings_uri)
            #end if
        #end for

    elif source == 'env':
        # Sentinel string: the whole process environment is the source.
        logger.debug('Loaded {} settings from the environment.'.format(len(os.environ)))
        yield source, dict(os.environ.items())

    elif isinstance(source, ParseResult):
        # Already-parsed URI.
        settings = self._load_settings_from_uri(source)
        yield source, settings

    elif isinstance(source, str):
        # A string is tried first as an importable module spec, then as
        # a file/URI path.
        try:
            spec = importlib.util.find_spec(source)
        except (AttributeError, ImportError):
            spec = None

        settings = self._load_settings_from_spec(spec, name=source)
        if settings is None:
            _, ext = os.path.splitext(source)
            with uri_open(source, 'rb') as f:
                yield source, self._load_settings_from_file(f, ext=ext)
        else:
            yield source, settings
        #end if

    elif hasattr(source, 'read'):
        # File-like object.
        yield source.name, self._load_settings_from_file(source)

    elif hasattr(source, 'items'):
        # Dict-like object: it may itself reference further settings
        # URIs, which are recursed into before the dict is yielded.
        source_type = type(source).__name__
        for dict_settings_uri_key in self.dict_settings_uri_keys:
            if dict_settings_uri_key and dict_settings_uri_key in source and source[dict_settings_uri_key]:
                logger.debug('Found {} in the dict-like object <{}>.'.format(dict_settings_uri_key, source_type))
                yield from self._load_settings_from_source(source[dict_settings_uri_key])
            #end if
        #end for

        logger.debug('Loaded {} settings from dict-like object <{}>.'.format(len(source), source_type))
        yield self._get_unique_name(source_type), source

    else:
        # Arbitrary object: check attribute-based settings URIs, then
        # harvest the object's non-dunder instance attributes.
        source_type = type(source).__name__
        for object_settings_uri_key in self.object_settings_uri_keys:
            if object_settings_uri_key and hasattr(source, object_settings_uri_key) and getattr(source, object_settings_uri_key):
                logger.debug('Found {} in the object <{}>.'.format(object_settings_uri_key, source_type))
                yield from self._load_settings_from_source(getattr(source, object_settings_uri_key))
            #end if
        #end for

        settings = dict((k, v) for k, v in source.__dict__.items() if not k.startswith('__'))
        logger.debug('Loaded {} settings from object <{}>.'.format(len(settings), source_type))
        yield self._get_unique_name(source_type), settings
Loads the relevant settings from the specified ``source``. :returns: a standard :func:`dict` containing the settings from the source :rtype: dict
def enable_event(self, event_type, mechanism, context=None):
    """Enable occurrences of the given event type on this resource.

    Thin pass-through: all validation happens inside the VISA layer.

    :param event_type: Logical event identifier.
    :param mechanism: Event handling mechanism(s) to enable
        (Constants.VI_QUEUE, .VI_HNDLR, .VI_SUSPEND_HNDLR).
    :param context: Not currently used, leave as None.
    """
    self.visalib.enable_event(self.session, event_type, mechanism, context)
Enable event occurrences for specified event types and mechanisms in this resource. :param event_type: Logical event identifier. :param mechanism: Specifies event handling mechanisms to be enabled. (Constants.VI_QUEUE, .VI_HNDLR, .VI_SUSPEND_HNDLR) :param context: Not currently used, leave as None.
def get_r(self):
    """Return the right border of the cell as a CellBorder."""
    # Gather the line geometry and appearance, then wrap them up.
    start, end = self._get_right_line_coordinates()
    return CellBorder(
        start,
        end,
        self._get_right_line_width(),
        self._get_right_line_color(),
    )
Returns the right border of the cell
def send_to_kinesis_stream(events, stream_name, partition_key=None, packer=None, serializer=json.dumps):
    """Send a batch of events to an AWS Kinesis stream.

    :param events: Iterable of events. Non-string events are run through
        ``serializer`` (and then ``packer``, when given) before delivery.
    :param stream_name: Name of the target Kinesis stream.
    :param partition_key: A fixed partition key, a callable deriving the
        key from each event, or None to use a random UUID per record.
    :param packer: Optional callable applied to each serialized event.
    :param serializer: Serializer for non-string events (default
        ``json.dumps``).
    :return: The ``put_records`` response dict, or None when ``events``
        is empty.
    """
    if not events:
        # Bug fix: the old message wrongly referred to Firehose.
        logger.info("No events provided: nothing delivered to Kinesis")
        return

    records = []
    for event in events:
        if not partition_key:
            # No key given: spread records across shards randomly.
            partition_key_value = str(uuid.uuid4())
        elif callable(partition_key):
            partition_key_value = partition_key(event)
        else:
            partition_key_value = partition_key

        if not isinstance(event, str):
            event = serializer(event)
        if packer:
            event = packer(event)
        records.append({"Data": event, "PartitionKey": partition_key_value})

    kinesis = boto3.client("kinesis")
    return kinesis.put_records(StreamName=stream_name, Records=records)
Sends events to a Kinesis stream.
def get_escalation_policies(profile='pagerduty', subdomain=None, api_key=None):
    '''
    List the escalation policies belonging to this PagerDuty account.

    CLI Example:

        salt myminion pagerduty.get_escalation_policies
    '''
    # Delegate to the shared listing helper, keyed by policy id.
    credentials = dict(profile=profile, subdomain=subdomain, api_key=api_key)
    return _list_items('escalation_policies', 'id', **credentials)
List escalation_policies belonging to this account CLI Example: salt myminion pagerduty.get_escalation_policies
def get(self, obj, key):
    """
    Retrieve 'key' from an instance of a class which previously exposed it.

    @param key: a hashable object, previously passed to L{Exposer.expose}.

    @return: the object which was exposed with the given name on obj's key,
        bound to obj like a normal method.

    @raise MethodNotExposed: when the key in question was not exposed with
        this exposer, or the exposed method is shadowed by an override.
    """
    if key not in self._exposed:
        raise MethodNotExposed()
    rightFuncs = self._exposed[key]
    T = obj.__class__
    seen = {}
    # Walk the MRO from most- to least-derived, mirroring normal
    # attribute lookup, so the closest implementation wins.
    for subT in inspect.getmro(T):
        for name, value in subT.__dict__.items():
            for rightFunc in rightFuncs:
                if value is rightFunc:
                    if name in seen:
                        # A class earlier in the MRO already defines this
                        # attribute name with something that is NOT the
                        # exposed function -- the exposure is shadowed.
                        raise MethodNotExposed()
                    # Bind the exposed function to obj, as a method.
                    return value.__get__(obj, T)
            seen[name] = True
    raise MethodNotExposed()
Retrieve 'key' from an instance of a class which previously exposed it. @param key: a hashable object, previously passed to L{Exposer.expose}. @return: the object which was exposed with the given name on obj's key. @raise MethodNotExposed: when the key in question was not exposed with this exposer.
def ttl_cache(maxage, maxsize=128):
    """
    Time-to-live caching decorator in the style of ``lru_cache``.

    Each cached result lives for ``maxage`` seconds; entries older than
    that are lazily replaced on the next call. ``maxsize`` bounds the
    cache as in ``lru_cache``.
    """
    def decorator(inner_func):
        # Build the TTL wrapper, then copy the wrapped function's
        # metadata (__name__, __doc__, ...) onto it.
        return functools.update_wrapper(
            make_ttl_cache_wrapper(inner_func, maxage, maxsize),
            inner_func,
        )
    return decorator
A time-to-live caching decorator that follows after the style of lru_cache. The `maxage` argument is time-to-live in seconds for each cache result. Any cache entries over the maxage are lazily replaced.
def to_dictionary(pw, print_list):
    """
    Convert a list of pairwise comparisons into a nested dictionary,
    optionally printing each pident pair to stderr.

    :param pw: iterable of ``(a, b, pident)`` comparisons
    :param print_list: when True, print both orientations of each pair
    :return: dict mapping each id to ``{other_id: pident}`` with ``'-'``
        on the diagonal
    """
    pairs = {}
    for a, b, pident in pw:
        # Seed the diagonal for ids we have not seen before.
        pairs.setdefault(a, {a: '-'})
        pairs.setdefault(b, {b: '-'})
        pairs[a][b] = pident
        pairs[b][a] = pident
        if print_list is True:
            # Ids look like '>name'; report both orientations to stderr.
            name_a, name_b = a.split('>')[1], b.split('>')[1]
            print('\t'.join(str(i) for i in (name_a, name_b, pident)), file=sys.stderr)
            print('\t'.join(str(i) for i in (name_b, name_a, pident)), file=sys.stderr)
    return pairs
- convert list of comparisons to dictionary - print list of pidents (if requested) to stderr
def close(self):
    """Close the tough cursor.

    Safe to call repeatedly; errors from the underlying cursor are
    swallowed (it may already be gone).
    """
    if self._closed:
        return
    try:
        self._cursor.close()
    except Exception:
        # Best effort: ignore failures from the underlying cursor.
        pass
    self._closed = True
Close the tough cursor. It will not complain if you close it more than once.
def libvlc_media_library_media_list(p_mlib):
    '''Get media library subitems.
    @param p_mlib: media library object.
    @return: media list subitems.
    '''
    # Look up the cached ctypes entry point, creating and registering it
    # on first use; class_result(MediaList) wraps the returned pointer
    # into a MediaList instance.
    f = _Cfunctions.get('libvlc_media_library_media_list', None) or \
        _Cfunction('libvlc_media_library_media_list', ((1,),), class_result(MediaList),
                   ctypes.c_void_p, MediaLibrary)
    return f(p_mlib)
Get media library subitems. @param p_mlib: media library object. @return: media list subitems.
def __extract_modules(self, loader, name, is_pkg):
    """Load the module ``name`` and register its routes if it declares
    a ``__method__`` attribute.

    NOTE(review): ``loader.find_module(...).load_module(...)`` is the
    pre-3.4 import-loader API (deprecated in modern Python) -- confirm
    the supported interpreter versions.
    """
    # If module found, load module and save all attributes in the module found.
    mod = loader.find_module(name).load_module(name)
    # Find the attribute method on each module.
    if hasattr(mod, '__method__'):
        # Register to the blueprint if method attribute found.
        module_router = ModuleRouter(mod,
                                     ignore_names=self.__serialize_module_paths()
                                     ).register_route(app=self.application, name=name)
        self.__routers.extend(module_router.routers)
        self.__modules.append(mod)
    else:
        # Prompt not-found notification (deliberately silent).
        # print('{} has no module attribute method'.format(mod))
        pass
if module found load module and save all attributes in the module found
def mail(ui, repo, *pats, **opts):
    """mail a change for review

    Uploads a patch to the code review server and then sends mail to
    the reviewer and CC list asking for a review.
    """
    if codereview_disabled:
        raise hg_util.Abort(codereview_disabled)

    # Build the CL from the command line and upload the patch first.
    cl, err = CommandLineCL(ui, repo, pats, opts, op="mail", defaultcc=defaultcc)
    if err != "":
        raise hg_util.Abort(err)
    cl.Upload(ui, repo, gofmt_just_warn=True)
    if not cl.reviewer:
        # If no reviewer is listed, assign the review to defaultcc.
        # This makes sure that it appears in the
        # codereview.appspot.com/user/defaultcc
        # page, so that it doesn't get dropped on the floor.
        if not defaultcc:
            raise hg_util.Abort("no reviewers listed in CL")
        # Move defaultcc from the CC list into the reviewer slot and
        # persist the updated CL.
        cl.cc = Sub(cl.cc, defaultcc)
        cl.reviewer = defaultcc
        cl.Flush(ui, repo)

    if cl.files == []:
        raise hg_util.Abort("no changed files, not sending mail")

    cl.Mail(ui, repo)
mail a change for review Uploads a patch to the code review server and then sends mail to the reviewer and CC list asking for a review.
def daemonize(self):
    """Daemonize the process: double fork, then record the new pid."""
    self._double_fork()

    # After the second fork we are the daemon; remember our own pid so
    # a pidfile can be written/managed.
    self.pid = os.getpid()
    # Bug fix: "Succesfully" -> "Successfully". Lazy %-formatting defers
    # message construction until INFO is actually enabled.
    LOG.info("Successfully daemonized process %s.", self.pid)
Double fork and set the pid.
def get_indexer_nd(index, labels, method=None, tolerance=None):
    """Wrap ``pd.Index.get_indexer`` for N-dimensional label arrays.

    Flattens ``labels``, queries the index, and reshapes the resulting
    indexer back to ``labels.shape``.
    """
    kwargs = _index_method_kwargs(method, tolerance)
    flat_indexer = index.get_indexer(np.ravel(labels), **kwargs)
    return flat_indexer.reshape(labels.shape)
Call pd.Index.get_indexer(labels).
def set_attribute_label(series, resource_labels, attribute_key, canonical_key=None, label_value_prefix=''):
    """Copy a resource label onto a monitoring time series.

    :param series: TimeSeries object based on view data
    :param resource_labels: collection of labels
    :param attribute_key: label key looked up in ``resource_labels``
    :param canonical_key: exporter-specific label key; defaults to
        ``attribute_key``, Optional
    :param label_value_prefix: exporter-specific prefix prepended to the
        label value, Optional
    """
    # Missing keys are silently skipped.
    if attribute_key not in resource_labels:
        return
    target_key = attribute_key if canonical_key is None else canonical_key
    series.resource.labels[target_key] = \
        label_value_prefix + resource_labels[attribute_key]
Set a label to timeseries that can be used for monitoring :param series: TimeSeries object based on view data :param resource_labels: collection of labels :param attribute_key: actual label key :param canonical_key: exporter specific label key, Optional :param label_value_prefix: exporter specific label value prefix, Optional
def global_config(cls, key, *args):
    '''
    Read or set a value in the class-wide ``settings`` store.

    With an extra positional argument, sets ``settings[key]``;
    otherwise returns the current value for ``key``.
    '''
    if not args:
        return cls.settings[key]
    cls.settings[key] = args[0]
This reads or sets the global settings stored in class.settings.
def id_request(self, device_id):
    """Ask a device for its ID record.

    An ID request can return device type (cat/subcat), firmware version,
    etc. Cat is status['is_high'], sub cat is status['id_mid'].
    Retries the buffer read once if the first attempt comes back empty.
    """
    self.logger.info("\nid_request for device %s", device_id)
    normalized_id = device_id.upper()
    # Command 10/00 is the ID request.
    self.direct_command(normalized_id, '10', '00')
    sleep(2)
    status = self.get_buffer_status(normalized_id)
    if status:
        return status
    # Give a slow device one more chance to answer.
    sleep(1)
    return self.get_buffer_status(normalized_id)
Get the device for the ID. ID request can return device type (cat/subcat), firmware ver, etc. Cat is status['is_high'], sub cat is status['id_mid']
def init_properties(env='dev', app='unnecessary', **_):
    """Ensure an ``application.properties`` file exists in S3.

    For Applications with Archaius support, there needs to be a file
    where the cloud environment variable points to.

    Args:
        env (str): Deployment environment/account, i.e. dev, stage, prod.
        app (str): GitLab Project name.

    Returns:
        True when application.properties was found.
        False when application.properties needed to be created.
    """
    aws_session = boto3.session.Session(profile_name=env)
    s3client = aws_session.resource('s3')

    archaius = get_details(app=app, env=env).archaius()
    archaius_file = ('{path}/application.properties').format(path=archaius['path'])

    try:
        s3client.Object(archaius['bucket'], archaius_file).get()
    except boto3.exceptions.botocore.client.ClientError:
        # Key missing (or inaccessible): create an empty placeholder.
        s3client.Object(archaius['bucket'], archaius_file).put()
        LOG.info('Created: %(bucket)s/%(file)s', {'bucket': archaius['bucket'], 'file': archaius_file})
        return False
    else:
        LOG.info('Found: %(bucket)s/%(file)s', {'bucket': archaius['bucket'], 'file': archaius_file})
        return True
Make sure _application.properties_ file exists in S3. For Applications with Archaius support, there needs to be a file where the cloud environment variable points to. Args: env (str): Deployment environment/account, i.e. dev, stage, prod. app (str): GitLab Project name. Returns: True when application.properties was found. False when application.properties needed to be created.
def _add_edge(self, idx, from_idx, from_lvec, to_idx, to_lvec): """ Add information about an edge linking two critical points. This actually describes two edges: from_idx ------ idx ------ to_idx However, in practice, from_idx and to_idx will typically be atom nuclei, with the center node (idx) referring to a bond critical point. Thus, it will be more convenient to model this as a single edge linking nuclei with the properties of the bond critical point stored as an edge attribute. :param idx: index of node :param from_idx: from index of node :param from_lvec: vector of lattice image the from node is in as tuple of ints :param to_idx: to index of node :param to_lvec: vector of lattice image the to node is in as tuple of ints :return: """ self.edges[idx] = {'from_idx': from_idx, 'from_lvec': from_lvec, 'to_idx': to_idx, 'to_lvec': to_lvec}
Add information about an edge linking two critical points. This actually describes two edges: from_idx ------ idx ------ to_idx However, in practice, from_idx and to_idx will typically be atom nuclei, with the center node (idx) referring to a bond critical point. Thus, it will be more convenient to model this as a single edge linking nuclei with the properties of the bond critical point stored as an edge attribute. :param idx: index of node :param from_idx: from index of node :param from_lvec: vector of lattice image the from node is in as tuple of ints :param to_idx: to index of node :param to_lvec: vector of lattice image the to node is in as tuple of ints :return:
def new_log_level(level, name, logger_name=None):
    """
    Register a custom log level that behaves like the builtin levels
    in the logging module.

    :param level: level number
    :param name: level name
    :param logger_name: optional logger name
    """
    def _default_template(logger, msg, *args, **kwargs):
        # Pass the message and call arguments through unchanged.
        return msg, args, kwargs

    # Apply the registering decorator explicitly.
    CustomLogLevel(level, name, logger_name)(_default_template)
Quick way to create a custom log level that behaves like the default levels in the logging module. :param level: level number :param name: level name :param logger_name: optional logger name
def delete_cached_branch_info(self):
    '''
    Remove the cached branch_info file, if present.
    '''
    if not os.path.isfile(constants.cached_branch_info):
        logger.debug('Cached branch_info file does not exist.')
        return
    logger.debug('Deleting cached branch_info file...')
    os.remove(constants.cached_branch_info)
Deletes cached branch_info file
def add2(self, target, path_settings, method):
    """
    Same as :meth:`add`, but with the parameters reordered.
    """
    return self.add(method, path_settings, target)
add() with reordered paameters
def save(self):
    """Persist the index data back to the wily cache."""
    logger.debug("Saving data")
    payload = [revision.asdict() for revision in self._revisions.values()]
    cache.store_archiver_index(self.config, self.archiver, payload)
Save the index data back to the wily cache.
def get_cached(self, link, default=None):
    '''Retrieve a cached navigator from the id_map.

    ``link`` may be a Link object (anything with a ``uri`` attribute)
    or a bare uri string.'''
    uri = link.uri if hasattr(link, 'uri') else link
    return self.id_map.get(uri, default)
Retrieves a cached navigator from the id_map. Either a Link object or a bare uri string may be passed in.
def get_nendo():
    """Return the current Japanese fiscal year (nendo).

    The fiscal year starts in April, so January-March count toward the
    previous calendar year's nendo.
    """
    year, month = (int(part) for part in time.strftime("%Y %m").split())
    if month >= 4:
        return year
    return year - 1
今は何年度?
def do_add_signature(input_file, output_file, signature_file):
    """Add a signature to the MAR file.

    The hash algorithm is inferred from the signature length:
    256 bytes -> sha1, 512 bytes -> sha384.

    :param input_file: path to the source MAR file
    :param output_file: path the signed MAR is written to
    :param signature_file: path to the raw signature bytes
    :raises ValueError: if the signature has an unsupported length
    """
    # Bug fix: read the signature via a context manager so the handle is
    # closed (the original leaked it), and raise with a message instead
    # of a bare ValueError().
    with open(signature_file, 'rb') as sig:
        signature = sig.read()

    if len(signature) == 256:
        hash_algo = 'sha1'
    elif len(signature) == 512:
        hash_algo = 'sha384'
    else:
        raise ValueError('Unsupported signature length: {}'.format(len(signature)))

    with open(output_file, 'w+b') as dst:
        with open(input_file, 'rb') as src:
            add_signature_block(src, dst, hash_algo, signature)
Add a signature to the MAR file.
def warn(self, msg, *args, **kwargs):
    """Log a warning-level message via the generic ``log`` method."""
    self.log(self.WARN, msg, *args, **kwargs)
Log an warning message.
def _compute_iso_color(self): """ compute LineVisual color from level index and corresponding level color """ level_color = [] colors = self._lc for i, index in enumerate(self._li): level_color.append(np.zeros((index, 4)) + colors[i]) self._cl = np.vstack(level_color)
compute LineVisual color from level index and corresponding level color
def get_style_attribute(style_attribute, html_element):
    '''
    Merge an inline CSS ``style`` attribute into a clone of an element.

    ::param: style_attribute \\
        The attribute value of the given style sheet.
        Example: display: none

    ::param: html_element: \\
        The HtmlElement to which the given style is applied

    ::returns:
        A HtmlElement that merges the given element with the style
        attributes specified.
    '''
    merged_element = html_element.clone()
    for directive in style_attribute.lower().split(';'):
        if ':' not in directive:
            continue
        key, value = (part.strip() for part in directive.split(':', 1))
        # CssParse handlers are named _attr_<property>, with dashes as
        # underscores and any -webkit- prefix dropped.
        handler_name = "_attr_" + key.replace('-webkit-', '').replace("-", "_")
        try:
            getattr(CssParse, handler_name)(value, merged_element)
        except AttributeError:
            # Unsupported CSS property: ignore it.
            pass
    return merged_element
::param: style_directive \ The attribute value of the given style sheet. Example: display: none ::param: html_element: \ The HtmlElement to which the given style is applied ::returns: A HtmlElement that merges the given element with the style attributes specified.
def build_penalties(self):
    """
    Assemble the GAM block-diagonal penalty matrix in quadratic form out
    of the penalty matrices specified for each feature.

    Each feature penalty matrix is multiplied by a lambda for that
    feature, so for m features:

        P = block_diag[lam0 * P0, lam1 * P1, lam2 * P2, ..., lamm * Pm]

    Returns
    -------
    P : sparse CSC matrix containing the model penalties in quadratic form
    """
    blocks = [term.build_penalties() for term in self._terms]
    return sp.sparse.block_diag(blocks)
builds the GAM block-diagonal penalty matrix in quadratic form out of penalty matrices specified for each feature. each feature penalty matrix is multiplied by a lambda for that feature. so for m features: P = block_diag[lam0 * P0, lam1 * P1, lam2 * P2, ... , lamm * Pm] Parameters ---------- None Returns ------- P : sparse CSC matrix containing the model penalties in quadratic form
def records(rec_type=None, fields=None, clean=True):
    '''
    Return DMI records from SMBIOS

    type
        Return only records of type(s)
        The SMBIOS specification defines the following DMI types:

        ==== ======================================
        Type Information
        ==== ======================================
         0   BIOS
         1   System
         2   Baseboard
         3   Chassis
         4   Processor
         5   Memory Controller
         6   Memory Module
         7   Cache
         8   Port Connector
         9   System Slots
        10   On Board Devices
        11   OEM Strings
        12   System Configuration Options
        13   BIOS Language
        14   Group Associations
        15   System Event Log
        16   Physical Memory Array
        17   Memory Device
        18   32-bit Memory Error
        19   Memory Array Mapped Address
        20   Memory Device Mapped Address
        21   Built-in Pointing Device
        22   Portable Battery
        23   System Reset
        24   Hardware Security
        25   System Power Controls
        26   Voltage Probe
        27   Cooling Device
        28   Temperature Probe
        29   Electrical Current Probe
        30   Out-of-band Remote Access
        31   Boot Integrity Services
        32   System Boot
        33   64-bit Memory Error
        34   Management Device
        35   Management Device Component
        36   Management Device Threshold Data
        37   Memory Channel
        38   IPMI Device
        39   Power Supply
        40   Additional Information
        41   Onboard Devices Extended Information
        42   Management Controller Host Interface
        ==== ======================================

    clean
      | Don't return well-known false information
      | (invalid UUID's, serial 000000000's, etcetera)
      | Defaults to ``True``

    CLI Example:

    .. code-block:: bash

        salt '*' smbios.records clean=False
        salt '*' smbios.records 14
        salt '*' smbios.records 4 core_count,thread_count,current_speed
    '''
    # Restrict dmidecode to a single type when one is requested.
    if rec_type is None:
        raw_output = _dmidecoder()
    else:
        raw_output = _dmidecoder('-t {0}'.format(rec_type))
    return _dmi_parse(raw_output, clean, fields)
Return DMI records from SMBIOS type Return only records of type(s) The SMBIOS specification defines the following DMI types: ==== ====================================== Type Information ==== ====================================== 0 BIOS 1 System 2 Baseboard 3 Chassis 4 Processor 5 Memory Controller 6 Memory Module 7 Cache 8 Port Connector 9 System Slots 10 On Board Devices 11 OEM Strings 12 System Configuration Options 13 BIOS Language 14 Group Associations 15 System Event Log 16 Physical Memory Array 17 Memory Device 18 32-bit Memory Error 19 Memory Array Mapped Address 20 Memory Device Mapped Address 21 Built-in Pointing Device 22 Portable Battery 23 System Reset 24 Hardware Security 25 System Power Controls 26 Voltage Probe 27 Cooling Device 28 Temperature Probe 29 Electrical Current Probe 30 Out-of-band Remote Access 31 Boot Integrity Services 32 System Boot 33 64-bit Memory Error 34 Management Device 35 Management Device Component 36 Management Device Threshold Data 37 Memory Channel 38 IPMI Device 39 Power Supply 40 Additional Information 41 Onboard Devices Extended Information 42 Management Controller Host Interface ==== ====================================== clean | Don't return well-known false information | (invalid UUID's, serial 000000000's, etcetera) | Defaults to ``True`` CLI Example: .. code-block:: bash salt '*' smbios.records clean=False salt '*' smbios.records 14 salt '*' smbios.records 4 core_count,thread_count,current_speed
def filter_values(self, pattern, flags=0):
    """
    | Filters the :meth:`PlistFileParser.elements` class property elements
      using given pattern.
    | Returns a list of matching elements values; to retrieve a single
      element value, use the :meth:`PlistFileParser.get_value` method
      instead.

    Usage::

        >>> plist_file_parser = PlistFileParser("standard.plist")
        >>> plist_file_parser.parse()
        True
        >>> plist_file_parser.filter_values(r"String A")
        [u'My Value A']
        >>> plist_file_parser.filter_values(r"String.*")
        [u'My Value C', u'My Value B', u'My Value A']

    :param pattern: Regex filtering pattern.
    :type pattern: unicode
    :param flags: Regex flags.
    :type flags: int
    :return: Values.
    :rtype: list
    """
    if not self.__elements:
        return []
    # Collect the value of every element whose name matches the pattern.
    return [
        value
        for path, element, value in foundations.walkers.dictionaries_walker(self.__elements)
        if re.search(pattern, element, flags)
    ]
| Filters the :meth:`PlistFileParser.elements` class property elements using given pattern. | Will return a list of matching elements values, if you want to get only one element value, use the :meth:`PlistFileParser.get_value` method instead. Usage:: >>> plist_file_parser = PlistFileParser("standard.plist") >>> plist_file_parser.parse() True >>> plist_file_parser.filter_values(r"String A") [u'My Value A'] >>> plist_file_parser.filter_values(r"String.*") [u'My Value C', u'My Value B', u'My Value A'] :param pattern: Regex filtering pattern. :type pattern: unicode :param flags: Regex flags. :type flags: int :return: Values. :rtype: list
def data_sessions(self):
    """
    Access the data_sessions subresource, creating it lazily.

    :returns: twilio.rest.wireless.v1.sim.data_session.DataSessionList
    :rtype: twilio.rest.wireless.v1.sim.data_session.DataSessionList
    """
    cached = self._data_sessions
    if cached is None:
        # Build once and memoize so repeated access reuses one instance.
        cached = DataSessionList(self._version, sim_sid=self._solution['sid'], )
        self._data_sessions = cached
    return cached
Access the data_sessions :returns: twilio.rest.wireless.v1.sim.data_session.DataSessionList :rtype: twilio.rest.wireless.v1.sim.data_session.DataSessionList
def load_rules(self, filename):
    """
    Load rules from a YAML configuration file.

    :param filename: Filename of rule YAML file
    :return: parsed rules object (or None for an empty file)
    :raises IOError: if the file cannot be opened
    :raises yaml.YAMLError: if the file is not valid YAML
    """
    self.logger.debug('Reading rules from %s', filename)
    try:
        in_file = open(filename)
    except IOError:
        self.logger.error('Error opening {0}'.format(filename))
        raise

    # Bug fix: close the file handle when done (the original leaked it).
    with in_file:
        try:
            # NOTE(review): yaml.load without an explicit Loader executes
            # arbitrary tags and is unsafe on untrusted input; rules files
            # are presumed trusted here, but consider yaml.safe_load.
            return yaml.load(in_file)
        except yaml.YAMLError as exc:
            if hasattr(exc, 'problem_mark'):
                self.logger.error('Error parsing rules{0}'.format(exc.problem_mark))
            else:
                self.logger.error('Error parsing rules in {0}'.format(in_file.name))
            raise
Load rules from YAML configuration in the given stream object :param filename: Filename of rule YAML file :return: rules object
def derive_link_fields(self, context):
    """
    Used to derive which fields should be linked. This should return a
    set() containing the names of those fields which should be linkable.
    """
    # An explicit link_fields setting always wins.
    if self.link_fields is not None:
        return self.link_fields

    derived = set()
    if self.fields:
        # Fall back to the first declared field that is not 'is_active'.
        first_linkable = next(
            (name for name in self.fields if name != 'is_active'), None)
        if first_linkable is not None:
            derived.add(first_linkable)
    return derived
Used to derive which fields should be linked. This should return a set() containing the names of those fields which should be linkable.
def serialize_table(ctx, document, table, root): """Serializes table element. """ # What we should check really is why do we pass None as root element # There is a good chance some content is missing after the import if root is None: return root if ctx.ilvl != None: root = close_list(ctx, root) ctx.ilvl, ctx.numid = None, None _table = etree.SubElement(root, 'table') _table.set('border', '1') _table.set('width', '100%') style = get_style(document, table) if style: _table.set('class', get_css_classes(document, style)) for rows in table.rows: _tr = etree.SubElement(_table, 'tr') for cell in rows: _td = etree.SubElement(_tr, 'td') if cell.grid_span != 1: _td.set('colspan', str(cell.grid_span)) if cell.row_span != 1: _td.set('rowspan', str(cell.row_span)) for elem in cell.elements: if isinstance(elem, doc.Paragraph): _ser = ctx.get_serializer(elem) _td = _ser(ctx, document, elem, _td, embed=False) if ctx.ilvl != None: # root = close_list(ctx, root) _td = close_list(ctx, _td) ctx.ilvl, ctx.numid = None, None fire_hooks(ctx, document, table, _td, ctx.get_hook('td')) fire_hooks(ctx, document, table, _td, ctx.get_hook('tr')) fire_hooks(ctx, document, table, _table, ctx.get_hook('table')) return root
Serializes table element.
def append(self, map):
    """
    Appends new elements to this map.

    :param map: a map with elements to be added.
    """
    # Only plain dicts are merged; any other input is silently ignored,
    # matching the previous behavior.
    if not isinstance(map, dict):
        return
    for raw_key, value in map.items():
        self.put(StringConverter.to_string(raw_key), value)
Appends new elements to this map. :param map: a map with elements to be added.
def _inject(self, fileobj, padding_func): """Write tag data into the Vorbis comment packet/page.""" # Find the old pages in the file; we'll need to remove them, # plus grab any stray setup packet data out of them. fileobj.seek(0) page = OggPage(fileobj) while not page.packets[0].startswith(b"\x03vorbis"): page = OggPage(fileobj) old_pages = [page] while not (old_pages[-1].complete or len(old_pages[-1].packets) > 1): page = OggPage(fileobj) if page.serial == old_pages[0].serial: old_pages.append(page) packets = OggPage.to_packets(old_pages, strict=False) content_size = get_size(fileobj) - len(packets[0]) # approx vcomment_data = b"\x03vorbis" + self.write() padding_left = len(packets[0]) - len(vcomment_data) info = PaddingInfo(padding_left, content_size) new_padding = info._get_padding(padding_func) # Set the new comment packet. packets[0] = vcomment_data + b"\x00" * new_padding new_pages = OggPage._from_packets_try_preserve(packets, old_pages) OggPage.replace(fileobj, old_pages, new_pages)
Write tag data into the Vorbis comment packet/page.
def get_nfkd_quick_check_property(value, is_bytes=False):
    """Get `NFKD QUICK CHECK` property."""

    table = unidata.ascii_nfkd_quick_check if is_bytes else unidata.unicode_nfkd_quick_check
    aliases = unidata.unicode_alias['nfkdquickcheck']

    # A leading '^' negates the property: resolve the alias on the bare
    # name, then re-attach the negation marker.
    if value.startswith('^'):
        bare = value[1:]
        key = '^' + aliases.get(bare, bare)
    else:
        key = aliases.get(value, value)

    return table[key]
Get `NFKD QUICK CHECK` property.
def update(self, portfolio, date, perfs=None):
    '''Actualizes the portfolio universe with the algo state.'''
    # Record the current simulation snapshot on the manager so later
    # decisions can read it.
    self.date = date
    self.portfolio = portfolio
    self.perfs = perfs
Actualizes the portfolio universe with the algo state
def _ref_covered_by_at_least_one_full_length_contig(nucmer_hits, percent_threshold, max_nt_extend): '''Returns true iff there exists a contig that completely covers the reference sequence nucmer_hits = hits made by self._parse_nucmer_coords_file.''' for l in nucmer_hits.values(): for hit in l: if ( (2 * max_nt_extend) + len(hit.ref_coords()) ) / hit.ref_length >= percent_threshold: return True return False
Returns true iff there exists a contig that completely covers the reference sequence nucmer_hits = hits made by self._parse_nucmer_coords_file.
def decrement(self, key, value=1):
    """
    Decrement the value of an item in the cache.

    :param key: The cache key
    :type key: str

    :param value: The decrement value
    :type value: int

    :rtype: int or bool
    """
    # Bug fix: the store's result (the new value, or False on failure) was
    # computed but never returned, so callers always received None despite
    # the documented int-or-bool return type.
    return self._store.decrement(self.tagged_item_key(key), value)
Decrement the value of an item in the cache. :param key: The cache key :type key: str :param value: The decrement value :type value: int :rtype: int or bool
def filter(self, table, vg_snapshots, filter_string):
    """Naive case-insensitive search."""
    needle = filter_string.lower()
    matches = []
    # Keep every snapshot whose (lowercased) name contains the needle,
    # preserving the incoming order.
    for snapshot in vg_snapshots:
        if needle in snapshot.name.lower():
            matches.append(snapshot)
    return matches
Naive case-insensitive search.
def produceResource(self, request, segments, webViewer):
    """
    Return a C{(resource, subsegments)} tuple or None, depending on whether
    I wish to return an L{IResource} provider for the given set of segments
    or not.
    """
    def build():
        # Prefer the zero-argument createResource when the subclass
        # defines one; otherwise fall back to the webViewer-aware factory.
        factory = getattr(self, 'createResource', None)
        if factory is None:
            return self.createResourceWith(webViewer)
        return factory()
    return self._produceIt(segments, build)
Return a C{(resource, subsegments)} tuple or None, depending on whether I wish to return an L{IResource} provider for the given set of segments or not.
def triad(note, key):
    """Return the triad on note in key as a list.

    Examples:
    >>> triad('E', 'C')
    ['E', 'G', 'B']
    >>> triad('E', 'B')
    ['E', 'G#', 'B']
    """
    # Root, diatonic third and diatonic fifth in the given key.
    third = intervals.third(note, key)
    fifth = intervals.fifth(note, key)
    return [note, third, fifth]
Return the triad on note in key as a list. Examples: >>> triad('E', 'C') ['E', 'G', 'B'] >>> triad('E', 'B') ['E', 'G#', 'B']
def list_files(path):
    """Recursively collects a list of files at a path."""
    # A single file is its own result set.
    if os.path.isfile(path):
        return [path]
    collected = []
    if os.path.isdir(path):
        # Walk the tree and join each filename onto its directory.
        for dirpath, _dirnames, filenames in os.walk(path):
            collected.extend(os.path.join(dirpath, name) for name in filenames)
    # Nonexistent paths (and anything that is neither file nor directory)
    # yield an empty list.
    return collected
Recursively collects a list of files at a path.
def order_mod(x, m):
    """Return the order of x in the multiplicative group mod m.
    """
    # Warning: naive repeated multiplication; slow for very large m.
    if m <= 1:
        return 0
    # x must be a unit mod m for the order to exist.
    assert gcd(x, m) == 1
    acc = x
    count = 1
    # Multiply by x until we cycle back to the identity.
    while acc != 1:
        acc = (acc * x) % m
        count += 1
    return count
Return the order of x in the multiplicative group mod m.
def start(self):
    """The event's start time, as a timezone-aware datetime object"""
    # Events without an explicit start time default to 19:00 CET;
    # otherwise the stored (naive) time is tagged with CET.
    time_of_day = (datetime.time(hour=19, tzinfo=CET)
                   if self.start_time is None
                   else self.start_time.replace(tzinfo=CET))
    return datetime.datetime.combine(self.date, time_of_day)
The event's start time, as a timezone-aware datetime object
def evaluate_all(ctx, model):
    """Evaluate POS taggers on WSJ and GENIA.

    Runs the ``evaluate`` command for every combination of training set
    (wsj, genia, wsj_genia), clustering feature (off, then on) and
    evaluation corpus (wsj, genia) — 12 evaluations in total, in the same
    order as the original hand-unrolled call sequence.
    """
    click.echo('chemdataextractor.pos.evaluate_all')
    click.echo('Model: %s' % model)
    for training_set in ('wsj', 'genia', 'wsj_genia'):
        for clusters in (False, True):
            # Models trained without cluster features carry a
            # '_nocluster' suffix in their pickle name.
            suffix = '' if clusters else '_nocluster'
            pickle_name = '%s_%s%s.pickle' % (model, training_set, suffix)
            for corpus in ('wsj', 'genia'):
                ctx.invoke(evaluate, model=pickle_name, corpus=corpus, clusters=clusters)
Evaluate POS taggers on WSJ and GENIA.
def get(self, key):
    """
    Retrieve an item from the cache by key.

    :param key: The cache key
    :type key: str

    :return: The cache value, or None when the key is absent
    """
    raw = self._redis.get(self._prefix + key)
    # Missing keys come back as None from Redis; do not unserialize them.
    if raw is None:
        return None
    return self.unserialize(raw)
Retrieve an item from the cache by key. :param key: The cache key :type key: str :return: The cache value
def _flush(self): """ Flush metadata to the backing file :return: """ with open(self.metadata_file, 'w') as f: json.dump(self.metadata, f)
Flush metadata to the backing file :return:
def get_text_position(fig, ax, ha='left', va='top', pad_scale=1.0):
    """Return text position inside of the given axis.

    :param fig: matplotlib figure, used to measure the axis size in inches
    :param ax: matplotlib axis whose limits define the coordinate frame
    :param ha: horizontal anchor, 'left' or 'right' (case-insensitive)
    :param va: vertical anchor, 'top'/'up'/'upper' or
               'bottom'/'down'/'lower' (case-insensitive)
    :param pad_scale: multiplier for the padding between text and axis edge
    :return: (pos_x, pos_y) position in data coordinates
    """
    ## Check and preprocess input arguments
    try:
        pad_scale = float(pad_scale)
    except (TypeError, ValueError):
        # Narrowed from a bare except so unrelated errors are not masked.
        raise TypeError("'pad_scale should be of type 'float'")
    assert isinstance(ha, str)
    assert isinstance(va, str)
    # Bug fix: the original lowercased a throwaway loop variable, so the
    # normalization was discarded and mixed-case arguments were rejected
    # as "Unsupported value". Rebind the actual parameters instead.
    ha = ha.lower()
    va = va.lower()

    ## Get axis size in inches
    ax_height, ax_width = get_ax_size_in_inch(fig, ax)

    ## Construct inversion factor from inch to plot coordinate
    length_x = ax.get_xlim()[1] - ax.get_xlim()[0]
    length_y = ax.get_ylim()[1] - ax.get_ylim()[0]
    inch2coord_x = length_x / ax_width
    inch2coord_y = length_y / ax_height

    ## Padding is 3% of the geometric mean of the axis dimensions,
    ## scaled by pad_scale.
    ax_length_geom_average = (ax_height * ax_width) ** 0.5
    pad_inch = ax_length_geom_average * 0.03 * pad_scale
    pad_coord_x = pad_inch * inch2coord_x
    pad_coord_y = pad_inch * inch2coord_y

    if ha == 'left':
        pos_x = ax.get_xlim()[0] + pad_coord_x
    elif ha == 'right':
        pos_x = ax.get_xlim()[1] - pad_coord_x
    else:
        raise Exception("Unsupported value for 'ha'")

    if va in ['top', 'up', 'upper']:
        pos_y = ax.get_ylim()[1] - pad_coord_y
    elif va in ['bottom', 'down', 'lower']:
        pos_y = ax.get_ylim()[0] + pad_coord_y
    else:
        raise Exception("Unsupported value for 'va'")

    return pos_x, pos_y
Return text position inside of the given axis
def processpool_map(task, args, message, concurrency, batchsize=1, nargs=None):
    """ See http://stackoverflow.com/a/16071616

    Map `task` over `args` using a pool of `concurrency` worker processes,
    sending jobs in batches of `batchsize`, and show a progress bar when
    `message` is non-empty. Results are returned flattened, restored to
    the original job order.
    """
    njobs = get_njobs(nargs, args)
    show_progress = bool(message)
    batches = grouper(batchsize, tupleise(args))

    # Each queue item is a whole batch; run the task over every job in it.
    def batched_task(*batch):
        return [task(*job) for job in batch]

    if show_progress:
        message += ' (PP:{}w:{}b)'.format(concurrency, batchsize)
        pbar = setup_progressbar(message, njobs, simple_progress=True)
        pbar.start()

    q_in = multiprocessing.Queue()  # Should I limit either queue size? Limiting in-queue
    q_out = multiprocessing.Queue() # increases time taken to send jobs, makes pbar less useful
    proc = [multiprocessing.Process(target=fun, args=(batched_task, q_in, q_out))
            for _ in range(concurrency)]
    for p in proc:
        p.daemon = True
        p.start()
    # Enqueue every batch tagged with its index (so order can be restored),
    # then one (None, None) sentinel per worker to signal shutdown.
    sent = [q_in.put((i, x)) for (i, x) in enumerate(batches)]
    [q_in.put((None, None)) for _ in range(concurrency)]
    res = []
    completed_count = 0
    # Collect exactly one result per batch that was sent.
    for _ in range(len(sent)):
        result = get_from_queue(q_out)
        res.append(result)
        completed_count += len(result[1])
        if show_progress:
            pbar.update(completed_count)
    [p.join() for p in proc]
    if show_progress:
        pbar.finish()

    # Sort by batch index, drop the indices, and flatten the batches.
    return flatten_list([x for (i, x) in sorted(res)])
See http://stackoverflow.com/a/16071616
def GetSubkeyByPath(self, key_path):
    """Retrieves a subkey by path.

    Args:
      key_path (str): path of the subkey.

    Returns:
      WinRegistryKey: Windows Registry subkey or None if not found.
    """
    # Lazily resolve the backing registry key on first use.
    if not self._registry_key and self._registry:
        self._GetKeyFromRegistry()

    # Descend one path segment at a time, stopping at the first miss.
    current_key = self
    for segment_name in key_paths.SplitKeyPath(key_path):
        current_key = current_key.GetSubkeyByName(segment_name)
        if not current_key:
            break

    return current_key
Retrieves a subkey by path. Args: key_path (str): path of the subkey. Returns: WinRegistryKey: Windows Registry subkey or None if not found.
def remove_component(self, entity: int, component_type: Any) -> int:
    """Remove a Component instance from an Entity, by type.

    A Component instance can be removed by providing it's type.
    For example: world.delete_component(enemy_a, Velocity) will remove
    the Velocity instance from the Entity enemy_a.

    Raises a KeyError if either the given entity or Component type does
    not exist in the database.

    :param entity: The Entity to remove the Component from.
    :param component_type: The type of the Component to remove.
    """
    entities_with_component = self._components[component_type]
    entities_with_component.discard(entity)
    # Drop the per-type index once no entity holds this component type.
    if not entities_with_component:
        del self._components[component_type]

    # KeyError propagates here when the entity/component pairing is absent.
    entity_components = self._entities[entity]
    del entity_components[component_type]
    # Drop the entity record once it has no components left.
    if not entity_components:
        del self._entities[entity]

    self.clear_cache()
    return entity
Remove a Component instance from an Entity, by type. A Component instance can be removed by providing it's type. For example: world.delete_component(enemy_a, Velocity) will remove the Velocity instance from the Entity enemy_a. Raises a KeyError if either the given entity or Component type does not exist in the database. :param entity: The Entity to remove the Component from. :param component_type: The type of the Component to remove.
def send_query(self, query):
    """Sends a query to the Riemann server

    :returns: The response message from Riemann
    """
    # Wrap the query string in a protobuf Msg and hand it to the transport.
    request = riemann_client.riemann_pb2.Msg()
    request.query.string = query
    return self.transport.send(request)
Sends a query to the Riemann server :returns: The response message from Riemann
def get_revoked_certs(self):
    """
    Returns revoked certificates of this CA
    (does not include expired certificates)
    """
    current_time = timezone.now()
    # Restrict to certificates whose validity window contains "now":
    # expired (validity_end in the past) certificates are excluded.
    return self.cert_set.filter(revoked=True,
                                validity_start__lte=current_time,
                                validity_end__gte=current_time)
Returns revoked certificates of this CA (does not include expired certificates)
def get_json(request, token):
    """Return matching results as JSON"""
    result = []
    searchtext = request.GET['q']
    # Require at least 3 characters before hitting the database.
    if len(searchtext) >= 3:
        # The token maps to a pickled (app_label, model_name, query) tuple
        # cached when the autocomplete widget was rendered.
        pickled = _simple_autocomplete_queryset_cache.get(token, None)
        if pickled is not None:
            app_label, model_name, query = pickle.loads(pickled)
            model = apps.get_model(app_label, model_name)
            # Rebuild the original queryset from the cached query object.
            queryset = QuerySet(model=model, query=query)
            fieldname = get_search_fieldname(model)
            di = {'%s__istartswith' % fieldname: searchtext}
            app_label_model = '%s.%s' % (app_label, model_name)
            max_items = get_setting(app_label_model, 'max_items', 10)
            items = queryset.filter(**di).order_by(fieldname)[:max_items]

            # Check for duplicate strings
            counts = {}
            for item in items:
                if hasattr(item, "__unicode__"):
                    key = item.__unicode__()
                else:
                    key = str(item)
                #key = unicode(item)
                counts.setdefault(key, 0)
                counts[key] += 1

            # Assemble result set
            for item in items:
                #key = value = unicode(item)
                if hasattr(item, "__unicode__"):
                    key = value = item.__unicode__()
                else:
                    key = value = str(item)
                value = getattr(item, fieldname)
                # Disambiguate duplicate display strings by appending extra
                # info produced by the configured duplicate_format_function.
                if counts[key] > 1:
                    func = get_setting(
                        app_label_model,
                        'duplicate_format_function',
                        lambda obj, model, content_type: content_type.name
                    )
                    content_type = ContentType.objects.get_for_model(model)
                    value = '%s (%s)' % (value, func(item, model, content_type))
                result.append((item.id, value))
        else:
            # Sentinel telling the client the token expired or was never cached.
            result = 'CACHE_MISS'
    return HttpResponse(json.dumps(result))
Return matching results as JSON
def coffee(input, output, **kw):
    """Process CoffeeScript files"""
    # Compile (-c) the input into the requested output directory (-o)
    # using the binary configured on the Flask app.
    compiler = current_app.config.get('COFFEE_BIN')
    subprocess.call([compiler, '-c', '-o', output, input])
Process CoffeeScript files
def p_commands_list(p):
    """commands : commands command"""
    # NOTE: the docstring above is the PLY grammar rule for this
    # production — it must not be edited as documentation.
    p[0] = p[1]

    # section 3.2: REQUIRE command must come before any other commands
    if p[2].RULE_IDENTIFIER == 'REQUIRE':
        if any(command.RULE_IDENTIFIER != 'REQUIRE'
               for command in p[0].commands):
            print("REQUIRE command on line %d must come before any "
                  "other non-REQUIRE commands" % p.lineno(2))
            raise SyntaxError

    # section 3.1: ELSIF and ELSE must follow IF or another ELSIF
    elif p[2].RULE_IDENTIFIER in ('ELSIF', 'ELSE'):
        if p[0].commands[-1].RULE_IDENTIFIER not in ('IF', 'ELSIF'):
            print("ELSIF/ELSE command on line %d must follow an IF/ELSIF "
                  "command" % p.lineno(2))
            raise SyntaxError

    p[0].commands.append(p[2])
commands : commands command
def normalize(path_name, override=None):
    """
    Prepares a path name to be worked with. Path name must not be empty.
    This function will return the 'normpath'ed path and the identity of
    the path. This function takes an optional overriding argument for
    the identity.

    ONLY PROVIDE OVERRIDE IF:
    1) YOU ARE WORKING WITH A FOLDER THAT HAS AN EXTENSION IN THE NAME
    2) YOU ARE MAKING A FILE WITH NO EXTENSION
    """
    # Classify the raw path first, then canonicalize it (~ expansion
    # followed by normpath).
    identity = identify(path_name, override=override)
    expanded = os.path.expanduser(path_name)
    return os.path.normpath(expanded), identity
Prepares a path name to be worked with. Path name must not be empty. This function will return the 'normpath'ed path and the identity of the path. This function takes an optional overriding argument for the identity. ONLY PROVIDE OVERRIDE IF: 1) YOU ARE WORKING WITH A FOLDER THAT HAS AN EXTENSION IN THE NAME 2) YOU ARE MAKING A FILE WITH NO EXTENSION
def add(self, label):
    """
    Add a label to the end of the list.

    Args:
        label (Label): The label to add.
    """
    start, end = label.start, label.end
    # Back-reference so the label knows which list owns it.
    label.label_list = self
    self.label_tree.addi(start, end, label)
Add a label to the end of the list. Args: label (Label): The label to add.
def stop_process(self):
    """ Stop the process.

    Stops the reader thread first, then terminates the child process by
    escalating through SIGINT, SIGTERM and SIGKILL (or taskkill on
    Windows), giving it up to 5 seconds after each signal.

    :raises: EnvironmentError if stopping fails due to unknown environment
    TestStepError if process stops with non-default returncode and
    return code is not ignored.
    """
    if self.read_thread is not None:
        self.logger.debug("stop_process::readThread.stop()-in")
        self.read_thread.stop()
        self.logger.debug("stop_process::readThread.stop()-out")
    returncode = None
    if self.proc:
        self.logger.debug("os.killpg(%d)", self.proc.pid)
        # Escalate through increasingly forceful signals; stop as soon as
        # the process actually exits.
        for sig in (signal.SIGINT, signal.SIGTERM, signal.SIGKILL):
            timeout = 5
            try:
                try:
                    self.logger.debug("Trying signal %s", sig)
                    # Signal the whole process group, not just the child.
                    os.killpg(self.proc.pid, sig)
                except AttributeError:
                    self.logger.debug("os.killpg::AttributeError")
                    # Failed most likely because in windows,
                    # so use taskkill to kill whole process tree of proc
                    if platform.system() == "Windows":
                        subprocess.call(['taskkill', '/F', '/T', '/PID', str(self.proc.pid)])
                    else:
                        self.logger.debug("os.killpg::unknown env")
                        raise EnvironmentError("Unknown platform, "
                                               "don't know how to terminate process")
                # Poll once per second until the process exits or the
                # per-signal timeout is exhausted.
                while self.proc.poll() is None and timeout > 0:
                    time.sleep(1)
                    timeout -= 1
                returncode = self.proc.poll()
                if returncode is not None:
                    break
            except OSError as error:
                # Typically the process is already gone; log and move on.
                self.logger.info("os.killpg::OSError: %s", error)
        self.proc = None
    if returncode is not None:
        self.logger.debug("Process stopped with returncode %s" % returncode)
        if returncode != self.default_retcode and not self.__ignore_return_code:
            raise TestStepError("Process stopped with returncode %d" % returncode)
    self.logger.debug("stop_process-out")
Stop the process. :raises: EnvironmentError if stopping fails due to unknown environment TestStepError if process stops with non-default returncode and return code is not ignored.
def check_sig(self, other):
    """Check overlap insignificance with another spectrum.
    Also see :ref:`pysynphot-command-checko`.

    .. note:: Only use when :meth:`check_overlap` returns "partial".

    Parameters
    ----------
    other : `SourceSpectrum` or `SpectralElement`
        The other spectrum.

    Returns
    -------
    ans : bool
        `True` means the *lack* of overlap is *insignificant*
        (i.e., okay to proceed).

    """
    # Wavelength range where this bandpass actually transmits.
    swave = self.wave[N.where(self.throughput != 0)]
    s1, s2 = swave.min(), swave.max()

    owave = other.wave
    o1, o2 = owave.min(), owave.max()

    # Boundaries of the low-end and high-end non-overlapping regions.
    lorange = sorted([s1, o1])
    hirange = sorted([s2, o2])

    # Get the full throughput
    total = self.integrate()

    # Now get the other two pieces
    # We cannot yet do
    # low = self[slice(*lowrange)].integrate()
    wave = self.wave
    # Index pairs bracketing the excluded low and high regions.
    idxs = [N.searchsorted(wave, lorange, 'left'),
            N.searchsorted(wave, hirange, 'left')]

    excluded = 0.0
    for idx in idxs:
        try:
            excluded += self.integrate(wave=wave[slice(*idx)])
        except IndexError:
            # An empty or out-of-range slice contributes nothing.
            pass

    # Insignificant when the excluded throughput is under 1% of the total.
    # If the range is zero, do nothing
    if excluded/total < 0.01:
        return True
    else:
        return False
Check overlap insignificance with another spectrum. Also see :ref:`pysynphot-command-checko`. .. note:: Only use when :meth:`check_overlap` returns "partial". Parameters ---------- other : `SourceSpectrum` or `SpectralElement` The other spectrum. Returns ------- ans : bool `True` means the *lack* of overlap is *insignificant* (i.e., okay to proceed).
def compile_action_preconditions_checking(self,
                                          state: Sequence[tf.Tensor],
                                          action: Sequence[tf.Tensor]) -> tf.Tensor:
    '''Combines the action preconditions into an applicability checking op.

    Args:
        state (Sequence[tf.Tensor]): The current state fluents.
        action (Sequence[tf.Tensor]): The action fluents.

    Returns:
        A boolean tensor for checking if `action` is applicable in `state`.
    '''
    with self.graph.as_default():
        with tf.name_scope('action_preconditions_checking'):
            preconds = self.compile_action_preconditions(state, action)
            # Stack each precondition's boolean tensor along a new axis,
            # then AND-reduce: applicable iff every precondition holds.
            all_preconds = tf.stack([p.tensor for p in preconds], axis=1)
            checking = tf.reduce_all(all_preconds, axis=1)
            return checking
Combines the action preconditions into an applicability checking op. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. Returns: A boolean tensor for checking if `action` is applicable in `state`.
def install_translations(config):
    """Add check translations according to ``config`` as a fallback to existing translations"""
    if not config:
        return

    from . import _translation
    # Build a translation catalog from the check's own locale directory and
    # chain it behind the existing translations.
    fallback_catalog = gettext.translation(
        domain=config["domain"],
        localedir=internal.check_dir / config["localedir"],
        fallback=True)
    _translation.add_fallback(fallback_catalog)
Add check translations according to ``config`` as a fallback to existing translations
def __leaf(i, j, first, maxfirst, prevleaf, ancestor):
    """
    Determine if j is leaf of i'th row subtree.

    Returns (q, jleaf) where jleaf is 0 (not a leaf), 1 (first leaf of the
    subtree, q == i) or 2 (subsequent leaf, q == least common ancestor of
    j and the previous leaf). NOTE(review): this appears to follow the
    cs_leaf routine from CSparse (T. Davis) — confirm against that source.
    """
    jleaf = 0
    # j is not a leaf of the i'th subtree.
    if i<=j or first[j] <= maxfirst[i]:
        return -1, jleaf
    # Record the largest first[j] seen so far for row i.
    maxfirst[i] = first[j]
    jprev = prevleaf[i]     # previous leaf found in the i'th subtree
    prevleaf[i] = j
    if jprev == -1: jleaf = 1
    else: jleaf = 2
    # First leaf: the subtree itself is the answer.
    if jleaf == 1: return i, jleaf
    # Climb the (path-compressed) ancestor forest to find the root above
    # the previous leaf.
    q = jprev
    while q != ancestor[q]: q = ancestor[q]
    # Path compression: point every node on the traversed path at q.
    s = jprev
    while s != q:
        sparent = ancestor[s]
        ancestor[s] = q
        s = sparent
    return q, jleaf         # q = least common ancestor (jprev, j)
Determine if j is leaf of i'th row subtree.
def _node_set(self) -> List["InstanceNode"]:
    """XPath - return the list of all receiver's nodes."""
    # Array-valued nodes expand to their entries; anything else is a
    # singleton node-set.
    if isinstance(self.value, ArrayValue):
        return list(self)
    return [self]
XPath - return the list of all receiver's nodes.
def equi(map_axis, centerlon, centerlat, radius, color, alpha=1.0):
    """
    This function enables A95 error ellipses to be drawn in cartopy around
    paleomagnetic poles in conjunction with shoot
    (modified from: http://www.geophysique.be/2011/02/20/matplotlib-basemap-tutorial-09-drawing-circles/).
    """
    if not has_cartopy:
        print('-W- cartopy must be installed to run ipmag.equi')
        return
    # Trace the circle of the given angular radius by shooting a geodesic
    # from the centre at every whole-degree azimuth, then close the ring.
    lons, lats = [], []
    for azimuth in range(0, 360):
        lon_pt, lat_pt, _back_azimuth = shoot(centerlon, centerlat, azimuth, radius)
        lons.append(lon_pt)
        lats.append(lat_pt)
    lons.append(lons[0])
    lats.append(lats[0])
    plt.plot(lons[::-1], lats[::-1], color=color,
             transform=ccrs.Geodetic(), alpha=alpha)
This function enables A95 error ellipses to be drawn in cartopy around paleomagnetic poles in conjunction with shoot (modified from: http://www.geophysique.be/2011/02/20/matplotlib-basemap-tutorial-09-drawing-circles/).
def create_package_node(self, team, user, package, dry_run=False):
    """
    Creates a new package and initializes its contents. See `install_package`.
    """
    empty_contents = RootNode(dict())
    # Dry runs return the fresh contents without touching the store.
    if dry_run:
        return empty_contents
    self.check_name(team, user, package)
    assert empty_contents is not None

    self.create_dirs()

    # Remove any stale package data from a previous install; it is fine
    # if nothing exists at that path yet.
    existing_path = self.package_path(team, user, package)
    try:
        os.remove(existing_path)
    except OSError:
        pass

    return empty_contents
Creates a new package and initializes its contents. See `install_package`.
def _trim_tree(state):
    """Trim empty leaf nodes from the tree.

    - To simplify the tree conversion, empty nodes are added before it is
    known if they will contain items that connect back to the
    authenticated subject. If there are no connections, the nodes remain
    empty, which causes them to be removed here.

    - Removing a leaf node may cause the parent to become a new empty leaf
    node, so the function is repeated until there are no more empty leaf
    nodes.
    """
    for leaf in list(state.tree.leaf_node_gen):
        if leaf.type_str == TYPE_NODE_TAG:
            leaf.parent.child_list.remove(leaf)
            # Removing this leaf may have exposed a new empty parent leaf;
            # restart the scan from scratch.
            return _trim_tree(state)
Trim empty leaf nodes from the tree. - To simplify the tree conversion, empty nodes are added before it is known if they will contain items that connect back to the authenticated subject. If there are no connections, the nodes remain empty, which causes them to be removed here. - Removing a leaf node may cause the parent to become a new empty leaf node, so the function is repeated until there are no more empty leaf nodes.
def iso_abund(self, cycle, stable=False, amass_range=None, mass_range=None, ylim=[0,0], ref=-1, show=True, log_logic=True, decayed=False, color_plot=True, grid=False, point_set=1, include_title=False, data_provided=False,thedata=None, verbose=True, mov=False,drawfig=None,drawax=None,show_names=True, label=None,colour=None,elemaburtn=False,mypoint=None,plot_type=['-','--','-.',':','-']):
    '''
    plot the abundance of all the chemical species

    Parameters
    ----------
    cycle : string, integer or list
        The cycle of interest.  If it is a list of cycles, this
        method will do a plot for each cycle and save them to a file.
    stable : boolean, optional
        A boolean of whether to filter out the unstables.  The
        default is False.
    amass_range : list, optional
        A 1x2 array containing the lower and upper atomic mass range.
        If None plot entire available atomic mass range.  The default
        is None.
    mass_range : list, optional
        A 1x2 array containing the lower and upper mass range.  If
        this is an instance of abu_vector this will only plot
        isotopes that have an atomic mass within this range.  This
        will throw an error if this range does not make sense ie
        [45,2].  If None, it will plot over the entire range.  The
        default is None.
    ylim : list, optional
        A 1x2 array containing the lower and upper Y limits.  If it
        is [0,0], then ylim will be determined automatically.  The
        default is [0,0].
    ref : integer or list, optional
        reference cycle.  If it is not -1, this method will plot the
        abundances of cycle divided by the cycle of the same instance
        given in the ref variable.  If ref is a list it will be
        interpreted to have two elements:
        ref=['dir/of/ref/run',cycle] which uses a reference cycle
        from another run.  If any abundance in the reference cycle is
        zero, it will replace it with 1e-99.  The default is -1.
    show : boolean, optional
        Boolean of if the plot should be displayed.  The default is
        True.
    log_logic : boolean, optional
        Plot abundances in log scale or linear.  The default is True.
    decayed : boolean, optional
        If True plot decayed distributions, else plot life
        distribution.  The default is False.
    color_plot : boolean, optional
        Color dots and lines [True/False].  The default is True.
    grid : boolean, optional
        print grid.  The default is False.
    point_set : integer, optional
        Set to 0, 1 or 2 to select one of three point sets, useful
        for multiple abundances or ratios in one plot.  The default
        is 1.
    include_title : boolean, optional
        Include a title with the plot.  The default is False.
    drawfig, drawax, mov : optional, not necessary for user to set these variables
        The figure and axes containers to be drawn on, and whether or
        not a movie is being made (only True when se.movie is called,
        which sets mov to True automatically).
    elemaburtn : boolean, private
        If true, iso_abund() returns after writing
        self.***_iso_to_plot for use with other plotting routines.
    mypoint : string, optional
        fix the marker style of all the points in this plot to one
        type, given as a string.  If None, multiple point styles are
        used as per point_set.  The default is None.

    '''
    # NOTE(review): ylim=[0,0] and plot_type=[...] are mutable defaults;
    # plot_type is append()-ed below.  The default already has 5 entries so
    # the pad loop is a no-op for it, but a shorter caller-supplied list
    # would be mutated in place — TODO confirm whether callers rely on that.
    plotType=self._classTest()

    # A list of cycles: delegate one plot per cycle and stop.
    if str(cycle.__class__)=="<type 'list'>":
        self.iso_abundMulti(cycle, stable,amass_range,mass_range,ylim,ref, decayed,include_title,color_plot=color_plot,grid=False,point_set=point_set)
        return

    # Sanity checks on the two range arguments.
    if mass_range!=None and mass_range[0]>mass_range[1]:
        print('Please input a proper mass range')
        print('Returning None')
        return None
    if amass_range!=None and amass_range[0]>amass_range[1]:
        print('Please input a proper Atomic mass range')
        print('Returning None')
        return None

    if plotType=='se':
        if decayed:
            print('Decay option not yet implemented for mppnp - but it is easy do! Consider investing the time!')
            return None

        # get things as arrays
        if not data_provided:
            cycle=self.se.findCycle(cycle)
            a_iso_to_plot = array(self.se.A)
            abunds = self.get(cycle,'iso_massf')
            isotope_to_plot = array(self.se.isotopes)
            z_iso_to_plot = array(self.se.Z)
            isomers_to_plot = array(self.se.isomeric_states)
            if ref >-1:
                ref=self.se.findCycle(ref)
                abundsRef=self.se.get(ref,'iso_massf')
            masses = self.se.get(cycle,'mass')
        else:
            # Caller supplied (abundances, masses) via thedata.
            cycle=cycle # why so serious?
            a_iso_to_plot = array(self.se.A)
            abunds = thedata[0]
            isotope_to_plot = array(self.se.isotopes)
            z_iso_to_plot = array(self.se.Z)
            isomers_to_plot = array(self.se.isomeric_states)
            if ref >-1:
                # Reference cycles are not supported with user data yet.
                raise IOError("No. It's not ready yet.")
                #ref=self.se.findCycle(ref)
                #abundsRef=self.se.get(ref,'iso_massf')
            masses = thedata[1]

        if mass_range == None:
            if verbose:
                print('Using default mass range')
            mass_range = [min(masses),max(masses)]
        masses.sort()
        mass_range.sort()

        if amass_range == None:
            amass_range=[int(min(a_iso_to_plot)),int(max(a_iso_to_plot))]

        # remove neutrons - this could move in the non- se/PPN specific part below
        if 0 in z_iso_to_plot:
            ind_neut = where(z_iso_to_plot==0)[0][0]
            a_iso_to_plot = delete(a_iso_to_plot,ind_neut)
            z_iso_to_plot = delete(z_iso_to_plot,ind_neut)
            isomers_to_plot = delete(isomers_to_plot,ind_neut)
            isotope_to_plot = delete(isotope_to_plot,ind_neut)
            abunds = delete(abunds,ind_neut,1)
            if ref >-1:
                abundsRef = delete(abundsRef,ind_neut,1)

        # extract amass_range
        acon=(a_iso_to_plot>=amass_range[0]) & (a_iso_to_plot<=amass_range[1])
        isomers_to_plot = isomers_to_plot[acon]
        isotope_to_plot = isotope_to_plot[acon]
        z_iso_to_plot = z_iso_to_plot[acon]
        abunds = abunds.T[acon].T
        if ref >-1:
            abundsRef = abundsRef.T[acon].T
        a_iso_to_plot = a_iso_to_plot[acon]
        el_iso_to_plot = array([x.split('-')[0] for x in isotope_to_plot.tolist()])

        # apply mass range
        # NOTE(review): this default-mass_range block is a repeat of the
        # one above; it is dead by this point since mass_range is set.
        if mass_range == None:
            if verbose:
                print('Using default mass range')
            mass_range = [min(masses),max(masses)]
        mass_range.sort()

        aabs = []
        if ref >-1:
            cyc = [cycle,ref]
            abus = [abunds,abundsRef]
        else:
            cyc = [cycle]
            abus = [abunds]
        for cc,aa in zip(cyc,abus):
            if not data_provided:
                masses = self.se.get(cc,'mass')
            else:
                masses=masses # why so serious?
            masses.sort()
            dmass = masses[1:] - masses[:-1]   # I should check the grid definition
            dmass = append(dmass,0.)
            mcon = (masses>=mass_range[0]) & (masses<=mass_range[1])
            dmass = dmass[mcon]
            aa = aa[mcon]
            # average over mass range:
            aa = (aa.T*dmass).T.sum(0)
            aa = old_div(aa, (mass_range[1] - mass_range[0]))
            # abunds has now length of isotope_to_plot
            aabs.append(aa)
        if ref >-1:
            # Ratio to the reference cycle; 1e-99 guards divide-by-zero.
            abunds = old_div(aabs[0],(aabs[1]+1.e-99))
        else:
            abunds = aabs[0]

        # Publish the selected data on the instance for other routines.
        self.a_iso_to_plot=a_iso_to_plot
        self.isotope_to_plot=isotope_to_plot
        self.z_iso_to_plot=z_iso_to_plot
        self.el_iso_to_plot=el_iso_to_plot
        self.abunds=abunds
        self.isomers_to_plot=isomers_to_plot

        if elemaburtn: return
        # self.isotopes = self.se.isotopes
    elif plotType=='PPN':
        print("This method adds the following variables to the instance:")
        print("a_iso_to_plot mass number of plotted range of species")
        print("isotope_to_plot corresponding list of isotopes")
        print("z_iso_to_plot corresponding charge numbers")
        print("el_iso_to_plot corresponding element names")
        print("abunds corresponding abundances")
        print("isom isomers and their abundance")

        self.get(cycle,decayed=decayed)
        if ref is not -1:
            # NOTE(review): "is not -1" relies on CPython small-int
            # interning; should be "!= -1".
            if type(ref) is list: # reference cycle from other run
                import ppn
                pp=ppn.abu_vector(ref[0])
                abunds_pp=pp.get(ref[1],decayed=decayed)
                self.abunds=old_div(self.abunds,pp.abunds)
            else:
                abunds=self.abunds
                self.get(ref,decayed=decayed)
                self.abunds=old_div(abunds,(self.abunds+1.e-99))
        if amass_range == None:
            amass_range=[min(self.a_iso_to_plot),max(self.a_iso_to_plot)]

        # Mask everything outside the atomic-mass window.
        aa=ma.masked_outside(self.a_iso_to_plot,amass_range[0],amass_range[1])
        isotope_to_plot=ma.array(self.isotope_to_plot,mask=aa.mask).compressed()
        z_iso_to_plot=ma.array(self.z_iso_to_plot,mask=aa.mask).compressed()
        el_iso_to_plot=ma.array(self.el_iso_to_plot,mask=aa.mask).compressed()
        abunds=ma.array(self.abunds,mask=aa.mask).compressed()
        a_iso_to_plot=aa.compressed()
        isomers_to_plot=[]
        # presumably entries with mass number > 100 mark isomeric states
        # in self.isom — TODO confirm.
        for i in range(len(self.isom)):
            if int(self.isom[i][0].split('-')[1])>100:
                isomers_to_plot.append(self.isom[i])

        self.a_iso_to_plot=a_iso_to_plot
        self.isotope_to_plot=isotope_to_plot
        self.z_iso_to_plot=z_iso_to_plot
        self.el_iso_to_plot=el_iso_to_plot
        self.abunds=abunds
        self.isomers_to_plot=isomers_to_plot
    else:
        print('This method, iso_abund, is not supported by this class')
        print('Returning None')
        return None

    if verbose:
        print('Using the following conditions:')
        if plotType=='se':
            print('\tmass_range:', mass_range[0], mass_range[1])
        print('\tAtomic mass_range:', amass_range[0], amass_range[1])
        print('\tcycle: ',cycle)
        print('\tplot only stable:',stable)
        print('\tplot decayed: ',decayed)

    if stable:
        # remove unstables:
        # For the element that belongs to the isotope at index 5 in
        # isotope_to_plot (C-12) the following gives the mass numbers of
        # stable elements:
        # self.stable_el[self.stable_names.index(el_iso_to_plot[5])][1:]
        ind_delete=[]
        for i in range(len(isotope_to_plot)):
            if a_iso_to_plot[i] not in self.stable_el[self.stable_names.index(el_iso_to_plot[i])][1:]:
                ind_delete.append(i)
        a_iso_to_plot = delete(a_iso_to_plot, ind_delete)
        z_iso_to_plot = delete(z_iso_to_plot, ind_delete)
        isomers_to_plot = delete(isomers_to_plot,ind_delete)
        isotope_to_plot = delete(isotope_to_plot,ind_delete)
        el_iso_to_plot = delete(el_iso_to_plot, ind_delete)
        abunds = delete(abunds, ind_delete)

    # el_list=[]   # list of elements in el_iso_to_plot
    #
    # for el in self.elements_names:
    #     if el in el_iso_to_plot:
    #         el_list.append(el)
    # SJONES implicit loop:
    el_list = [el for el in self.elements_names if el in el_iso_to_plot]

    abund_plot = []   # extract for each element an abundance and associated
    mass_num = []     # mass number array, sorted by mass number
    for el in el_list:
        numbers = a_iso_to_plot[(el_iso_to_plot==el)]
        abund_plot.append(abunds[(el_iso_to_plot==el)][argsort(numbers)])
        mass_num.append(sort(numbers))
    # now plot:
    #plot_type = ['-','--','-.',':','-'] ##now implemented as an arg
    print(plot_type)
    while len(plot_type)<=4:
        plot_type.append('')
    print(plot_type)
    pl_index = 0
    if mypoint is None:
        points = [['o','^','p','h','*'],['x','+','D','>','s'],['H','v','<','*','3']]
    else:
        points = [ [mypoint]*5 , [mypoint]*5 , [mypoint]*5]
    if color_plot:
        colors = ['g','r','c','m','k']
    elif colour is not None:
        colors = [colour]*5
    else:
        colors = ['k','k','k','k','k']

    # Track the data extent so y-limits can be set automatically.
    ylim1 = 1.e99
    ylim2 = -1.e99

    # initialise movie-related things:
    if mov:
        artists=[]
        ax=drawax
        fig=drawfig
    elif drawax is not None:
        ax=drawax
    else:
        ax=pl.axes()

    if drawfig is not None:
        fig=drawfig

    for j in range(len(abund_plot)):        #Loop through the elements of interest
        # for l in xrange(len(abund_plot[j])):
        #     if abund_plot[j][l] == 0:
        #         abund_plot[j][l] = 1e-99
        abund_plot[j] = np.maximum(abund_plot[j],1.e-99) # SJONES instead of looping

        # a_dum=zeros(len(abund_plot[j]))   # this I (FH) have to do because for some
        if log_logic == False:            # reason log10(abu_abund[j]) does not work
            a_dum = abund_plot[j]         # although abu_abund[j] is a numpy array?!?
        else:
            # for ii in range(len(abund_plot[j])):
            #     a_dum[ii]=log10(abund_plot[j][ii])
            a_dum=np.log10(abund_plot[j]) # SJONES this seems to work fine for me

        # Build a matplotlib style string like 'go-' when colors are
        # single-letter codes; otherwise pass style parts separately.
        if type(colors[0]) is str:
            this_label=str(colors[pl_index]+points[point_set][pl_index]+\
                           plot_type[pl_index])
        else:
            this_label=None

        if mov:
            artist1,=ax.plot(mass_num[j],a_dum,this_label,markersize=6,
                             markeredgecolor='None')
        else:
            if this_label is not None:
                if label is not None and j==0:
                    pl.plot(mass_num[j],a_dum,this_label,markersize=6,
                            label=label,markeredgecolor='None')
                    pl.legend(loc='best').draw_frame(False)
                else:
                    pl.plot(mass_num[j],a_dum,this_label,markersize=6,
                            markeredgecolor='None')
            else:
                if label is not None and j==0:
                    pl.plot(mass_num[j],a_dum, color=colors[pl_index],
                            marker=points[point_set][pl_index],
                            linestyle=plot_type[pl_index],
                            markersize=6,label=label,
                            markeredgecolor='None')
                    pl.legend(loc='best').draw_frame(False)
                else:
                    pl.plot(mass_num[j],a_dum, color=colors[pl_index],
                            marker=points[point_set][pl_index],
                            linestyle=plot_type[pl_index],
                            markersize=6,markeredgecolor='None')

        # Label each element near its most abundant isotope.
        abu_max = max(a_dum)
        max_index=where(a_dum==abu_max)[0][0]
        coordinates=[mass_num[j][max_index],abu_max]
        if mov:
            artist2=ax.text(coordinates[0]+0.1,1.05*coordinates[1],el_list[j],clip_on=True)
        else:
            if show_names:
                # pl.text(coordinates[0]+0.1,1.05*coordinates[1],el_list[j],clip_on=True)
                pl.text(coordinates[0],np.log10(2.2*10.**coordinates[1]),
                        el_list[j],clip_on=True,
                        horizontalalignment='center')

        # Cycle through the 5 style slots.
        pl_index+=1
        if pl_index > 4:
            pl_index = 0
        ylim1=min(ylim1,min(a_dum))
        ylim2=max(ylim2,max(a_dum))
        if mov:
            artists.extend([artist1,artist2])

    # now trimming the ylims
    if log_logic:
        dylim=0.05*(ylim2-ylim1)
        ylim1 = ylim1 -dylim
        ylim2 = ylim2 +dylim
        if ref is not -1:
            ylim2 = min(ylim2,4)
            ylim1 = max(ylim1,-4)
        else:
            ylim2 = min(ylim2,0.2)
            ylim1 = max(ylim1,-13)
    else:
        ylim1 = ylim1 *0.8
        ylim2 = ylim2 *1.1

    if include_title:
        if plotType=='se':
            if ref == -1:
                title = str('Range %4.2f' %mass_range[0]) + str('-%4.2f' %mass_range[1]) +\
                        str(' for cycle %d' %int(cycle))
            else:
                title = str('Range %4.2f' %mass_range[0]) + \
                        str('-%4.2f' %mass_range[1]) + str(' for cycle %d' %int(cycle))+\
                        str(' relative to cycle %d' %int(ref))
        else:
            if ref == -1:
                title = str('Cycle %d' %int(cycle))
            else:
                title = str('Cycle %d' %int(cycle))+\
                        str(' relative to cycle %d' %int(ref))
        print("including title: ...")
        if mov:
            artist1,=ax.title(title)
            artists.append(artist1)
        else:
            pl.title(title)

    if ylim[0] == 0 and ylim[1] == 0:
        pl.ylim(ylim1,ylim2)
    else:
        pl.ylim(ylim[0],ylim[1])
    pl.xlim([amass_range[0]-.5,amass_range[1]+.5])

    pl.xlabel('mass number (A)',fontsize=14)
    if ref is not -1:
        if log_logic:
            pl.ylabel(r'log abundance ratio',fontsize=14)
        else:
            pl.ylabel(r'abundance ratio',fontsize=14)
    else:
        if log_logic:
            pl.ylabel(r'log mass fraction ',fontsize=14)
        else:
            pl.ylabel(r'mass fraction',fontsize=14)

    if amass_range != None:
        minimum_mass = amass_range[0]
        maximum_mass = amass_range[1]
    elif mass_range != None:
        minimum_mass = mass_range[0]
        maximum_mass = mass_range[1]
    else:
        minimum_mass = 0
        maximum_mass = 200

    # Reference line at ratio 1 (linear) or log-ratio 0.
    if log_logic == False:
        if mov:
            artist1,=ax.plot([amass_range[0]-.5,amass_range[1]+.5],[1,1],'k-')
            artists.append(artist1)
        else:
            pl.plot([amass_range[0]-.5,amass_range[1]+.5],[1,1],'k-')
    else:
        if mov:
            artist1,=ax.plot([amass_range[0]-.5,amass_range[1]+.5],[0,0],'k-')
            artists.append(artist1)
        else:
            pl.plot([amass_range[0]-.5,amass_range[1]+.5],[0,0],'k-')

    # Build x tick labels: label every 5th mass number, blanks elsewhere.
    labelsx=[]
    if (maximum_mass-minimum_mass) > 100:
        delta_labelsx = 10
    else:
        delta_labelsx = 5

    iii = amass_range[0]%delta_labelsx
    if iii == 0:
        labelsx.append(str(amass_range[0]))
    else:
        labelsx.append(' ')
    iii = iii+1
    kkk = 0
    for label1 in range(amass_range[1]-amass_range[0]):
        if iii == 5:
            kkk = kkk+1
            labelsx.append(str((iii*kkk)+amass_range[0]-(amass_range[0]%5)))
            iii = 0
            iii = iii+1
        else:
            labelsx.append(' ')
            iii = iii+1

    if delta_labelsx == 5:
        xticks = arange(amass_range[0],amass_range[1],1)
        pl.xticks(xticks,labelsx)
    else:
        pl.xticks()

    # SJONES moved the pl.grid and pl.show to the very end
    if grid:
        pl.grid()
    if show:
        pl.show()

    ##!!FOR!!######        print 'LEN LABELS= ', len(labelsx)
    ##DEBUGGING####
    ####!!!########        for bbb in range (len(labelsx)):
    ###############            print labelsx[bbb]

    if mov:
        return artists
plot the abundance of all the chemical species Parameters ---------- cycle : string, integer or list The cycle of interest. If it is a list of cycles, this method will do a plot for each cycle and save them to a file. stable : boolean, optional A boolean of whether to filter out the unstables. The defaults is False. amass_range : list, optional A 1x2 array containing the lower and upper atomic mass range. If None plot entire available atomic mass range. The default is None. mass_range : list, optional A 1x2 array containing the lower and upper mass range. If this is an instance of abu_vector this will only plot isotopes that have an atominc mass within this range. This will throw an error if this range does not make sense ie [45,2]. If None, it will plot over the entire range. The defaults is None. ylim : list, optional A 1x2 array containing the lower and upper Y limits. If it is [0,0], then ylim will be determined automatically. The default is [0,0]. ref : integer or list, optional reference cycle. If it is not -1, this method will plot the abundences of cycle devided by the cycle of the same instance given in the ref variable. If ref is a list it will be interpreted to have two elements: ref=['dir/of/ref/run',cycle] which uses a refernece cycle from another run. If any abundence in the reference cycle is zero, it will replace it with 1e-99. The default is -1. show : boolean, optional Boolean of if the plot should be displayed. The default is True. log_logic : boolean, optional Plot abundances in log scale or linear. The default is True. decayed : boolean, optional If True plot decayed distributions, else plot life distribution. The default is False. color_plot : boolean, optional Color dots and lines [True/False]. The default is True. grid : boolean, optional print grid. The default is False. point_set : integer, optional Set to 0, 1 or 2 to select one of three point sets, useful for multiple abundances or ratios in one plot. The defalult is 1. 
include_title : boolean, optional Include a title with the plot. The default is False. drawfig, drawax, mov : optional, not necessary for user to set these variables The figure and axes containers to be drawn on, and whether or not a movie is being made (only True when se.movie is called, which sets mov to True automatically elemaburtn : boolean, private If true, iso_abund() returns after writing self.***_iso_to_plot for use with other plotting routines.f mypoint : string, optional fix the marker style of all the points in this plot to one type, given as a string. If None, multiple point styles are used as per point_set. The default is None
def combine_context_errors(self):
    """Each alignment contributes some information to the error report.
    These reports for each alignment need to be gone through and combined
    into one report.

    :returns: Dictionary containing the error counts on context base
    :rtype: dict()
    """
    # The target and query merges were two byte-identical 25-line loops;
    # factored into one helper to remove the copy-paste duplication.
    def _merge_reports(combined, reports):
        # Fold every per-alignment report into *combined* in place.
        # Layout: combined[base][context][against] =
        #     {'total': int, 'types': {error_type: count}}
        for report in reports:
            for b in report:
                combined.setdefault(b, {})
                for c in report[b]:
                    combined[b].setdefault(c, {})
                    for a in report[b][c]:
                        slot = combined[b][c].setdefault(
                            a, {'total': 0, 'types': {}})
                        slot['total'] += report[b][c][a]['total']
                        for err_type, cnt in report[b][c][a]['types'].items():
                            slot['types'][err_type] = (
                                slot['types'].get(err_type, 0) + cnt)
        return combined

    # Start from the previously accumulated report when one exists.
    self._target_context_errors = _merge_reports(
        self._target_context_errors if self._target_context_errors else {},
        [x.get_context_target_errors() for x in self._alignment_errors])
    self._query_context_errors = _merge_reports(
        self._query_context_errors if self._query_context_errors else {},
        [x.get_context_query_errors() for x in self._alignment_errors])
Each alignment contributes some information to the error report. These reports for each alignment need to be gone through and combined into one report. :returns: Dictionary containing the error counts on context base :rtype: dict()
def most_by_uncertain(self, y):
    """
    Extracts the predicted classes which correspond to the selected class (y)
    and have probabilities nearest to 1/number_of_classes (eg. 0.5 for 2
    classes, 0.33 for 3 classes) for the selected class.

    Arguments:
        y (int): the selected class

    Returns:
        idxs (numpy.ndarray): An array of indexes (numpy.ndarray)
    """
    # Restrict the search to the examples whose true label is y.
    class_mask = self.ds.y == y
    return self.most_uncertain_by_mask(class_mask, y)
Extracts the predicted classes which correspond to the selected class (y) and have probabilities nearest to 1/number_of_classes (eg. 0.5 for 2 classes, 0.33 for 3 classes) for the selected class. Arguments: y (int): the selected class Returns: idxs (numpy.ndarray): An array of indexes (numpy.ndarray)
def reset(self):
    """
    Reset the Quantum Abstract Machine to its initial state, which is
    particularly useful when it has gotten into an unwanted state. This
    can happen, for example, if the QAM is interrupted in the middle of
    a run.
    """
    # Discard any partially-loaded program state ...
    self._variables_shim = {}
    self._executable = None
    self._bitstrings = None
    # ... and report a clean, freshly-connected machine.
    self.status = 'connected'
Reset the Quantum Abstract Machine to its initial state, which is particularly useful when it has gotten into an unwanted state. This can happen, for example, if the QAM is interrupted in the middle of a run.
def reset_course(self, course_id):
    """
    Reset a course.

    Deletes the current course, and creates a new equivalent course with
    no content, but all sections and users moved over.
    """
    params = {}
    data = {}

    # Required path parameter: the course ID.
    path = {"course_id": course_id}

    self.logger.debug("POST /api/v1/courses/{course_id}/reset_content with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request(
        "POST",
        "/api/v1/courses/{course_id}/reset_content".format(**path),
        data=data,
        params=params,
        single_item=True,
    )
Reset a course. Deletes the current course, and creates a new equivalent course with no content, but all sections and users moved over.
def determine_encoding(buf):
    """Return the appropriate encoding for the given CSS source, according
    to the CSS charset rules.

    `buf` may be either a string or bytes.

    :returns: the encoding name (the @charset value when present and
        consistent, otherwise the BOM-detected or default encoding)
    :raises UnicodeError: for an unsupported UTF-32-2143 BOM, or when the
        declared @charset disagrees with the BOM-detected encoding.
    """
    # The ultimate default is utf8; bravo, W3C
    bom_encoding = 'UTF-8'

    if not buf:
        # Empty input: nothing to inspect; fall back to the default.
        return bom_encoding

    if not isinstance(buf, bytes):
        # We got a file that, for whatever reason, produces already-decoded
        # text.  Check for the BOM (which is useless now) and believe
        # whatever's in the @charset.
        if buf[0] == '\ufeff':
            # BUGFIX: this was `buf = buf[0:]` (a no-op), so the BOM was
            # never stripped and a following @charset rule was ignored.
            buf = buf[1:]

        # This is pretty similar to the code below, but without any encoding
        # double-checking.
        charset_start = '@charset "'
        charset_end = '";'
        if buf.startswith(charset_start):
            start = len(charset_start)
            end = buf.index(charset_end, start)
            return buf[start:end]
        else:
            return bom_encoding

    # BOMs
    if buf[:3] == b'\xef\xbb\xbf':
        bom_encoding = 'UTF-8'
        buf = buf[3:]
    if buf[:4] == b'\x00\x00\xfe\xff':
        bom_encoding = 'UTF-32BE'
        buf = buf[4:]
    elif buf[:4] == b'\xff\xfe\x00\x00':
        bom_encoding = 'UTF-32LE'
        buf = buf[4:]
    if buf[:4] == b'\x00\x00\xff\xfe':
        raise UnicodeError("UTF-32-2143 is not supported")
    elif buf[:4] == b'\xfe\xff\x00\x00':
        raise UnicodeError("UTF-32-2143 is not supported")
    elif buf[:2] == b'\xfe\xff':
        bom_encoding = 'UTF-16BE'
        buf = buf[2:]
    elif buf[:2] == b'\xff\xfe':
        bom_encoding = 'UTF-16LE'
        buf = buf[2:]

    # The spec requires exactly this syntax; no escapes or extra spaces or
    # other shenanigans, thank goodness.
    charset_start = '@charset "'.encode(bom_encoding)
    charset_end = '";'.encode(bom_encoding)
    if buf.startswith(charset_start):
        start = len(charset_start)
        end = buf.index(charset_end, start)
        encoded_encoding = buf[start:end]
        encoding = encoded_encoding.decode(bom_encoding)

        # Ensure that decoding with the specified encoding actually produces
        # the same @charset rule
        encoded_charset = buf[:end + len(charset_end)]
        if (encoded_charset.decode(encoding)
                != encoded_charset.decode(bom_encoding)):
            # BUGFIX: the format arguments were swapped, naming the BOM
            # encoding as the @charset and vice versa.
            raise UnicodeError(
                "@charset {0} is incompatible with detected encoding {1}"
                .format(encoding, bom_encoding))
    else:
        # With no @charset, believe the BOM
        encoding = bom_encoding

    return encoding
Return the appropriate encoding for the given CSS source, according to the CSS charset rules. `buf` may be either a string or bytes.
def update_stat(self, mode='open', infostr='', stat=''):
    """
    write operation stats to log

    :param mode: 'open', 'saveas', 'listtree'
    :param infostr: string to put into info_st
    :param stat: 'OK' or 'ERR'
    """
    # Dispatch to the per-mode handler registered in self._update_stat.
    handler = self._update_stat[mode]
    handler(mode, infostr, stat)
write operation stats to log :param mode: 'open', 'saveas', 'listtree' :param infostr: string to put into info_st :param stat: 'OK' or 'ERR'
def _key_deploy_run(self, host, target, re_run=True):
    '''
    The ssh-copy-id routine: install this master's public SSH key on
    *host*, then (optionally) re-run the original command over the new
    key-based connection.

    :param host: hostname the key is deployed to
    :param target: per-host connection options (user, passwd, ...);
        mutated — ``passwd`` is popped before the re-run so the key is used
    :param re_run: re-execute ``self.opts['argv']`` after key deployment
    :return: dict mapping *host* to the parsed result, stderr text, or raw
        stdout
    '''
    # Wrapper-call argv that appends our public key to authorized_keys.
    argv = [
        'ssh.set_auth_key',
        target.get('user', 'root'),
        self.get_pubkey(),
    ]

    single = Single(
            self.opts,
            argv,
            host,
            mods=self.mods,
            fsclient=self.fsclient,
            thin=self.thin,
            **target)
    if salt.utils.path.which('ssh-copy-id'):
        # we have ssh-copy-id, use it!
        stdout, stderr, retcode = single.shell.copy_id()
    else:
        stdout, stderr, retcode = single.run()
    if re_run:
        # Drop the password so the fresh connection authenticates with
        # the key we just deployed.
        target.pop('passwd')
        single = Single(
                self.opts,
                self.opts['argv'],
                host,
                mods=self.mods,
                fsclient=self.fsclient,
                thin=self.thin,
                **target)
        stdout, stderr, retcode = single.cmd_block()
        try:
            # Minions wrap results under a 'local' key; unwrap when present.
            data = salt.utils.json.find_json(stdout)
            return {host: data.get('local', data)}
        except Exception:
            if stderr:
                return {host: stderr}
            return {host: 'Bad Return'}
    # No re-run: report the key-deployment outcome itself.
    if salt.defaults.exitcodes.EX_OK != retcode:
        return {host: stderr}
    return {host: stdout}
The ssh-copy-id routine
def from_dict(data, ctx):
    """
    Instantiate a new UnitsAvailableDetails from a dict (generally from
    loading a JSON response). The data used to instantiate the
    UnitsAvailableDetails is a shallow copy of the dict passed in, with any
    complex child types instantiated appropriately.
    """
    data = dict(data)

    # Convert the decimal-number fields from their wire representation.
    for field in ('long', 'short'):
        raw = data.get(field)
        if raw is not None:
            data[field] = ctx.convert_decimal_number(raw)

    return UnitsAvailableDetails(**data)
Instantiate a new UnitsAvailableDetails from a dict (generally from loading a JSON response). The data used to instantiate the UnitsAvailableDetails is a shallow copy of the dict passed in, with any complex child types instantiated appropriately.
def do_login(self, line):
    "login aws-access-key aws-secret"
    # BUGFIX: help string read "aws-acces-key" (typo shown to the user by
    # the cmd module's help).
    # With arguments, connect with the supplied key pair; with none, let
    # connect() resolve credentials on its own.
    if line:
        args = self.getargs(line)
        self.connect(args[0], args[1])
    else:
        self.connect()
    # Refresh the table listing for the new connection.
    self.do_tables('')
login aws-access-key aws-secret
def _release_line(c):
    """
    Examine current repo state to determine what type of release to prep.

    :returns:
        A two-tuple of ``(branch-name, line-type)`` where:

        - ``branch-name`` is the current branch name, e.g. ``1.1``,
          ``master``, ``gobbledygook`` (or, usually, ``HEAD`` if not on a
          branch).
        - ``line-type`` is a symbolic member of `.Release` representing what
          "type" of release the line appears to be for:

            - ``Release.BUGFIX`` if on a bugfix/stable release line, e.g.
              ``1.1``.
            - ``Release.FEATURE`` if on a feature-release branch (typically
              ``master``).
            - ``Release.UNDEFINED`` if neither of those appears to apply
              (usually means on some unmerged feature/dev branch).
    """
    # TODO: I don't _think_ this technically overlaps with Releases (because
    # that only ever deals with changelog contents, and therefore full release
    # version numbers) but in case it does, move it there sometime.
    # TODO: this and similar calls in this module may want to be given an
    # explicit pointer-to-git-repo option (i.e. if run from outside project
    # context).
    # TODO: major releases? or are they big enough events we don't need to
    # bother with the script? Also just hard to gauge - when is master the next
    # 1.x feature vs 2.0?
    result = c.run("git rev-parse --abbrev-ref HEAD", hide=True)
    branch = result.stdout.strip()
    # FEATURE takes precedence over BUGFIX, matching the original
    # last-match-wins ordering.
    if FEATURE_RE.match(branch):
        line_type = Release.FEATURE
    elif BUGFIX_RE.match(branch):
        line_type = Release.BUGFIX
    else:
        line_type = Release.UNDEFINED
    return branch, line_type
Examine current repo state to determine what type of release to prep. :returns: A two-tuple of ``(branch-name, line-type)`` where: - ``branch-name`` is the current branch name, e.g. ``1.1``, ``master``, ``gobbledygook`` (or, usually, ``HEAD`` if not on a branch). - ``line-type`` is a symbolic member of `.Release` representing what "type" of release the line appears to be for: - ``Release.BUGFIX`` if on a bugfix/stable release line, e.g. ``1.1``. - ``Release.FEATURE`` if on a feature-release branch (typically ``master``). - ``Release.UNDEFINED`` if neither of those appears to apply (usually means on some unmerged feature/dev branch).
def __parse_tag(self, tag, count):
    """Parse *count* APEv2 items out of the raw tag bytes and store them
    on self (a mapping of key -> typed value).

    Raises IOError and APEBadItemError
    """
    fileobj = cBytesIO(tag)

    for i in xrange(count):
        # Each item starts with an 8-byte header: 4-byte little-endian
        # value size, then 4 bytes of flags.
        tag_data = fileobj.read(8)
        # someone writes wrong item counts
        if not tag_data:
            break
        if len(tag_data) != 8:
            raise error
        size = cdata.uint32_le(tag_data[:4])
        flags = cdata.uint32_le(tag_data[4:8])

        # Bits 1 and 2 bits are flags, 0-3
        # Bit 0 is read/write flag, ignored
        kind = (flags & 6) >> 1
        if kind == 3:
            raise APEBadItemError("value type must be 0, 1, or 2")
        # The key is NUL-terminated; read it one byte at a time.
        key = value = fileobj.read(1)
        if not key:
            raise APEBadItemError
        while key[-1:] != b'\x00' and value:
            value = fileobj.read(1)
            if not value:
                raise APEBadItemError
            key += value
        if key[-1:] == b"\x00":
            key = key[:-1]
        if PY3:
            # APEv2 keys must be ASCII; surface decode failures as
            # APEBadItemError with the original traceback.
            try:
                key = key.decode("ascii")
            except UnicodeError as err:
                reraise(APEBadItemError, err, sys.exc_info()[2])
        # The value payload follows immediately, *size* bytes long.
        value = fileobj.read(size)
        if len(value) != size:
            raise APEBadItemError
        # Wrap the raw bytes in the value class matching *kind*.
        value = _get_value_type(kind)._new(value)
        self[key] = value
Raises IOError and APEBadItemError
def _produce_output(report, failed, setup):
    '''
    Produce output from the report dictionary generated by _generate_report

    :param report: report dict to render
    :param failed: truthy when the highstate run failed; selects the
        failure/success email subject
    :param setup: configuration dict (report_format, report_delivery,
        file/SMTP settings)
    '''
    # Render the report as json, yaml, or (default fallback) html.
    report_format = setup.get('report_format', 'yaml')
    log.debug('highstate output format: %s', report_format)

    if report_format == 'json':
        report_text = salt.utils.json.dumps(report)
    elif report_format == 'yaml':
        string_file = StringIO()
        salt.utils.yaml.safe_dump(report, string_file, default_flow_style=False)
        string_file.seek(0)
        report_text = string_file.read()
    else:
        string_file = StringIO()
        _generate_html(report, string_file)
        string_file.seek(0)
        report_text = string_file.read()

    # Deliver either to a file or via SMTP email.
    report_delivery = setup.get('report_delivery', 'file')
    log.debug('highstate report_delivery: %s', report_delivery)
    if report_delivery == 'file':
        # _sprinkle presumably expands placeholders (e.g. grains) in the
        # path — TODO confirm.
        output_file = _sprinkle(setup.get('file_output', '/tmp/test.rpt'))
        with salt.utils.files.fopen(output_file, 'w') as out:
            out.write(salt.utils.stringutils.to_str(report_text))
    else:
        msg = MIMEText(report_text, report_format)

        sender = setup.get('smtp_sender', '')
        recipients = setup.get('smtp_recipients', '')
        host = setup.get('smtp_server', '')
        port = int(setup.get('smtp_port', 25))
        tls = setup.get('smtp_tls')
        username = setup.get('smtp_username')
        password = setup.get('smtp_password')

        if failed:
            subject = setup.get('smtp_failure_subject', 'Installation failure')
        else:
            subject = setup.get('smtp_success_subject', 'Installation success')
        subject = _sprinkle(subject)

        msg['Subject'] = subject
        msg['From'] = sender
        msg['To'] = recipients

        log.debug('highstate smtp port: %d', port)
        smtp = smtplib.SMTP(host=host, port=port)
        if tls is True:
            smtp.starttls()
            log.debug('highstate smtp tls enabled')
        if username and password:
            smtp.login(username, password)
            log.debug('highstate smtp authenticated')
        # Recipients are a comma-separated string in the config.
        smtp.sendmail(
            sender,
            [x.strip() for x in recipients.split(',')],
            msg.as_string())
        log.debug('highstate message sent.')
        smtp.quit()
Produce output from the report dictionary generated by _generate_report
def plot_margins(*, fig=None, inches=1., centers=True, edges=True):
    """Draw reference lines marking a figure's margins, center, and edges.

    Useful for checking that figure-layout scripts behave as intended
    and for designing figure layouts.

    Parameters
    ----------
    fig : matplotlib.figure.Figure object (optional)
        Figure to draw onto. If None, the current figure is used.
        Default is None.
    inches : float (optional)
        Margin size, in inches. Default is 1.
    centers : bool (optional)
        Whether to draw red lines through the figure center.
        Default is True.
    edges : bool (optional)
        Whether to draw black lines along the figure edges.
        Default is True.
    """
    if fig is None:
        fig = plt.gcf()
    width, height = fig.get_size_inches()
    # Margin offsets expressed in figure-fraction coordinates.
    frac_x = inches / width
    frac_y = inches / height

    def _line(xdata, ydata, **kwargs):
        # All lines live in figure coordinates on this figure.
        return matplotlib.lines.Line2D(
            xdata, ydata, transform=fig.transFigure, figure=fig, **kwargs
        )

    # Margin lines: left, right, bottom, top.
    fig.lines.extend([
        _line([frac_x, frac_x], [0, 1]),
        _line([1 - frac_x, 1 - frac_x], [0, 1]),
        _line([0, 1], [frac_y, frac_y]),
        _line([0, 1], [1 - frac_y, 1 - frac_y]),
    ])
    if centers:
        fig.lines.extend([
            _line([0.5, 0.5], [0, 1], c="r"),
            _line([0, 1], [0.5, 0.5], c="r"),
        ])
    if edges:
        fig.lines.extend([
            _line([0, 0], [0, 1], c="k"),
            _line([1, 1], [0, 1], c="k"),
            _line([0, 1], [0, 0], c="k"),
            _line([0, 1], [1, 1], c="k"),
        ])
Add lines onto a figure indicating the margins, centers, and edges. Useful for ensuring your figure design scripts work as intended, and for laying out figures. Parameters ---------- fig : matplotlib.figure.Figure object (optional) The figure to plot onto. If None, gets current figure. Default is None. inches : float (optional) The size of the figure margin, in inches. Default is 1. centers : bool (optional) Toggle for plotting lines indicating the figure center. Default is True. edges : bool (optional) Toggle for plotting lines indicating the figure edges. Default is True.
def generate_file_from_template(self, template, destination): """Generate a file from a Jinja2 `template` and writes it to `destination` using `params`. `overwrite` allows to overwrite existing files. It is passed to the `generate` method. This is used by the different init implementations to generate init scripts/configs and deploy them to the relevant directories. Templates are looked up under init/templates/`template`. If the `destination` directory doesn't exist, it will alert the user and exit. We don't want to be creating any system related directories out of the blue. The exception to the rule is with nssm. While it may seem a bit weird, not all relevant directories exist out of the box. For instance, `/etc/sysconfig` doesn't necessarily exist even if systemd is used by default. """ # We cast the object to a string before passing it on as py3.x # will fail on Jinja2 if there are ints/bytes (not strings) in the # template which will not allow `env.from_string(template)` to # take place. templates = str(pkgutil.get_data(__name__, os.path.join( 'templates', template))) pretty_params = json.dumps(self.params, indent=4, sort_keys=True) self.logger.debug( 'Rendering %s with params: %s...', template, pretty_params) generated = jinja2.Environment().from_string( templates).render(self.params) self.logger.debug('Writing generated file to %s...', destination) self._should_overwrite(destination) with open(destination, 'w') as f: f.write(generated) self.files.append(destination)
Generate a file from a Jinja2 `template` and writes it to `destination` using `params`. `overwrite` allows to overwrite existing files. It is passed to the `generate` method. This is used by the different init implementations to generate init scripts/configs and deploy them to the relevant directories. Templates are looked up under init/templates/`template`. If the `destination` directory doesn't exist, it will alert the user and exit. We don't want to be creating any system related directories out of the blue. The exception to the rule is with nssm. While it may seem a bit weird, not all relevant directories exist out of the box. For instance, `/etc/sysconfig` doesn't necessarily exist even if systemd is used by default.
def difference(self, instrument1, instrument2, bounds, data_labels, cost_function):
    """
    Calculates the difference in signals from multiple
    instruments within the given bounds.

    Parameters
    ----------
    instrument1 : Instrument
        Information must already be loaded into the
        instrument.
    instrument2 : Instrument
        Information must already be loaded into the
        instrument.
    bounds : list of tuples in the form
        (inst1_label, inst2_label, min, max, max_difference)
        inst1_label and inst2_label are labels for the data in
        instrument1 and instrument2; min and max are bounds on the data
        considered; max_difference is the maximum difference between two
        points for the difference to be calculated.
    data_labels : list of tuples of data labels
        The first key is used to access data in s1
        and the second data in s2.
    cost_function : function
        function that operates on two rows of the instrument data.
        used to determine the distance between two points for finding
        closest points.

    Returns
    -------
    data_df : pandas DataFrame
        Each row has a point from instrument1, with the keys
        preceded by '1_', and a point within bounds on that point
        from instrument2 with the keys preceded by '2_', and the
        difference between the instruments' data for all the labels
        in data_labels

    Created as part of a Spring 2018 UTDesign project.
    """
    # Output columns: one difference per data_label, the bounded
    # coordinates from each instrument, and the cost-function distance.
    labels = ([dl1 for dl1, dl2 in data_labels]
              + ['1_' + b[0] for b in bounds]
              + ['2_' + b[1] for b in bounds]
              + ['dist'])
    data = {label: [] for label in labels}

    # Restrict both instruments to the user-supplied [min, max) windows.
    inst1 = instrument1.data
    inst2 = instrument2.data
    for label1, label2, low, high, _ in bounds:
        data1 = inst1[label1]
        inst1 = inst1.iloc[np.where((data1 >= low) & (data1 < high))]
        data2 = inst2[label2]
        inst2 = inst2.iloc[np.where((data2 >= low) & (data2 < high))]

    for _, s1_point in inst1.iterrows():
        # Candidate points of instrument2 within max_difference of this
        # point on every bounded coordinate.
        # NOTE(review): the candidate search starts from the full
        # instrument2.data, not the [min, max)-filtered inst2; this
        # matches the original behavior -- confirm it is intended.
        s2_near = instrument2.data
        for label1, label2, _, _, max_dist in bounds:
            center = s1_point[label1]
            data2 = s2_near[label2]
            indices = np.where((data2 >= center - max_dist)
                               & (data2 < center + max_dist))
            s2_near = s2_near.iloc[indices]

        # Nearest candidate according to cost_function. The
        # `min_dist != min_dist` test is a NaN check that accepts the
        # first candidate unconditionally.
        s2_nearest = None
        min_dist = float('NaN')
        for _, s2_point in s2_near.iterrows():
            dist = cost_function(s1_point, s2_point)
            if dist < min_dist or min_dist != min_dist:
                min_dist = dist
                s2_nearest = s2_point

        data['dist'].append(min_dist)

        # Differences for each data label (NaN when nothing was near).
        for dl1, dl2 in data_labels:
            if s2_nearest is not None:
                data[dl1].append(s1_point[dl1] - s2_nearest[dl2])
            else:
                data[dl1].append(float('NaN'))

        # Raw coordinates from both instruments for this row.
        for label1, label2, _, _, _ in bounds:
            data['1_' + label1].append(s1_point[label1])
            if s2_nearest is not None:
                data['2_' + label2].append(s2_nearest[label2])
            else:
                data['2_' + label2].append(float('NaN'))

    return pds.DataFrame(data=data)
Calculates the difference in signals from multiple instruments within the given bounds. Parameters ---------- instrument1 : Instrument Information must already be loaded into the instrument. instrument2 : Instrument Information must already be loaded into the instrument. bounds : list of tuples in the form (inst1_label, inst2_label, min, max, max_difference) inst1_label and inst2_label are labels for the data in instrument1 and instrument2 min and max are bounds on the data considered max_difference is the maximum difference between two points for the difference to be calculated data_labels : list of tuples of data labels The first key is used to access data in s1 and the second data in s2. cost_function : function function that operates on two rows of the instrument data. used to determine the distance between two points for finding closest points Returns ------- data_df: pandas DataFrame Each row has a point from instrument1, with the keys preceded by '1_', and a point within bounds on that point from instrument2 with the keys preceded by '2_', and the difference between the instruments' data for all the labels in data_labels Created as part of a Spring 2018 UTDesign project.
def create(self, name, instance_dir=None, config='solrconfig.xml', schema='schema.xml'):
    """http://wiki.apache.org/solr/CoreAdmin#head-7ca1b98a9df8b8ca0dcfbfc49940ed5ac98c4a08"""
    # The instance directory defaults to the core name when not given.
    return self._get_url(self.url, params={
        'action': 'CREATE',
        'name': name,
        'config': config,
        'schema': schema,
        'instanceDir': name if instance_dir is None else instance_dir,
    })
http://wiki.apache.org/solr/CoreAdmin#head-7ca1b98a9df8b8ca0dcfbfc49940ed5ac98c4a08
def team_absent(name, profile="github", **kwargs):
    '''
    Ensure a team is absent.

    Example:

    .. code-block:: yaml

        ensure team test is present in github:
            github.team_absent:
                - name: 'test'

    The following parameters are required:

    name
        This is the name of the team in the organization.

    .. versionadded:: 2016.11.0
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    existing = __salt__['github.get_team'](name, profile=profile, **kwargs)

    # Nothing to do when the team is already gone.
    if not existing:
        ret['result'] = True
        ret['comment'] = 'Team {0} does not exist'.format(name)
        return ret

    # In test mode, report what would happen without acting.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = "Team {0} will be deleted".format(name)
        return ret

    if __salt__['github.remove_team'](name, profile=profile, **kwargs):
        ret['result'] = True
        ret['comment'] = 'Deleted team {0}'.format(name)
        ret['changes']['old'] = 'Team {0} exists'.format(name)
        ret['changes']['new'] = 'Team {0} deleted'.format(name)
    else:
        ret['result'] = False
        ret['comment'] = 'Failed to delete {0}'.format(name)
    return ret
Ensure a team is absent. Example: .. code-block:: yaml ensure team test is present in github: github.team_absent: - name: 'test' The following parameters are required: name This is the name of the team in the organization. .. versionadded:: 2016.11.0
def GetOrderKey(self):
    """Return a tuple that can be used to sort problems into a consistent order.

    Returns:
      A list of values.
    """
    # Sort by type first, then the shared context parts, then any
    # subclass-specific attributes; missing attributes sort as None.
    attributes = ['_type']
    attributes.extend(ExceptionWithContext.CONTEXT_PARTS)
    attributes.extend(self._GetExtraOrderAttributes())
    return [getattr(self, attribute, None) for attribute in attributes]
Return a tuple that can be used to sort problems into a consistent order. Returns: A list of values.
def get_verba_link_for_schedule(self, schedule):
    """
    Returns a link to verba.  The link varies by campus and schedule.
    Multiple calls to this with the same schedule may result in
    different urls.
    """
    url = self.get_verba_url(schedule)
    response = Book_DAO().getURL(url, {"Accept": "application/json"})

    if response.status != 200:
        raise DataFailureException(url, response.status, response.data)

    # The payload keys include one verba store code: two uppercase
    # letters followed by five digits.
    for key in json.loads(response.data):
        if re.match(r'^[A-Z]{2}[0-9]{5}$', key):
            return "%s%s&quarter=%s" % (BOOK_PREFIX,
                                        key,
                                        schedule.term.quarter)
Returns a link to verba. The link varies by campus and schedule. Multiple calls to this with the same schedule may result in different urls.
def _slice_mostly_sorted(array, keep, rest, ind=None):
    """Slice dask array `array` that is almost entirely sorted already.

    We perform approximately `2 * len(keep)` slices on `array`.
    This is OK, since `keep` is small. Individually, each of these
    slices is entirely sorted.

    Parameters
    ----------
    array : dask.array.Array
    keep : ndarray[Int]
        This must be sorted.
    rest : ndarray[Bool]
    ind : ndarray[Int], optional

    Returns
    -------
    sliced : dask.array.Array
    """
    if ind is None:
        ind = np.arange(len(array))
    # Permutation putting the kept indices and the rest in sorted order.
    idx = np.argsort(np.concatenate([keep, ind[rest]]))

    # Build a list of sorted spans: each kept index as a singleton,
    # with the (already sorted) runs between them as range slices.
    pieces = []
    if keep[0] > 0:  # avoid creating empty slices
        pieces.append(slice(None, keep[0]))
    pieces.append([keep[0]])
    for left, right in zip(keep[:-1], keep[1:]):
        if right > left + 1:  # avoid creating empty slices
            pieces.append(slice(left + 1, right))
        pieces.append([right])
    if keep[-1] < len(array) - 1:  # avoid creating empty slices
        pieces.append(slice(keep[-1] + 1, None))

    return da.concatenate([array[idx[piece]] for piece in pieces])
Slice dask array `array` that is almost entirely sorted already. We perform approximately `2 * len(keep)` slices on `array`. This is OK, since `keep` is small. Individually, each of these slices is entirely sorted. Parameters ---------- array : dask.array.Array keep : ndarray[Int] This must be sorted. rest : ndarray[Bool] ind : ndarray[Int], optional Returns ------- sliced : dask.array.Array
def get_capabilities_by_type(self, strict_type_matching: bool = False) -> Dict[Type, Dict[str, Dict[str, Parser]]]:
    """
    For all types that are supported, lists all extensions that can be
    parsed into such a type. For each extension, provides the list of
    parsers supported. The order is "most pertinent first".

    This method is for monitoring and debug, so we prefer to not rely
    on the cache, but rather on the query engine. That will ensure
    consistency of the results.

    :param strict_type_matching:
    :return:
    """
    check_var(strict_type_matching, var_types=bool, var_name='strict_matching')

    # One entry per supported type, each mapping extensions to parsers.
    return {typ: self.get_capabilities_for_type(typ, strict_type_matching)
            for typ in self.get_all_supported_types()}
For all types that are supported, lists all extensions that can be parsed into such a type. For each extension, provides the list of parsers supported. The order is "most pertinent first" This method is for monitoring and debug, so we prefer to not rely on the cache, but rather on the query engine. That will ensure consistency of the results. :param strict_type_matching: :return:
def rasterToPolygon(raster_file, polygon_file):
    """
    Converts watershed raster to polygon and then dissolves it.
    It dissolves features based on the LINKNO attribute.

    raster_file
        Path to the input watershed raster.
    polygon_file
        Path to the output dissolved polygon shapefile (overwritten if
        it already exists).
    """
    log("Process: Raster to Polygon ...")
    time_start = datetime.utcnow()
    # Intermediate (undissolved) shapefile next to the working dir.
    temp_polygon_file = \
        "{0}_temp.shp".format(
            os.path.splitext(os.path.basename(polygon_file))[0])

    # Vectorize the raster; each polygon carries its LINKNO value.
    GDALGrid(raster_file).to_polygon(out_shapefile=temp_polygon_file,
                                     fieldname="LINKNO",
                                     self_mask=True)

    log("Time to convert to polygon: {0}"
        .format(datetime.utcnow()-time_start))

    log("Dissolving ...")
    time_start_dissolve = datetime.utcnow()
    ogr_polygin_shapefile = ogr.Open(temp_polygon_file)
    ogr_polygon_shapefile_lyr = ogr_polygin_shapefile.GetLayer()
    number_of_features = ogr_polygon_shapefile_lyr.GetFeatureCount()
    # Collect the LINKNO of every raw polygon so features sharing an id
    # can be grouped and merged below.
    polygon_rivid_list = np.zeros(number_of_features, dtype=np.int32)
    for feature_idx, catchment_feature in \
            enumerate(ogr_polygon_shapefile_lyr):
        polygon_rivid_list[feature_idx] = \
            catchment_feature.GetField('LINKNO')

    shp_drv = ogr.GetDriverByName('ESRI Shapefile')
    # Remove output shapefile if it already exists
    if os.path.exists(polygon_file):
        shp_drv.DeleteDataSource(polygon_file)

    # Output layer: same spatial reference, single LINKNO attribute.
    dissolve_shapefile = shp_drv.CreateDataSource(polygon_file)
    dissolve_layer = \
        dissolve_shapefile.CreateLayer(
            '', ogr_polygon_shapefile_lyr.GetSpatialRef(),
            ogr.wkbPolygon)
    dissolve_layer.CreateField(ogr.FieldDefn('LINKNO', ogr.OFTInteger))
    dissolve_layer_defn = dissolve_layer.GetLayerDefn()

    for unique_rivid in np.unique(polygon_rivid_list):
        # get indices where it is in the polygon
        feature_indices = np.where(polygon_rivid_list == unique_rivid)[0]
        new_feat = ogr.Feature(dissolve_layer_defn)
        new_feat.SetField('LINKNO', int(unique_rivid))

        if len(feature_indices) == 1:
            # write feature to file
            feature = \
                ogr_polygon_shapefile_lyr.GetFeature(feature_indices[0])
            new_feat.SetGeometry(feature.GetGeometryRef())
        else:
            # dissolve: union all polygons sharing this LINKNO via
            # shapely, then convert back to an OGR geometry.
            dissolve_poly_list = []
            for feature_index in feature_indices:
                feature = \
                    ogr_polygon_shapefile_lyr.GetFeature(feature_index)
                feat_geom = feature.GetGeometryRef()
                dissolve_poly_list.append(
                    shapely_loads(feat_geom.ExportToWkb()))
            dissolve_polygon = cascaded_union(dissolve_poly_list)
            new_feat.SetGeometry(
                ogr.CreateGeometryFromWkb(dissolve_polygon.wkb))
        dissolve_layer.CreateFeature(new_feat)
    # clean up
    shp_drv.DeleteDataSource(temp_polygon_file)
    log("Time to dissolve: {0}".format(datetime.utcnow() -
                                       time_start_dissolve))
    log("Total time to convert: {0}".format(datetime.utcnow() -
                                            time_start))
Converts watershed raster to polygon and then dissolves it. It dissolves features based on the LINKNO attribute.