positive: string column, 100 to 30.3k characters
anchor: string column, 1 to 15k characters
def _CalculateStorageCounters(self, storage_reader): """Calculates the counters of the entire storage. Args: storage_reader (StorageReader): storage reader. Returns: dict[str,collections.Counter]: storage counters. """ analysis_reports_counter = collections.Counter() analysis_reports_counter_error = False event_labels_counter = collections.Counter() event_labels_counter_error = False parsers_counter = collections.Counter() parsers_counter_error = False for session in storage_reader.GetSessions(): # Check for a dict for backwards compatibility. if isinstance(session.analysis_reports_counter, dict): analysis_reports_counter += collections.Counter( session.analysis_reports_counter) elif isinstance(session.analysis_reports_counter, collections.Counter): analysis_reports_counter += session.analysis_reports_counter else: analysis_reports_counter_error = True # Check for a dict for backwards compatibility. if isinstance(session.event_labels_counter, dict): event_labels_counter += collections.Counter( session.event_labels_counter) elif isinstance(session.event_labels_counter, collections.Counter): event_labels_counter += session.event_labels_counter else: event_labels_counter_error = True # Check for a dict for backwards compatibility. if isinstance(session.parsers_counter, dict): parsers_counter += collections.Counter(session.parsers_counter) elif isinstance(session.parsers_counter, collections.Counter): parsers_counter += session.parsers_counter else: parsers_counter_error = True storage_counters = {} warnings_by_path_spec = collections.Counter() warnings_by_parser_chain = collections.Counter() for warning in list(storage_reader.GetWarnings()): warnings_by_path_spec[warning.path_spec.comparable] += 1 warnings_by_parser_chain[warning.parser_chain] += 1 storage_counters['warnings_by_path_spec'] = warnings_by_path_spec storage_counters['warnings_by_parser_chain'] = warnings_by_parser_chain if not analysis_reports_counter_error: storage_counters['analysis_reports'] = analysis_reports_counter if not event_labels_counter_error: storage_counters['event_labels'] = event_labels_counter if not parsers_counter_error: storage_counters['parsers'] = parsers_counter return storage_counters
Calculates the counters of the entire storage. Args: storage_reader (StorageReader): storage reader. Returns: dict[str,collections.Counter]: storage counters.
def _exec(cmd): """Execute command using subprocess.Popen :param cmd: :return: (code, stdout, stderr) """ process = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE) # pylint: disable=unexpected-keyword-arg (stdout, stderr) = process.communicate(timeout=defaults.DEFAULT_VCS_TIMEOUT) return process.returncode, stdout.decode(), stderr.decode()
Execute command using subprocess.Popen :param cmd: :return: (code, stdout, stderr)
def _load_diff(args, extra_opts): """ :param args: :class:`argparse.Namespace` object :param extra_opts: Map object given to API.load as extra options """ try: diff = API.load(args.inputs, args.itype, ac_ignore_missing=args.ignore_missing, ac_merge=args.merge, ac_template=args.template, ac_schema=args.schema, **extra_opts) except API.UnknownProcessorTypeError: _exit_with_output("Wrong input type '%s'" % args.itype, 1) except API.UnknownFileTypeError: _exit_with_output("No appropriate backend was found for given file " "'%s'" % args.itype, 1) _exit_if_load_failure(diff, "Failed to load: args=%s" % ", ".join(args.inputs)) return diff
:param args: :class:`argparse.Namespace` object :param extra_opts: Map object given to API.load as extra options
def lru_cache(maxsize=100, typed=False): """Least-recently-used cache decorator. If *maxsize* is set to None, the LRU features are disabled and the cache can grow without bound. If *typed* is True, arguments of different types will be cached separately. For example, f(3.0) and f(3) will be treated as distinct calls with distinct results. Arguments to the cached function must be hashable. View the cache statistics named tuple (hits, misses, maxsize, currsize) with f.cache_info(). Clear the cache and statistics with f.cache_clear(). Access the underlying function with f.__wrapped__. See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used """ # Users should only access the lru_cache through its public API: # cache_info, cache_clear, and f.__wrapped__ # The internals of the lru_cache are encapsulated for thread safety and # to allow the implementation to change (including a possible C version). def decorating_function(user_function): cache = dict() stats = [0, 0] # make statistics updateable non-locally HITS, MISSES = 0, 1 # names for the stats fields kwd_mark = (object(),) # separate positional and keyword args cache_get = cache.get # bound method to lookup key or return None _len = len # localize the global len() function lock = Lock() # because linkedlist updates aren't threadsafe root = [] # root of the circular doubly linked list nonlocal_root = [root] # make updateable non-locally root[:] = [root, root, None, None] # initialize by pointing to self PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields def make_key(args, kwds, typed, tuple=tuple, sorted=sorted, type=type): # helper function to build a cache key from positional and keyword args key = args if kwds: sorted_items = tuple(sorted(kwds.items())) key += kwd_mark + sorted_items if typed: key += tuple(type(v) for v in args) if kwds: key += tuple(type(v) for k, v in sorted_items) return key if maxsize == 0: def wrapper(*args, **kwds): # no caching, just do a statistics update after a successful call result = user_function(*args, **kwds) stats[MISSES] += 1 return result elif maxsize is None: def wrapper(*args, **kwds): # simple caching without ordering or size limit key = make_key(args, kwds, typed) if kwds or typed else args result = cache_get(key, root) # root used here as a unique not-found sentinel if result is not root: stats[HITS] += 1 return result result = user_function(*args, **kwds) cache[key] = result stats[MISSES] += 1 return result else: def wrapper(*args, **kwds): # size limited caching that tracks accesses by recency key = make_key(args, kwds, typed) if kwds or typed else args with lock: link = cache_get(key) if link is not None: # record recent use of the key by moving it to the front of the list root, = nonlocal_root link_prev, link_next, key, result = link link_prev[NEXT] = link_next link_next[PREV] = link_prev last = root[PREV] last[NEXT] = root[PREV] = link link[PREV] = last link[NEXT] = root stats[HITS] += 1 return result result = user_function(*args, **kwds) with lock: root = nonlocal_root[0] if _len(cache) < maxsize: # put result in a new link at the front of the list last = root[PREV] link = [last, root, key, result] cache[key] = last[NEXT] = root[PREV] = link else: # use root to store the new key and result root[KEY] = key root[RESULT] = result cache[key] = root # empty the oldest link and make it the new root root = nonlocal_root[0] = root[NEXT] del cache[root[KEY]] root[KEY] = None root[RESULT] = None stats[MISSES] += 1 return result def cache_info(): """Report cache statistics""" 
with lock: return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache)) def cache_clear(): """Clear the cache and cache statistics""" with lock: cache.clear() root = nonlocal_root[0] root[:] = [root, root, None, None] stats[:] = [0, 0] wrapper.__wrapped__ = user_function wrapper.cache_info = cache_info wrapper.cache_clear = cache_clear return update_wrapper(wrapper, user_function) return decorating_function
Least-recently-used cache decorator. If *maxsize* is set to None, the LRU features are disabled and the cache can grow without bound. If *typed* is True, arguments of different types will be cached separately. For example, f(3.0) and f(3) will be treated as distinct calls with distinct results. Arguments to the cached function must be hashable. View the cache statistics named tuple (hits, misses, maxsize, currsize) with f.cache_info(). Clear the cache and statistics with f.cache_clear(). Access the underlying function with f.__wrapped__. See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
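The decorator above mirrors the behaviour of the standard-library functools.lru_cache, so a minimal usage sketch can lean on the stdlib version; the fib function below is made up purely for illustration.

from functools import lru_cache

@lru_cache(maxsize=128)
def fib(n):
    # Naive recursion becomes linear once repeated calls hit the cache.
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(30))           # 832040
print(fib.cache_info())  # CacheInfo(hits=..., misses=31, maxsize=128, currsize=31)
fib.cache_clear()        # reset both the cache and its statistics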
def close(self): """ Closes all resources/backends associated with this queue manager. """ self.log.info("Shutting down queue manager.") if hasattr(self.store, 'close'): self.store.close() if hasattr(self.subscriber_scheduler, 'close'): self.subscriber_scheduler.close() if hasattr(self.queue_scheduler, 'close'): self.queue_scheduler.close()
Closes all resources/backends associated with this queue manager.
def archive_filenames(self): """Return the list of files inside an archive file.""" try: return _bfd.archive_list_filenames(self._ptr) except TypeError as err: raise BfdException(err)
Return the list of files inside an archive file.
def transition_counts(alpha, beta, A, pobs, T=None, out=None): """ Sum for all t the probability to transition from state i to state j. Parameters ---------- alpha : ndarray((T,N), dtype = float), optional, default = None alpha[t,i] is the ith forward coefficient of time t. beta : ndarray((T,N), dtype = float), optional, default = None beta[t,i] is the ith backward coefficient of time t. A : ndarray((N,N), dtype = float) transition matrix of the hidden states pobs : ndarray((T,N), dtype = float) pobs[t,i] is the observation probability for observation at time t given hidden state i T : int number of time steps out : ndarray((N,N), dtype = float), optional, default = None container for the resulting count matrix. If None, a new matrix will be created. Returns ------- counts : numpy.array shape (N, N) counts[i, j] is the summed probability to transition from i to j in time [0,T) See Also -------- forward : calculate forward coefficients `alpha` backward : calculate backward coefficients `beta` """ if __impl__ == __IMPL_PYTHON__: return ip.transition_counts(alpha, beta, A, pobs, T=T, out=out, dtype=config.dtype) elif __impl__ == __IMPL_C__: return ic.transition_counts(alpha, beta, A, pobs, T=T, out=out, dtype=config.dtype) else: raise RuntimeError('Nonexisting implementation selected: '+str(__impl__))
Sum for all t the probability to transition from state i to state j. Parameters ---------- alpha : ndarray((T,N), dtype = float), optional, default = None alpha[t,i] is the ith forward coefficient of time t. beta : ndarray((T,N), dtype = float), optional, default = None beta[t,i] is the ith backward coefficient of time t. A : ndarray((N,N), dtype = float) transition matrix of the hidden states pobs : ndarray((T,N), dtype = float) pobs[t,i] is the observation probability for observation at time t given hidden state i T : int number of time steps out : ndarray((N,N), dtype = float), optional, default = None container for the resulting count matrix. If None, a new matrix will be created. Returns ------- counts : numpy.array shape (N, N) counts[i, j] is the summed probability to transition from i to j in time [0,T) See Also -------- forward : calculate forward coefficients `alpha` backward : calculate backward coefficients `beta`
def setedge(delta, is_multigraph, graph, orig, dest, idx, exists): """Change a delta to say that an edge was created or deleted""" if is_multigraph(graph): delta.setdefault(graph, {}).setdefault('edges', {})\ .setdefault(orig, {}).setdefault(dest, {})[idx] = bool(exists) else: delta.setdefault(graph, {}).setdefault('edges', {})\ .setdefault(orig, {})[dest] = bool(exists)
Change a delta to say that an edge was created or deleted
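A quick sketch of the delta layout this builds, assuming the setedge function above is importable; the graph and node names here are hypothetical.

delta = {}
is_multigraph = lambda g: False   # treat every graph as a simple (non-multi) graph

# Record that an edge from 'a' to 'b' now exists in graph 'physical'.
setedge(delta, is_multigraph, 'physical', 'a', 'b', 0, True)
print(delta)   # {'physical': {'edges': {'a': {'b': True}}}}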
def download_attachments(self, dataset_identifier, content_type="json", download_dir="~/sodapy_downloads"): ''' Download all of the attachments associated with a dataset. Return the paths of downloaded files. ''' metadata = self.get_metadata(dataset_identifier, content_type=content_type) files = [] attachments = metadata['metadata'].get("attachments") if not attachments: logging.info("No attachments were found or downloaded.") return files download_dir = os.path.join(os.path.expanduser(download_dir), dataset_identifier) if not os.path.exists(download_dir): os.makedirs(download_dir) for attachment in attachments: file_path = os.path.join(download_dir, attachment["filename"]) has_assetid = attachment.get("assetId", False) if has_assetid: base = _format_old_api_request(dataid=dataset_identifier) assetid = attachment["assetId"] resource = "{0}/files/{1}?download=true&filename={2}"\ .format(base, assetid, attachment["filename"]) else: base = "/api/assets" assetid = attachment["blobId"] resource = "{0}/{1}?download=true".format(base, assetid) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) _download_file(uri, file_path) files.append(file_path) logging.info("The following files were downloaded:\n\t{0}".format("\n\t".join(files))) return files
Download all of the attachments associated with a dataset. Return the paths of downloaded files.
def get_password(vm_): r''' Return the password to use for a VM. vm\_ The configuration to obtain the password from. ''' return config.get_cloud_config_value( 'password', vm_, __opts__, default=config.get_cloud_config_value( 'passwd', vm_, __opts__, search_global=False ), search_global=False )
r''' Return the password to use for a VM. vm\_ The configuration to obtain the password from.
def _smart_separate_groups(groups, key, total): """Given a list of group objects, and a function to extract the number of elements for each of them, return the list of groups that have an excessive number of elements (when compared to a uniform distribution), a list of groups with insufficient elements, and a list of groups that already have the optimal number of elements. :param list groups: list of group objects :param func key: function to retrieve the current number of elements from the group object :param int total: total number of elements to distribute Example: .. code-block:: python smart_separate_groups([11, 9, 10, 14], lambda g: g) => ([14], [10, 9], [11]) """ optimum, extra = compute_optimum(len(groups), total) over_loaded, under_loaded, optimal = [], [], [] for group in sorted(groups, key=key, reverse=True): n_elements = key(group) additional_element = 1 if extra else 0 if n_elements > optimum + additional_element: over_loaded.append(group) elif n_elements == optimum + additional_element: optimal.append(group) elif n_elements < optimum + additional_element: under_loaded.append(group) extra -= additional_element return over_loaded, under_loaded, optimal
Given a list of group objects, and a function to extract the number of elements for each of them, return the list of groups that have an excessive number of elements (when compared to a uniform distribution), a list of groups with insufficient elements, and a list of groups that already have the optimal number of elements. :param list groups: list of group objects :param func key: function to retrieve the current number of elements from the group object :param int total: total number of elements to distribute Example: .. code-block:: python smart_separate_groups([11, 9, 10, 14], lambda g: g) => ([14], [10, 9], [11])
def connected(self, msg): """Once I've connected I want to subscribe to the message queue. """ stomper.Engine.connected(self, msg) self.log.info("Connected: session %s. Beginning say hello." % msg['headers']['session']) def setup_looping_call(): lc = LoopingCall(self.send) lc.start(2) reactor.callLater(1, setup_looping_call) f = stomper.Frame() f.unpack(stomper.subscribe(DESTINATION)) # ActiveMQ specific headers: # # prevent the messages we send coming back to us. f.headers['activemq.noLocal'] = 'true' return f.pack()
Once I've connected I want to subscribe to the message queue.
def get_random_hex(length): """ Return random hex string of a given length """ if length <= 0: return '' return hexify(random.randint(pow(2, length*2), pow(2, length*4)))[0:length]
Return random hex string of a given length
def wait_time(self, value): """ Setter for **self.__wait_time** attribute. :param value: Attribute value. :type value: int or float """ if value is not None: assert type(value) in (int, float), "'{0}' attribute: '{1}' type is not 'int' or 'float'!".format( "wait_time", value) assert value >= 0, "'{0}' attribute: '{1}' need to be positive!".format("wait_time", value) self.__wait_time = value
Setter for **self.__wait_time** attribute. :param value: Attribute value. :type value: int or float
def get_objective_requisite_assignment_session(self, proxy): """Gets the session for managing objective requisites. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.learning.ObjectiveRequisiteAssignmentSession) - an ``ObjectiveRequisiteAssignmentSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_objective_requisite_assignment()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_objective_requisite_assignment()`` is ``true``.* """ if not self.supports_objective_requisite_assignment(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.ObjectiveRequisiteAssignmentSession(proxy=proxy, runtime=self._runtime)
Gets the session for managing objective requisites. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.learning.ObjectiveRequisiteAssignmentSession) - an ``ObjectiveRequisiteAssignmentSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_objective_requisite_assignment()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_objective_requisite_assignment()`` is ``true``.*
def get_wake_on_network(): ''' Displays whether 'wake on network' is on or off if supported :return: A string value representing the "wake on network" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_wake_on_network ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonnetworkaccess') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on'
Displays whether 'wake on network' is on or off if supported :return: A string value representing the "wake on network" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_wake_on_network
def execute(self, request, target_route): """ :meth:`.WWebServiceProto.execute` method implementation """ presenter = self.create_presenter(request, target_route) presenter_name = target_route.presenter_name() action_name = target_route.presenter_action() presenter_args = target_route.presenter_args() if hasattr(presenter, action_name) is False: raise RuntimeError('No such action "%s" for "%s" presenter' % (action_name, presenter_name)) action = getattr(presenter, action_name) if ismethod(action) is False: raise RuntimeError( 'Unable to execute "%s" action for "%s" presenter' % (action_name, presenter_name) ) args_spec = getfullargspec(action) defaults = len(args_spec.defaults) if args_spec.defaults is not None else 0 action_args = list() action_kwargs = dict() for i in range(len(args_spec.args)): arg = args_spec.args[i] if arg == 'self': continue is_kwarg = i >= (len(args_spec.args) - defaults) if is_kwarg is False: action_args.append(presenter_args[arg]) elif arg in presenter_args: action_kwargs[arg] = presenter_args[arg] return action(*action_args, **action_kwargs)
:meth:`.WWebServiceProto.execute` method implementation
def final_bounces(fetches, url): """ Resolves redirect chains in `fetches` and returns a list of fetches representing the final redirect destinations of the given url. There could be more than one if for example youtube-dl hit the same url with HEAD and then GET requests. """ redirects = {} for fetch in fetches: # XXX check http status 301,302,303,307? check for "uri" header # as well as "location"? see urllib.request.HTTPRedirectHandler if 'location' in fetch['response_headers']: redirects[fetch['url']] = fetch final_url = url while final_url in redirects: fetch = redirects.pop(final_url) final_url = urllib.parse.urljoin( fetch['url'], fetch['response_headers']['location']) final_bounces = [] for fetch in fetches: if fetch['url'] == final_url: final_bounces.append(fetch) return final_bounces
Resolves redirect chains in `fetches` and returns a list of fetches representing the final redirect destinations of the given url. There could be more than one if for example youtube-dl hit the same url with HEAD and then GET requests.
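A usage sketch with made-up fetch records, assuming final_bounces above is importable: the original URL redirects twice before the content is actually fetched.

fetches = [
    {'url': 'http://a.example/', 'response_headers': {'location': 'http://b.example/'}},
    {'url': 'http://b.example/', 'response_headers': {'location': '/video'}},
    {'url': 'http://b.example/video', 'response_headers': {'content-type': 'video/mp4'}},
]
print([f['url'] for f in final_bounces(fetches, 'http://a.example/')])
# ['http://b.example/video']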
def _ReadUUIDDataTypeDefinition( self, definitions_registry, definition_values, definition_name, is_member=False): """Reads an UUID data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. definition_name (str): name of the definition. is_member (Optional[bool]): True if the data type definition is a member data type definition. Returns: UUIDDataTypeDefinition: UUID data type definition. Raises: DefinitionReaderError: if the definitions values are missing or if the format is incorrect. """ return self._ReadFixedSizeDataTypeDefinition( definitions_registry, definition_values, data_types.UUIDDefinition, definition_name, self._SUPPORTED_ATTRIBUTES_FIXED_SIZE_DATA_TYPE, default_size=16, is_member=is_member, supported_size_values=(16, ))
Reads an UUID data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. definition_name (str): name of the definition. is_member (Optional[bool]): True if the data type definition is a member data type definition. Returns: UUIDDataTypeDefinition: UUID data type definition. Raises: DefinitionReaderError: if the definitions values are missing or if the format is incorrect.
def get_unset_inputs(self): """ Return a set of unset inputs """ return set([k for k, v in self._inputs.items() if v.is_empty(False)])
Return a set of unset inputs
def set_cache_url (self): """Set the URL to be used for caching.""" # remove anchor from cached target url since we assume # URLs with different anchors to have the same content self.cache_url = urlutil.urlunsplit(self.urlparts[:4]+[u'']) if self.cache_url is not None: assert isinstance(self.cache_url, unicode), repr(self.cache_url)
Set the URL to be used for caching.
def _instance_parser(self, plugins): """ internal method to parse instances of plugins. Determines if each plugin is a class or an object instance and calls the appropriate handler method. """ plugins = util.return_list(plugins) for instance in plugins: if inspect.isclass(instance): self._handle_class_instance(instance) else: self._handle_object_instance(instance)
internal method to parse instances of plugins. Determines if each plugin is a class or an object instance and calls the appropriate handler method.
def stride(self): """Step per axis between neighboring points of a uniform grid. If the grid contains axes that are not uniform, ``stride`` has a ``NaN`` entry. For degenerate (length 1) axes, ``stride`` has value ``0.0``. Returns ------- stride : numpy.array Array of dtype ``float`` and length `ndim`. Examples -------- >>> rg = uniform_grid([-1.5, -1], [-0.5, 3], (2, 3)) >>> rg.stride array([ 1., 2.]) NaN returned for non-uniform dimension: >>> g = RectGrid([0, 1, 2], [0, 1, 4]) >>> g.stride array([ 1., nan]) 0.0 returned for degenerate dimension: >>> g = RectGrid([0, 1, 2], [0]) >>> g.stride array([ 1., 0.]) """ # Cache for efficiency instead of re-computing if self.__stride is None: strd = [] for i in range(self.ndim): if not self.is_uniform_byaxis[i]: strd.append(float('nan')) elif self.nondegen_byaxis[i]: strd.append(self.extent[i] / (self.shape[i] - 1.0)) else: strd.append(0.0) self.__stride = np.array(strd) return self.__stride.copy()
Step per axis between neighboring points of a uniform grid. If the grid contains axes that are not uniform, ``stride`` has a ``NaN`` entry. For degenerate (length 1) axes, ``stride`` has value ``0.0``. Returns ------- stride : numpy.array Array of dtype ``float`` and length `ndim`. Examples -------- >>> rg = uniform_grid([-1.5, -1], [-0.5, 3], (2, 3)) >>> rg.stride array([ 1., 2.]) NaN returned for non-uniform dimension: >>> g = RectGrid([0, 1, 2], [0, 1, 4]) >>> g.stride array([ 1., nan]) 0.0 returned for degenerate dimension: >>> g = RectGrid([0, 1, 2], [0]) >>> g.stride array([ 1., 0.])
def left(ctx, text, num_chars): """ Returns the first characters in a text string """ num_chars = conversions.to_integer(num_chars, ctx) if num_chars < 0: raise ValueError("Number of chars can't be negative") return conversions.to_string(text, ctx)[0:num_chars]
Returns the first characters in a text string
def shrink(self, src, width=0, max_value=0, filter_method=None, path=None, flags=0): """Shrink sketch Params: <Sketch> src_sketch <int> width <int> max_value <lambda> | <function> filter <str> path <int> flags """ if filter_method: get_ = _madoka.Sketch_get__ set_ = _madoka.Sketch_set__ new_sketch = Sketch(width, max_value, path, flags, src.seed) for table_id in range(SKETCH_DEPTH): for offset in range(width, src.width, width): for cell_id in range(width): val = get_(src, table_id, offset + cell_id) val = filter_method(val) val = max_value if val > max_value else val if val > get_(new_sketch, table_id, cell_id): set_(new_sketch, table_id, cell_id, val) self.swap(new_sketch) else: _madoka.Sketch_shrink(self, src, width, max_value, None, path, flags)
Shrink sketch Params: <Sketch> src_sketch <int> width <int> max_value <lambda> | <function> filter <str> path <int> flags
def retrieve_order(self, order_id): """Retrieve details on a single order.""" response = self.request(E.retrieveOrderSslCertRequest( E.id(order_id) )) return response.as_model(SSLOrder)
Retrieve details on a single order.
def delete(self, request, id=None): """ Handles delete requests. """ if id: obj = get_object_or_404(self.queryset(request), id=id) if not self.has_delete_permission(request, obj): return HttpResponseForbidden(_('You do not have permission to perform this action.')) else: return self.delete_object(request, obj) else: # No delete requests allowed on collection view return HttpResponseForbidden()
Handles delete requests.
def ReadClientLastPings(self, min_last_ping=None, max_last_ping=None, fleetspeak_enabled=None, cursor=None): """Reads client ids for all clients in the database.""" query = "SELECT client_id, UNIX_TIMESTAMP(last_ping) FROM clients " query_values = [] where_filters = [] if min_last_ping is not None: where_filters.append("last_ping >= FROM_UNIXTIME(%s) ") query_values.append(mysql_utils.RDFDatetimeToTimestamp(min_last_ping)) if max_last_ping is not None: where_filters.append( "(last_ping IS NULL OR last_ping <= FROM_UNIXTIME(%s))") query_values.append(mysql_utils.RDFDatetimeToTimestamp(max_last_ping)) if fleetspeak_enabled is not None: if fleetspeak_enabled: where_filters.append("fleetspeak_enabled IS TRUE") else: where_filters.append( "(fleetspeak_enabled IS NULL OR fleetspeak_enabled IS FALSE)") if where_filters: query += "WHERE " + "AND ".join(where_filters) cursor.execute(query, query_values) last_pings = {} for int_client_id, last_ping in cursor.fetchall(): client_id = db_utils.IntToClientID(int_client_id) last_pings[client_id] = mysql_utils.TimestampToRDFDatetime(last_ping) return last_pings
Reads client ids for all clients in the database.
def require_option(current_ctx: click.Context, param_name: str) -> None: """Throw an exception if an option wasn't provided. This is useful when it's optional in some contexts but required for a subcommand""" ctx = current_ctx param_definition = None while ctx is not None: # ctx.command.params has the actual definition of the param. We use # this when raising the exception. param_definition = next( (p for p in ctx.command.params if p.name == param_name), None ) # ctx.params has the current value of the parameter, as set by the user. if ctx.params.get(param_name): return ctx = ctx.parent assert param_definition, f"unknown parameter {param_name}" raise click.MissingParameter(ctx=current_ctx, param=param_definition)
Throw an exception if an option wasn't provided. This is useful when it's optional in some contexts but required for a subcommand
def _copy_flaky_attributes(cls, test, test_class): """ Copy flaky attributes from the test callable or class to the test. :param test: The test that is being prepared to run :type test: :class:`nose.case.Test` """ test_callable = cls._get_test_callable(test) if test_callable is None: return for attr, value in cls._get_flaky_attributes(test_class).items(): already_set = hasattr(test, attr) if already_set: continue attr_on_callable = getattr(test_callable, attr, None) if attr_on_callable is not None: cls._set_flaky_attribute(test, attr, attr_on_callable) elif value is not None: cls._set_flaky_attribute(test, attr, value)
Copy flaky attributes from the test callable or class to the test. :param test: The test that is being prepared to run :type test: :class:`nose.case.Test`
def _get_expiry_timestamp(cls, session_server): """ :type session_server: core.SessionServer :rtype: datetime.datetime """ timeout_seconds = cls._get_session_timeout_seconds(session_server) time_now = datetime.datetime.now() return time_now + datetime.timedelta(seconds=timeout_seconds)
:type session_server: core.SessionServer :rtype: datetime.datetime
def convert_to_sympy_matrix(expr, full_space=None): """Convert a QNET expression to an explicit ``n x n`` instance of `sympy.Matrix`, where ``n`` is the dimension of `full_space`. The entries of the matrix may contain symbols. Parameters: expr: a QNET expression full_space (qnet.algebra.hilbert_space_algebra.HilbertSpace): The Hilbert space in which `expr` is defined. If not given, ``expr.space`` is used. The Hilbert space must have a well-defined basis. Raises: qnet.algebra.hilbert_space_algebra.BasisNotSetError: if `full_space` does not have a defined basis ValueError: if `expr` is not in `full_space`, or if `expr` cannot be converted. """ if full_space is None: full_space = expr.space if not expr.space.is_tensor_factor_of(full_space): raise ValueError("expr must be in full_space") if expr is IdentityOperator: return sympy.eye(full_space.dimension) elif expr is ZeroOperator: return 0 elif isinstance(expr, LocalOperator): n = full_space.dimension if full_space != expr.space: all_spaces = full_space.local_factors own_space_index = all_spaces.index(expr.space) factors = [sympy.eye(s.dimension) for s in all_spaces[:own_space_index]] factors.append(convert_to_sympy_matrix(expr, expr.space)) factors.extend([sympy.eye(s.dimension) for s in all_spaces[own_space_index + 1:]]) return tensor(*factors) if isinstance(expr, (Create, Jz, Jplus)): return SympyCreate(n) elif isinstance(expr, (Destroy, Jminus)): return SympyCreate(n).H elif isinstance(expr, Phase): phi = expr.phase result = sympy.zeros(n) for i in range(n): result[i, i] = sympy.exp(sympy.I * i * phi) return result elif isinstance(expr, Displace): alpha = expr.operands[1] a = SympyCreate(n) return (alpha * a - alpha.conjugate() * a.H).exp() elif isinstance(expr, Squeeze): eta = expr.operands[1] a = SympyCreate(n) return ((eta/2) * a**2 - (eta.conjugate()/2) * (a.H)**2).exp() elif isinstance(expr, LocalSigma): ket = basis_state(expr.index_j, n) bra = basis_state(expr.index_k, n).H return ket * bra else: raise ValueError("Cannot convert '%s' of type %s" % (str(expr), type(expr))) elif (isinstance(expr, Operator) and isinstance(expr, Operation)): if isinstance(expr, OperatorPlus): s = convert_to_sympy_matrix(expr.operands[0], full_space) for op in expr.operands[1:]: s += convert_to_sympy_matrix(op, full_space) return s elif isinstance(expr, OperatorTimes): # if any factor acts non-locally, we need to expand distributively. 
if any(len(op.space) > 1 for op in expr.operands): se = expr.expand() if se == expr: raise ValueError("Cannot represent as sympy matrix: %s" % expr) return convert_to_sympy_matrix(se, full_space) all_spaces = full_space.local_factors by_space = [] ck = 0 for ls in all_spaces: # group factors by associated local space ls_ops = [convert_to_sympy_matrix(o, o.space) for o in expr.operands if o.space == ls] if len(ls_ops): # compute factor associated with local space by_space.append(ls_ops[0]) for ls_op in ls_ops[1:]: by_space[-1] *= ls_op ck += len(ls_ops) else: # if trivial action, take identity matrix by_space.append(sympy.eye(ls.dimension)) assert ck == len(expr.operands) # combine local factors in tensor product if len(by_space) == 1: return by_space[0] else: return tensor(*by_space) elif isinstance(expr, Adjoint): return convert_to_sympy_matrix(expr.operand, full_space).H elif isinstance(expr, PseudoInverse): raise NotImplementedError( 'Cannot convert PseudoInverse to sympy matrix') elif isinstance(expr, NullSpaceProjector): raise NotImplementedError( 'Cannot convert NullSpaceProjector to sympy') elif isinstance(expr, ScalarTimesOperator): return expr.coeff * convert_to_sympy_matrix(expr.term, full_space) else: raise ValueError( "Cannot convert '%s' of type %s" % (str(expr), type(expr))) else: raise ValueError( "Cannot convert '%s' of type %s" % (str(expr), type(expr)))
Convert a QNET expression to an explicit ``n x n`` instance of `sympy.Matrix`, where ``n`` is the dimension of `full_space`. The entries of the matrix may contain symbols. Parameters: expr: a QNET expression full_space (qnet.algebra.hilbert_space_algebra.HilbertSpace): The Hilbert space in which `expr` is defined. If not given, ``expr.space`` is used. The Hilbert space must have a well-defined basis. Raises: qnet.algebra.hilbert_space_algebra.BasisNotSetError: if `full_space` does not have a defined basis ValueError: if `expr` is not in `full_space`, or if `expr` cannot be converted.
def add_shared_configs(p, base_dir=''): """Add configargparser/argparse configs for shared argument. Arguments: p - configargparse.ArgParser object base_dir - base directory for file/path defaults. """ p.add('--host', default='localhost', help="Service host") p.add('--port', '-p', type=int, default=8000, help="Service port") p.add('--app-host', default=None, help="Local application host for reverse proxy deployment, " "as opposed to service --host (must also specify --app-port)") p.add('--app-port', type=int, default=None, help="Local application port for reverse proxy deployment, " "as opposed to service --port (must also specify --app-host)") p.add('--image-dir', '-d', default=os.path.join(base_dir, 'testimages'), help="Image file directory") p.add('--generator-dir', default=os.path.join(base_dir, 'iiif/generators'), help="Generator directory for manipulator='gen'") p.add('--tile-height', type=int, default=512, help="Tile height") p.add('--tile-width', type=int, default=512, help="Tile width") p.add('--gauth-client-secret', default=os.path.join(base_dir, 'client_secret.json'), help="Name of file with Google auth client secret") p.add('--include-osd', action='store_true', help="Include a page with OpenSeadragon for each source") p.add('--access-cookie-lifetime', type=int, default=3600, help="Set access cookie lifetime for authenticated access in seconds") p.add('--access-token-lifetime', type=int, default=10, help="Set access token lifetime for authenticated access in seconds") p.add('--config', is_config_file=True, default=None, help='Read config from given file path') p.add('--debug', action='store_true', help="Set debug mode for web application. INSECURE!") p.add('--verbose', '-v', action='store_true', help="Be verbose") p.add('--quiet', '-q', action='store_true', help="Minimal output only")
Add configargparser/argparse configs for shared argument. Arguments: p - configargparse.ArgParser object base_dir - base directory for file/path defaults.
def policy_map_class_cl_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") policy_map = ET.SubElement(config, "policy-map", xmlns="urn:brocade.com:mgmt:brocade-policer") po_name_key = ET.SubElement(policy_map, "po-name") po_name_key.text = kwargs.pop('po_name') class_el = ET.SubElement(policy_map, "class_el") cl_name = ET.SubElement(class_el, "cl-name") cl_name.text = kwargs.pop('cl_name') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def load_tf_weights_in_openai_gpt(model, openai_checkpoint_folder_path): """ Load tf pre-trained weights in a pytorch model (from NumPy arrays here) """ import re import numpy as np print("Loading weights...") names = json.load(open(openai_checkpoint_folder_path + '/parameters_names.json', "r", encoding='utf-8')) shapes = json.load(open(openai_checkpoint_folder_path + '/params_shapes.json', "r", encoding='utf-8')) offsets = np.cumsum([np.prod(shape) for shape in shapes]) init_params = [np.load(openai_checkpoint_folder_path + '/params_{}.npy'.format(n)) for n in range(10)] init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1] init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)] # This was used when we had a single embedding matrix for positions and tokens # init_params[0] = np.concatenate([init_params[1], init_params[0]], 0) # del init_params[1] init_params = [arr.squeeze() for arr in init_params] try: assert model.tokens_embed.weight.shape == init_params[1].shape assert model.positions_embed.weight.shape == init_params[0].shape except AssertionError as e: e.args += (model.tokens_embed.weight.shape, init_params[1].shape) e.args += (model.positions_embed.weight.shape, init_params[0].shape) raise model.tokens_embed.weight.data = torch.from_numpy(init_params[1]) model.positions_embed.weight.data = torch.from_numpy(init_params[0]) names.pop(0) # Pop position and token embedding arrays init_params.pop(0) init_params.pop(0) for name, array in zip(names, init_params): # names[1:n_transfer], init_params[1:n_transfer]): name = name[6:] # skip "model/" assert name[-2:] == ":0" name = name[:-2] name = name.split('/') pointer = model for m_name in name: if re.fullmatch(r'[A-Za-z]+\d+', m_name): l = re.split(r'(\d+)', m_name) else: l = [m_name] if l[0] == 'g': pointer = getattr(pointer, 'weight') elif l[0] == 'b': pointer = getattr(pointer, 'bias') elif l[0] == 'w': pointer = getattr(pointer, 'weight') else: pointer = getattr(pointer, l[0]) if len(l) >= 2: num = int(l[1]) pointer = pointer[num] try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise print("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) return model
Load tf pre-trained weights in a pytorch model (from NumPy arrays here)
def listDevices(self, interface_id): """The CCU / Homegear asks for devices known to our XML-RPC server. We respond to that request using this method.""" LOG.debug("RPCFunctions.listDevices: interface_id = %s, _devices_raw = %s" % ( interface_id, str(self._devices_raw))) remote = interface_id.split('-')[-1] if remote not in self._devices_raw: self._devices_raw[remote] = [] if self.systemcallback: self.systemcallback('listDevices', interface_id) return self._devices_raw[remote]
The CCU / Homegear asks for devices known to our XML-RPC server. We respond to that request using this method.
def live_log_child(self): '''Start the logging child process if it died.''' if not (self.log_child and self.pid_is_alive(self.log_child)): self.start_log_child()
Start the logging child process if it died.
def preprocess( self, nb: "NotebookNode", resources: dict ) -> Tuple["NotebookNode", dict]: """Remove any raw cells from the Notebook. By default, exclude raw cells from the output. Change this by including global_content_filter->include_raw = True in the resources dictionary. This preprocessor is necessary because the NotebookExporter doesn't include the exclude_raw config.""" if not resources.get("global_content_filter", {}).get("include_raw", False): keep_cells = [] for cell in nb.cells: if cell.cell_type != "raw": keep_cells.append(cell) nb.cells = keep_cells return nb, resources
Remove any raw cells from the Notebook. By default, exclude raw cells from the output. Change this by including global_content_filter->include_raw = True in the resources dictionary. This preprocessor is necessary because the NotebookExporter doesn't include the exclude_raw config.
def write_to_path(self, path=None): """Write configuration to a file on disk.""" if path is None: path = self.path f = GitFile(path, 'wb') try: self.write_to_file(f) finally: f.close()
Write configuration to a file on disk.
def get_interfaces(device_name=None, **kwargs): ''' .. versionadded:: 2019.2.0 Returns interfaces for a specific device using arbitrary netbox filters device_name The name of the device, e.g., ``edge_router`` kwargs Optional arguments to be used for filtering CLI Example: .. code-block:: bash salt myminion netbox.get_interfaces edge_router name="et-0/0/5" ''' if not device_name: device_name = __opts__['id'] netbox_device = get_('dcim', 'devices', name=device_name) return filter_('dcim', 'interfaces', device_id=netbox_device['id'], **kwargs)
.. versionadded:: 2019.2.0 Returns interfaces for a specific device using arbitrary netbox filters device_name The name of the device, e.g., ``edge_router`` kwargs Optional arguments to be used for filtering CLI Example: .. code-block:: bash salt myminion netbox.get_interfaces edge_router name="et-0/0/5"
def rank(self, score): '''Return the 0-based index (rank) of ``score``. If the score is not available it returns a negative integer whose absolute value corresponds to the rightmost index with a score less than ``score``. ''' node = self._head rank = 0 for i in range(self._level-1, -1, -1): while node.next[i] and node.next[i].score < score: rank += node.width[i] node = node.next[i] node = node.next[0] if node and node.score == score: return rank else: return -2 - rank
Return the 0-based index (rank) of ``score``. If the score is not available it returns a negative integer whose absolute value corresponds to the rightmost index with a score less than ``score``.
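The return convention above (plain index on a hit, -2 - rank on a miss) can be reproduced over an ordinary sorted list of unique scores with bisect; this is a rough equivalent for illustration, not the skiplist itself.

import bisect

def rank_in_sorted(scores, score):
    # scores is a sorted list of unique values.
    i = bisect.bisect_left(scores, score)        # number of entries below `score`
    if i < len(scores) and scores[i] == score:
        return i                                  # exact hit: 0-based rank
    return -2 - i                                 # miss: encode the insertion point

print(rank_in_sorted([1.0, 2.5, 4.0], 2.5))   # 1
print(rank_in_sorted([1.0, 2.5, 4.0], 3.0))   # -4  (would insert at index 2)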
def _maybe_init_tags(self, run_id, tag_to_metadata): """Returns a tag-to-ID map for the given tags, creating rows if needed. Args: run_id: the ID of the run to which these tags belong. tag_to_metadata: map of tag name to SummaryMetadata for the tag. """ cursor = self._db.cursor() # TODO: for huge numbers of tags (e.g. 1000+), this is slower than just # querying for the known tag names explicitly; find a better tradeoff. cursor.execute('SELECT tag_name, tag_id FROM Tags WHERE run_id = ?', (run_id,)) tag_to_id = {row[0]: row[1] for row in cursor.fetchall() if row[0] in tag_to_metadata} new_tag_data = [] for tag, metadata in six.iteritems(tag_to_metadata): if tag not in tag_to_id: tag_id = self._create_id() tag_to_id[tag] = tag_id new_tag_data.append((run_id, tag_id, tag, time.time(), metadata.display_name, metadata.plugin_data.plugin_name, self._make_blob(metadata.plugin_data.content))) cursor.executemany( """ INSERT INTO Tags ( run_id, tag_id, tag_name, inserted_time, display_name, plugin_name, plugin_data ) VALUES (?, ?, ?, ?, ?, ?, ?) """, new_tag_data) return tag_to_id
Returns a tag-to-ID map for the given tags, creating rows if needed. Args: run_id: the ID of the run to which these tags belong. tag_to_metadata: map of tag name to SummaryMetadata for the tag.
def chunked(sentence): """ Returns a list of Chunk and Chink objects from the given sentence. Chink is a subclass of Chunk used for words that have Word.chunk == None (e.g., punctuation marks, conjunctions). """ # For example, to construct a training vector with the head of previous chunks as a feature. # Doing this with Sentence.chunks would discard the punctuation marks and conjunctions # (Sentence.chunks only yields Chunk objects), which may be useful features. chunks = [] for word in sentence: if word.chunk is not None: if len(chunks) == 0 or chunks[-1] != word.chunk: chunks.append(word.chunk) else: ch = Chink(sentence) ch.append(word.copy(ch)) chunks.append(ch) return chunks
Returns a list of Chunk and Chink objects from the given sentence. Chink is a subclass of Chunk used for words that have Word.chunk == None (e.g., punctuation marks, conjunctions).
def _get_aws_variables(self): """ Returns the AWS specific environment variables that should be available in the Lambda runtime. They are prefixed with "AWS_*". :return dict: Name and value of AWS environment variable """ result = { # Variable that says this function is running in Local Lambda "AWS_SAM_LOCAL": "true", # Function configuration "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": str(self.memory), "AWS_LAMBDA_FUNCTION_TIMEOUT": str(self.timeout), "AWS_LAMBDA_FUNCTION_HANDLER": str(self._function["handler"]), # AWS Credentials - Use the input credentials or use the defaults "AWS_REGION": self.aws_creds.get("region", self._DEFAULT_AWS_CREDS["region"]), "AWS_DEFAULT_REGION": self.aws_creds.get("region", self._DEFAULT_AWS_CREDS["region"]), "AWS_ACCESS_KEY_ID": self.aws_creds.get("key", self._DEFAULT_AWS_CREDS["key"]), "AWS_SECRET_ACCESS_KEY": self.aws_creds.get("secret", self._DEFAULT_AWS_CREDS["secret"]) # Additional variables we don't fill in # "AWS_ACCOUNT_ID=" # "AWS_LAMBDA_EVENT_BODY=", # "AWS_LAMBDA_FUNCTION_NAME=", # "AWS_LAMBDA_FUNCTION_VERSION=", } # Session Token should be added **only** if the input creds have a token and the value is not empty. if self.aws_creds.get("sessiontoken"): result["AWS_SESSION_TOKEN"] = self.aws_creds.get("sessiontoken") return result
Returns the AWS specific environment variables that should be available in the Lambda runtime. They are prefixed with "AWS_*". :return dict: Name and value of AWS environment variable
def setup_segment_generation(workflow, out_dir, tag=None): """ This function is the gateway for setting up the segment generation steps in a workflow. It is designed to be able to support multiple ways of obtaining these segments and to combine/edit such files as necessary for analysis. The current modules have the capability to generate files at runtime or to generate files that are not needed for workflow generation within the workflow. Parameters ----------- workflow : pycbc.workflow.core.Workflow The workflow instance that the coincidence jobs will be added to. This instance also contains the ifos for which to attempt to obtain segments for this analysis and the start and end times to search for segments over. out_dir : path The directory in which output will be stored. tag : string, optional (default=None) Use this to specify a tag. This can be used if this module is being called more than once to give call specific configuration (by setting options in [workflow-datafind-${TAG}] rather than [workflow-datafind]). This is also used to tag the Files returned by the class to uniqueify the Files and uniqueify the actual filename. FIXME: Filenames may not be unique with current codes! Returns ------- segsToAnalyse : dictionay of ifo-keyed glue.segment.segmentlist instances This will contain the times that your code should analyse. By default this is science time - CAT_1 vetoes. (This default could be changed if desired) segFilesList : pycbc.workflow.core.FileList of SegFile instances These are representations of the various segment files that were constructed at this stage of the workflow and may be needed at later stages of the analysis (e.g. for performing DQ vetoes). If the file was generated at run-time the segment lists contained within these files will be an attribute of the instance. (If it will be generated in the workflow it will not be because I am not psychic). """ logging.info("Entering segment generation module") make_analysis_dir(out_dir) cp = workflow.cp # Parse for options in ini file segmentsMethod = cp.get_opt_tags("workflow-segments", "segments-method", [tag]) # These only needed if calling setup_segment_gen_mixed if segmentsMethod in ['AT_RUNTIME','CAT2_PLUS_DAG','CAT3_PLUS_DAG', 'CAT4_PLUS_DAG']: veto_cats = cp.get_opt_tags("workflow-segments", "segments-veto-categories", [tag]) max_veto_cat = max([int(c) for c in veto_cats.split(',')]) veto_categories = range(1, max_veto_cat + 1) if cp.has_option_tags("workflow-segments", "segments-generate-coincident-segments", [tag]): generate_coincident_segs = True else: generate_coincident_segs = False # Need to curl the veto-definer file vetoDefUrl = cp.get_opt_tags("workflow-segments", "segments-veto-definer-url", [tag]) vetoDefBaseName = os.path.basename(vetoDefUrl) vetoDefNewPath = os.path.join(out_dir, vetoDefBaseName) resolve_url(vetoDefUrl,out_dir) # and update location cp.set("workflow-segments", "segments-veto-definer-file", vetoDefNewPath) if cp.has_option_tags("workflow-segments", "segments-minimum-segment-length", [tag]): minSegLength = int( cp.get_opt_tags("workflow-segments", "segments-minimum-segment-length", [tag]) ) else: minSegLength = 0 if segmentsMethod == "AT_RUNTIME": max_veto = 1000 elif segmentsMethod == "CAT2_PLUS_DAG": max_veto = 1 elif segmentsMethod == "CAT3_PLUS_DAG": max_veto = 2 elif segmentsMethod == "CAT4_PLUS_DAG": max_veto = 3 else: msg = "Entry segments-method in [workflow-segments] does not have " msg += "expected value. 
Valid values are AT_RUNTIME, CAT4_PLUS_DAG, " msg += "CAT2_PLUS_DAG or CAT3_PLUS_DAG." raise ValueError(msg) logging.info("Generating segments with setup_segment_gen_mixed") segFilesList = setup_segment_gen_mixed(workflow, veto_categories, out_dir, max_veto, tag=tag, generate_coincident_segs=generate_coincident_segs) logging.info("Segments obtained") # This creates the segsToAnalyse from the segFilesList. Currently it uses # the 'SCIENCE_OK' segFilesList, which is science - CAT_1 in # setup_segment_gen_mixed. # This also applies the minimum science length segsToAnalyse = {} for ifo in workflow.ifos: analSegs = segFilesList.find_output_with_ifo(ifo) analSegs = analSegs.find_output_with_tag('SCIENCE_OK') assert len(analSegs) == 1 analSegs = analSegs[0] if analSegs.segment_list: if minSegLength: analSegs.remove_short_sci_segs(minSegLength) analSegs.to_segment_xml(override_file_if_exists=True) segsToAnalyse[ifo] = analSegs.segment_list else: msg = "No science segments found for ifo %s. " %(ifo) msg += "If this is unexpected check the files that were dumped " msg += "in the %s directory. Also the " %(out_dir) msg += "commands that can be used to reproduce some of these " msg += "in %s/*.sh" %(os.path.join(out_dir,'logs')) logging.warn(msg) logging.info("Leaving segment generation module") return segsToAnalyse, segFilesList
This function is the gateway for setting up the segment generation steps in a workflow. It is designed to be able to support multiple ways of obtaining these segments and to combine/edit such files as necessary for analysis. The current modules have the capability to generate files at runtime or to generate files that are not needed for workflow generation within the workflow. Parameters ----------- workflow : pycbc.workflow.core.Workflow The workflow instance that the coincidence jobs will be added to. This instance also contains the ifos for which to attempt to obtain segments for this analysis and the start and end times to search for segments over. out_dir : path The directory in which output will be stored. tag : string, optional (default=None) Use this to specify a tag. This can be used if this module is being called more than once to give call specific configuration (by setting options in [workflow-datafind-${TAG}] rather than [workflow-datafind]). This is also used to tag the Files returned by the class to uniqueify the Files and uniqueify the actual filename. FIXME: Filenames may not be unique with current codes! Returns ------- segsToAnalyse : dictionary of ifo-keyed glue.segment.segmentlist instances This will contain the times that your code should analyse. By default this is science time - CAT_1 vetoes. (This default could be changed if desired) segFilesList : pycbc.workflow.core.FileList of SegFile instances These are representations of the various segment files that were constructed at this stage of the workflow and may be needed at later stages of the analysis (e.g. for performing DQ vetoes). If the file was generated at run-time the segment lists contained within these files will be an attribute of the instance. (If it will be generated in the workflow it will not be because I am not psychic).
def send_key(self, key): """Send a key command to the TV.""" if isinstance(key, Keys): key = key.value params = '<X_KeyEvent>{}</X_KeyEvent>'.format(key) self.soap_request(URL_CONTROL_NRC, URN_REMOTE_CONTROL, 'X_SendKey', params)
Send a key command to the TV.
async def jsk_shutdown(self, ctx: commands.Context): """ Logs this bot out. """ await ctx.send("Logging out now..") await ctx.bot.logout()
Logs this bot out.
def _identity(self, *args, **kwargs): ''' Local users and groups. accounts Can be either 'local', 'remote' or 'all' (equal to "local,remote"). Remote accounts cannot be resolved on all systems, but only on those which support 'passwd -S -a'. disabled True (or False, default) to return only disabled accounts. ''' LOCAL = 'local accounts' EXT = 'external accounts' data = dict() data[LOCAL] = self._get_local_users(disabled=kwargs.get('disabled')) data[EXT] = self._get_external_accounts(data[LOCAL].keys()) or 'N/A' data['local groups'] = self._get_local_groups() return data
Local users and groups. accounts Can be either 'local', 'remote' or 'all' (equal to "local,remote"). Remote accounts cannot be resolved on all systems, but only on those which support 'passwd -S -a'. disabled True (or False, default) to return only disabled accounts.
def parse_vtrgb(path='/etc/vtrgb'): ''' Parse the color table for the Linux console. ''' palette = () table = [] try: with open(path) as infile: for i, line in enumerate(infile): row = tuple(int(val) for val in line.split(',')) table.append(row) if i == 2: # failsafe break palette = tuple(zip(*table)) # swap rows to columns except IOError as err: palette = color_tables.vga_palette4 return palette
Parse the color table for the Linux console.
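For context, /etc/vtrgb holds three comma-separated lines (red, green and blue channels, 16 values each), and the parser above swaps those rows into 16 per-colour columns. A self-contained sketch of the same transform on an inline sample in the classic VGA layout (the values are illustrative):

sample = (
    "0,170,0,170,0,170,0,170,85,255,85,255,85,255,85,255\n"   # red channel
    "0,0,170,85,0,0,170,170,85,85,255,255,85,85,255,255\n"    # green channel
    "0,0,0,0,170,170,170,170,85,85,85,85,255,255,255,255\n"   # blue channel
)
table = [tuple(int(v) for v in line.split(',')) for line in sample.splitlines()]
palette = tuple(zip(*table))   # rows -> columns, as in parse_vtrgb
print(palette[1])              # (170, 0, 0): console colour 1, the VGA red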
def _run_sm_scale_in(self, C_out, scale_sm=91.1876): """Get the SM parameters at the EW scale, using an estimate `C_out` of the Wilson coefficients at that scale, and run them to the input scale.""" # initialize an empty SMEFT instance smeft_sm = SMEFT(wc=None) C_in_sm = smeftutil.C_array2dict(np.zeros(9999)) # set the SM parameters to the values obtained from smpar.smeftpar C_SM = smpar.smeftpar(scale_sm, C_out, basis='Warsaw') SM_keys = set(smeftutil.SM_keys) # to speed up lookup C_SM = {k: v for k, v in C_SM.items() if k in SM_keys} # set the Wilson coefficients at the EW scale to C_out C_in_sm.update(C_out) C_in_sm.update(C_SM) smeft_sm._set_initial(C_in_sm, scale_sm) # run up (with 1% relative precision, ignore running of Wilson coefficients) C_SM_high = smeft_sm._rgevolve(self.scale_in, newphys=False, rtol=0.001, atol=1) C_SM_high = self._rotate_defaultbasis(C_SM_high) return {k: v for k, v in C_SM_high.items() if k in SM_keys}
Get the SM parameters at the EW scale, using an estimate `C_out` of the Wilson coefficients at that scale, and run them to the input scale.
def pydoc_cli_monkey_patched(port): """In Python 3, run pydoc.cli with builtins.input monkey-patched so that pydoc can be run as a process. """ # Monkey-patch input so that input does not raise EOFError when # called by pydoc.cli def input(_): # pylint: disable=W0622 """Monkey-patched version of builtins.input""" while 1: time.sleep(1.0) import builtins builtins.input = input sys.argv += ["-p", port] pydoc.cli()
In Python 3, run pydoc.cli with builtins.input monkey-patched so that pydoc can be run as a process.
def _calculateSegmentActivity(connections, activeInput, connectedPermanence, activationThreshold, minThreshold, reducedThreshold, reducedThresholdCells = ()): """ Calculate the active and matching basal segments for this timestep. @param connections (SparseMatrixConnections) @param activeInput (numpy array) @return (tuple) - activeSegments (numpy array) Dendrite segments with enough active connected synapses to cause a dendritic spike - matchingSegments (numpy array) Dendrite segments with enough active potential synapses to be selected for learning in a bursting column - potentialOverlaps (numpy array) The number of active potential synapses for each segment. Includes counts for active, matching, and nonmatching segments. """ # Active apical segments lower the activation threshold for basal segments overlaps = connections.computeActivity(activeInput, connectedPermanence) outrightActiveSegments = np.flatnonzero(overlaps >= activationThreshold) if (reducedThreshold != activationThreshold and len(reducedThresholdCells) > 0): potentiallyActiveSegments = np.flatnonzero( (overlaps < activationThreshold) & (overlaps >= reducedThreshold)) cellsOfCASegments = connections.mapSegmentsToCells( potentiallyActiveSegments) # apically active segments are condit. active segments from apically # active cells conditionallyActiveSegments = potentiallyActiveSegments[ np.in1d(cellsOfCASegments, reducedThresholdCells)] activeSegments = np.concatenate((outrightActiveSegments, conditionallyActiveSegments)) else: activeSegments = outrightActiveSegments # Matching potentialOverlaps = connections.computeActivity(activeInput) matchingSegments = np.flatnonzero(potentialOverlaps >= minThreshold) return (activeSegments, matchingSegments, potentialOverlaps)
Calculate the active and matching basal segments for this timestep. @param connections (SparseMatrixConnections) @param activeInput (numpy array) @return (tuple) - activeSegments (numpy array) Dendrite segments with enough active connected synapses to cause a dendritic spike - matchingSegments (numpy array) Dendrite segments with enough active potential synapses to be selected for learning in a bursting column - potentialOverlaps (numpy array) The number of active potential synapses for each segment. Includes counts for active, matching, and nonmatching segments.
def get_error(response): """Gets Error by HTTP Status Code""" errors = { 400: BadRequestError, 401: UnauthorizedError, 403: AccessDeniedError, 404: NotFoundError, 429: RateLimitExceededError, 500: ServerError, 502: BadGatewayError, 503: ServiceUnavailableError } error_class = HTTPError if response.status_code in errors: error_class = errors[response.status_code] return error_class(response)
Gets Error by HTTP Status Code
def build_project(self):
    """ Build IAR project """
    # > IarBuild [project_path] -build [project_name]
    proj_path = join(getcwd(), self.workspace['files']['ewp'])
    if proj_path.split('.')[-1] != 'ewp':
        proj_path += '.ewp'
    if not os.path.exists(proj_path):
        logger.debug("The file: %s does not exist, was it exported prior to building?" % proj_path)
        return -1
    logger.debug("Building IAR project: %s" % proj_path)

    args = [join(self.env_settings.get_env_settings('iar'), 'IarBuild.exe'), proj_path, '-build', os.path.splitext(os.path.basename(self.workspace['files']['ewp']))[0]]
    logger.debug(args)

    try:
        p = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        output, err = p.communicate()
    except Exception:
        logger.error("Project: %s build failed. Please check IARBUILD path in the user_settings.py file." % self.workspace['files']['ewp'])
        return -1
    else:
        build_log_path = os.path.join(os.path.dirname(proj_path), 'build_log.txt')
        with open(build_log_path, 'w') as f:
            f.write(output)
        num_errors = self._parse_subprocess_output(output)
        if num_errors == 0:
            logger.info("Project: %s build completed." % self.workspace['files']['ewp'])
            return 0
        else:
            logger.error("Project: %s build failed with %d errors" % (self.workspace['files']['ewp'], num_errors))
            return -1
Build IAR project
def get_descriptives(data): """Get mean, SD, and mean and SD of log values. Parameters ---------- data : ndarray Data with segment as first dimension and all other dimensions raveled into second dimension. Returns ------- dict of ndarray each entry is a 1-D vector of descriptives over segment dimension """ output = {} dat_log = log(abs(data)) output['mean'] = nanmean(data, axis=0) output['sd'] = nanstd(data, axis=0) output['mean_log'] = nanmean(dat_log, axis=0) output['sd_log'] = nanstd(dat_log, axis=0) return output
Get mean, SD, and mean and SD of log values. Parameters ---------- data : ndarray Data with segment as first dimension and all other dimensions raveled into second dimension. Returns ------- dict of ndarray each entry is a 1-D vector of descriptives over segment dimension
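A quick illustration of the expected shapes (a hedged sketch; it assumes get_descriptives and its numpy imports live in the same module):

import numpy as np
from numpy import log, nanmean, nanstd   # names used inside get_descriptives

data = np.random.default_rng(0).lognormal(size=(10, 4))   # 10 segments, 4 raveled values each
desc = get_descriptives(data)
print(desc['mean'].shape, desc['sd_log'].shape)            # (4,) (4,)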
def flatten(self): """Create a flattened version by putting output first and then states.""" ls = [self.output] ls.extend(self.state) return ls
Create a flattened version by putting output first and then states.
def send_stun(self, message, addr): """ Send a STUN message. """ self.__log_debug('> %s %s', addr, message) self.transport.sendto(bytes(message), addr)
Send a STUN message.
def make_sentence(list_words): """ Return a sentence from list of words. :param list list_words: list of words :returns: sentence :rtype: str """ lw_len = len(list_words) if lw_len > 6: list_words.insert(lw_len // 2 + random.choice(range(-2, 2)), ',') sentence = ' '.join(list_words).replace(' ,', ',') return sentence.capitalize() + '.'
Return a sentence from list of words. :param list list_words: list of words :returns: sentence :rtype: str
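For example (the comma position is randomized, so output varies from run to run):

import random

words = ['the', 'quick', 'brown', 'fox', 'jumps', 'over', 'the', 'lazy', 'dog']
print(make_sentence(words))
# e.g. 'The quick brown, fox jumps over the lazy dog.'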
def _from_dict(cls, _dict): """Initialize a Corpora object from a json dictionary.""" args = {} if 'corpora' in _dict: args['corpora'] = [ Corpus._from_dict(x) for x in (_dict.get('corpora')) ] else: raise ValueError( 'Required property \'corpora\' not present in Corpora JSON') return cls(**args)
Initialize a Corpora object from a json dictionary.
def ignore_after(seconds, coro=None, *args, timeout_result=None): '''Execute the specified coroutine and return its result. Issue a cancellation request after seconds have elapsed. When a timeout occurs, no exception is raised. Instead, timeout_result is returned. If coro is None, the result is an asynchronous context manager that applies a timeout to a block of statements. For the context manager case, the resulting context manager object has an expired attribute set to True if time expired. Note: ignore_after() may also be composed with other timeout operations. TimeoutCancellationError and UncaughtTimeoutError exceptions might be raised according to the same rules as for timeout_after(). ''' if coro: return _ignore_after_func(seconds, False, coro, args, timeout_result) return TimeoutAfter(seconds, ignore=True)
Execute the specified coroutine and return its result. Issue a cancellation request after seconds have elapsed. When a timeout occurs, no exception is raised. Instead, timeout_result is returned. If coro is None, the result is an asynchronous context manager that applies a timeout to a block of statements. For the context manager case, the resulting context manager object has an expired attribute set to True if time expired. Note: ignore_after() may also be composed with other timeout operations. TimeoutCancellationError and UncaughtTimeoutError exceptions might be raised according to the same rules as for timeout_after().
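An illustrative use in curio-style async code (a sketch only; fetch_data is a made-up coroutine):

async def fetch_data():
    ...

async def main():
    # coroutine form: give up after 5 seconds and fall back to a default
    data = await ignore_after(5, fetch_data(), timeout_result='no data')

    # context-manager form: the block is cancelled once time runs out
    async with ignore_after(5) as clause:
        data = await fetch_data()
    if clause.expired:
        data = 'no data'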
def get_unspents(self): """Fetches all available unspent transaction outputs. :rtype: ``list`` of :class:`~bitcash.network.meta.Unspent` """ self.unspents[:] = NetworkAPI.get_unspent(self.address) self.balance = sum(unspent.amount for unspent in self.unspents) return self.unspents
Fetches all available unspent transaction outputs. :rtype: ``list`` of :class:`~bitcash.network.meta.Unspent`
def _get_filename(self, key, filename): """Write key to file. Either this method or :meth:`~simplekv.KeyValueStore._get_file` will be called by :meth:`~simplekv.KeyValueStore.get_file`. This method only accepts filenames and will open the file with a mode of ``wb``, then call :meth:`~simplekv.KeyValueStore._get_file`. :param key: Key to be retrieved :param filename: Filename to write to """ with open(filename, 'wb') as dest: return self._get_file(key, dest)
Write key to file. Either this method or :meth:`~simplekv.KeyValueStore._get_file` will be called by :meth:`~simplekv.KeyValueStore.get_file`. This method only accepts filenames and will open the file with a mode of ``wb``, then call :meth:`~simplekv.KeyValueStore._get_file`. :param key: Key to be retrieved :param filename: Filename to write to
def _compile_int_f(self):
    """Time Domain Simulation - update differential equations"""
    # Assemble the update code as a triple-quoted string literal
    string = '"""\n'
    string += 'system.dae.init_f()\n'
    # evaluate differential equations f
    for fcall, call in zip(self.fcall, self.fcalls):
        if fcall:
            string += call
    string += 'system.dae.reset_small_f()\n'
    string += '"""'
    # eval() unwraps the triple-quoted literal into a plain source string,
    # which is then compiled into a reusable code object
    self.int_f = compile(eval(string), '', 'exec')
Time Domain Simulation - update differential equations
def poll(self, timeout=-1, maxevents=-1): """ Poll for events :param timeout: The amount of seconds to wait for events before giving up. The default value, -1, represents infinity. Note that unlike the underlying ``epoll_wait()`` timeout is a fractional number representing **seconds**. :param maxevents: The maximum number of events to report. The default is a reasonably-sized maximum, identical to the one selected by Python 3.4. :returns: A list of (fd, events) that were reported or an empty list if the timeout elapsed. :raises ValueError: If :meth:`closed()` is True :raises OSError: If the underlying ``epoll_wait(2)`` fails. The error message matches those found in the manual page. """ if self._epfd < 0: _err_closed() if timeout != -1: # 1000 because epoll_wait(2) uses milliseconds timeout = int(timeout * 1000) if maxevents == -1: maxevents = FD_SETSIZE - 1 events = (epoll_event * maxevents)() num_events = epoll_wait( self._epfd, cast(byref(events), POINTER(epoll_event)), maxevents, timeout) return [(events[i].data.fd, events[i].events) for i in range(num_events)]
Poll for events :param timeout: The amount of seconds to wait for events before giving up. The default value, -1, represents infinity. Note that unlike the underlying ``epoll_wait()`` timeout is a fractional number representing **seconds**. :param maxevents: The maximum number of events to report. The default is a reasonably-sized maximum, identical to the one selected by Python 3.4. :returns: A list of (fd, events) that were reported or an empty list if the timeout elapsed. :raises ValueError: If :meth:`closed()` is True :raises OSError: If the underlying ``epoll_wait(2)`` fails. The error message matches those found in the manual page.
def datapath(self): """ Get an item's data path. """ path = self._fields['path'] if not path: # stopped item with no base_dir? path = self.fetch('directory') if path and not self._fields['is_multi_file']: path = os.path.join(path, self._fields['name']) return os.path.expanduser(fmt.to_unicode(path))
Get an item's data path.
def after(f, chain=False):
    """Run f after the decorated function.

    If chain is True, f receives the decorated function's result and f's
    return value is returned; otherwise f is called with the original
    arguments and the decorated function's result is returned unchanged.
    """
    def decorator(g):
        @wraps(g)
        def h(*args, **kargs):
            if chain:
                return f(g(*args, **kargs))
            else:
                r = g(*args, **kargs)
                f(*args, **kargs)
                return r
        return h
    return decorator
Run f after the decorated function. If chain is True, f receives the decorated function's result and f's return value is returned; otherwise f is called with the original arguments and the decorated function's result is returned unchanged.
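A minimal usage sketch (illustrative; it assumes the decorator above and functools.wraps are importable):

from functools import wraps  # used by the decorator above

@after(print)                    # chain=False: print receives the original arguments
def add(a, b):
    return a + b

add(2, 3)                        # prints '2 3' and returns 5

@after(str.upper, chain=True)    # chain=True: str.upper receives the decorated function's result
def greet(name):
    return 'hi ' + name

greet('bob')                     # returns 'HI BOB'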
def get_attributes(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if attributes are set on an ELB. CLI example: .. code-block:: bash salt myminion boto_elb.get_attributes myelb ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) retries = 30 while retries: try: lbattrs = conn.get_all_lb_attributes(name) ret = odict.OrderedDict() ret['access_log'] = odict.OrderedDict() ret['cross_zone_load_balancing'] = odict.OrderedDict() ret['connection_draining'] = odict.OrderedDict() ret['connecting_settings'] = odict.OrderedDict() al = lbattrs.access_log czlb = lbattrs.cross_zone_load_balancing cd = lbattrs.connection_draining cs = lbattrs.connecting_settings ret['access_log']['enabled'] = al.enabled ret['access_log']['s3_bucket_name'] = al.s3_bucket_name ret['access_log']['s3_bucket_prefix'] = al.s3_bucket_prefix ret['access_log']['emit_interval'] = al.emit_interval ret['cross_zone_load_balancing']['enabled'] = czlb.enabled ret['connection_draining']['enabled'] = cd.enabled ret['connection_draining']['timeout'] = cd.timeout ret['connecting_settings']['idle_timeout'] = cs.idle_timeout return ret except boto.exception.BotoServerError as e: if e.error_code == 'Throttling': log.debug("Throttled by AWS API, will retry in 5 seconds...") time.sleep(5) retries -= 1 continue log.error('ELB %s does not exist: %s', name, e.message) return {} return {}
Check to see if attributes are set on an ELB. CLI example: .. code-block:: bash salt myminion boto_elb.get_attributes myelb
def garbage(): """ Collect garbage and return an :class:`~refcycle.object_graph.ObjectGraph` based on collected garbage. The collected elements are removed from ``gc.garbage``, but are still kept alive by the references in the graph. Deleting the :class:`~refcycle.object_graph.ObjectGraph` instance and doing another ``gc.collect`` will remove those objects for good. """ with restore_gc_state(): gc.disable() gc.set_debug(gc.DEBUG_SAVEALL) collected_count = gc.collect() if collected_count: objects = gc.garbage[-collected_count:] del gc.garbage[-collected_count:] else: objects = [] return ObjectGraph(objects)
Collect garbage and return an :class:`~refcycle.object_graph.ObjectGraph` based on collected garbage. The collected elements are removed from ``gc.garbage``, but are still kept alive by the references in the graph. Deleting the :class:`~refcycle.object_graph.ObjectGraph` instance and doing another ``gc.collect`` will remove those objects for good.
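A short usage sketch (assuming this function is exposed by the refcycle package; the Node class is made up for illustration):

import refcycle

class Node(object):
    pass

a, b = Node(), Node()
a.partner, b.partner = b, a      # create a reference cycle
del a, b

graph = refcycle.garbage()       # ObjectGraph holding the collected cycle
print(len(graph))                # the two Node objects plus their __dict__s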
def _seg_to_vcf(vals): """Convert GATK CNV calls seg output to a VCF line. """ call_to_cn = {"+": 3, "-": 1} call_to_type = {"+": "DUP", "-": "DEL"} if vals["CALL"] not in ["0"]: info = ["FOLD_CHANGE_LOG=%s" % vals["MEAN_LOG2_COPY_RATIO"], "PROBES=%s" % vals["NUM_POINTS_COPY_RATIO"], "SVTYPE=%s" % call_to_type[vals["CALL"]], "SVLEN=%s" % (int(vals["END"]) - int(vals["START"])), "END=%s" % vals["END"], "CN=%s" % call_to_cn[vals["CALL"]]] return [vals["CONTIG"], vals["START"], ".", "N", "<%s>" % call_to_type[vals["CALL"]], ".", ".", ";".join(info), "GT", "0/1"]
Convert GATK CNV calls seg output to a VCF line.
def pack(self, value=None): """Pack the struct in a binary representation. Iterate over the class attributes, according to the order of definition, and then convert each attribute to its byte representation using its own ``pack`` method. Returns: bytes: Binary representation of the struct object. Raises: :exc:`~.exceptions.ValidationError`: If validation fails. """ if value is None: if not self.is_valid(): error_msg = "Error on validation prior to pack() on class " error_msg += "{}.".format(type(self).__name__) raise ValidationError(error_msg) else: message = b'' # pylint: disable=no-member for attr_info in self._get_named_attributes(): name, instance_value, class_value = attr_info try: message += class_value.pack(instance_value) except PackException as pack_exception: cls = type(self).__name__ msg = f'{cls}.{name} - {pack_exception}' raise PackException(msg) return message elif isinstance(value, type(self)): return value.pack() else: msg = "{} is not an instance of {}".format(value, type(self).__name__) raise PackException(msg)
Pack the struct in a binary representation. Iterate over the class attributes, according to the order of definition, and then convert each attribute to its byte representation using its own ``pack`` method. Returns: bytes: Binary representation of the struct object. Raises: :exc:`~.exceptions.ValidationError`: If validation fails.
def removeSinglePixels(img): ''' img - boolean array remove all pixels that have no neighbour ''' gx = img.shape[0] gy = img.shape[1] for i in range(gx): for j in range(gy): if img[i, j]: found_neighbour = False for ii in range(max(0, i - 1), min(gx, i + 2)): for jj in range(max(0, j - 1), min(gy, j + 2)): if ii == i and jj == j: continue if img[ii, jj]: found_neighbour = True break if found_neighbour: break if not found_neighbour: img[i, j] = 0
img - boolean array remove all pixels that have no neighbour
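A small usage sketch with a toy boolean image (the function modifies the array in place):

import numpy as np

img = np.zeros((5, 5), dtype=bool)
img[0, 0] = img[0, 1] = True     # two pixels that are neighbours -> kept
img[3, 3] = True                 # isolated pixel -> removed

removeSinglePixels(img)
print(img[0, 0], img[0, 1], img[3, 3])   # True True False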
async def shutdown(self, container, force=False): ''' Shutdown all connections. Exclusive connections created by get_connection will shutdown after release() ''' p = self._connpool self._connpool = [] self._shutdown = True if self._defaultconn: p.append(self._defaultconn) self._defaultconn = None if self._subscribeconn: p.append(self._subscribeconn) self._subscribeconn = None await container.execute_all([self._shutdown_conn(container, o, force) for o in p])
Shutdown all connections. Exclusive connections created by get_connection will shutdown after release()
def gaussian_filter1d_ppxf(spec, sig):
    """
    Convolve a spectrum by a Gaussian with different sigma for every pixel.
    If all sigma are the same this routine produces the same output as
    scipy.ndimage.gaussian_filter1d, except for the border treatment.
    Here the first/last p pixels are filled with zeros.
    When creating a template library for SDSS data, this implementation
    is 60x faster than a naive for loop over pixels.

    :param spec: vector with the spectrum to convolve
    :param sig: vector of sigma values (in pixels) for every pixel
    :return: spec convolved with a Gaussian with dispersion sig
    """
    sig = sig.clip(0.01)  # forces zero sigmas to have 0.01 pixels
    p = int(np.ceil(np.max(3 * sig)))
    m = 2 * p + 1  # kernel size
    x2 = np.linspace(-p, p, m) ** 2

    n = spec.size
    a = np.zeros((m, n))

    for j in range(m):  # Loop over the small size of the kernel
        # seed the row with the unshifted spectrum, then overwrite the
        # central part with the shifted copy
        a[j, :] = spec
        a[j, p:-p] = spec[j:n - m + j + 1]

    gau = np.exp(-x2[:, None] / (2 * sig ** 2))
    gau /= np.sum(gau, 0)[None, :]  # Normalize kernel

    conv_spectrum = np.sum(a * gau, 0)

    return conv_spectrum
Convolve a spectrum by a Gaussian with different sigma for every pixel. If all sigma are the same this routine produces the same output as scipy.ndimage.gaussian_filter1d, except for the border treatment. Here the first/last p pixels are filled with zeros. When creating a template library for SDSS data, this implementation is 60x faster than a naive for loop over pixels. :param spec: vector with the spectrum to convolve :param sig: vector of sigma values (in pixels) for every pixel :return: spec convolved with a Gaussian with dispersion sig
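An illustrative call with a constant per-pixel sigma (any 1-D spectrum-like array works; numpy is assumed to be imported as np, as in the function body):

import numpy as np

spec = np.sin(np.linspace(0, 20, 500)) + 0.1 * np.random.randn(500)
sig = np.full(spec.size, 2.0)            # 2-pixel dispersion everywhere
smoothed = gaussian_filter1d_ppxf(spec, sig)
print(smoothed.shape)                    # (500,), same length as the input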
def _golbub_welsch(orders, coeff1, coeff2): """Recurrence coefficients to abscisas and weights.""" abscisas, weights = [], [] for dim, order in enumerate(orders): if order: bands = numpy.zeros((2, order)) bands[0] = coeff1[dim, :order] bands[1, :-1] = numpy.sqrt(coeff2[dim, 1:order]) vals, vecs = scipy.linalg.eig_banded(bands, lower=True) abscisa, weight = vals.real, vecs[0, :]**2 indices = numpy.argsort(abscisa) abscisa, weight = abscisa[indices], weight[indices] else: abscisa, weight = numpy.array([coeff1[dim, 0]]), numpy.array([1.]) abscisas.append(abscisa) weights.append(weight) return abscisas, weights
Recurrence coefficients to abscisas and weights.
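A worked example for a 3-point Gauss-Hermite (probabilists') rule, where the recurrence coefficients are alpha_k = 0 and beta_k = k (the first entry of coeff2 is a placeholder, since only columns 1..order-1 feed the off-diagonal):

import numpy
import scipy.linalg

order = 3
coeff1 = numpy.zeros((1, order))         # alpha_0..alpha_2 = 0 for Hermite
coeff2 = numpy.array([[1.0, 1.0, 2.0]])  # beta_k = k; beta_0 is unused here

abscisas, weights = _golbub_welsch([order], coeff1, coeff2)
print(abscisas[0])   # approx [-1.732, 0., 1.732], i.e. 0 and +/- sqrt(3)
print(weights[0])    # approx [0.167, 0.667, 0.167]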
def _del_thread(self, dwThreadId): """ Private method to remove a thread object from the snapshot. @type dwThreadId: int @param dwThreadId: Global thread ID. """ try: aThread = self.__threadDict[dwThreadId] del self.__threadDict[dwThreadId] except KeyError: aThread = None msg = "Unknown thread ID %d" % dwThreadId warnings.warn(msg, RuntimeWarning) if aThread: aThread.clear()
Private method to remove a thread object from the snapshot. @type dwThreadId: int @param dwThreadId: Global thread ID.
def get_vlan_brief_input_request_type_get_request_vlan_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_vlan_brief = ET.Element("get_vlan_brief") config = get_vlan_brief input = ET.SubElement(get_vlan_brief, "input") request_type = ET.SubElement(input, "request-type") get_request = ET.SubElement(request_type, "get-request") vlan_id = ET.SubElement(get_request, "vlan-id") vlan_id.text = kwargs.pop('vlan_id') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def Connect(self, Username, WaitConnected=False): """Connects application to user. :Parameters: Username : str Name of the user to connect to. WaitConnected : bool If True, causes the method to wait until the connection is established. :return: If ``WaitConnected`` is True, returns the stream which can be used to send the data. Otherwise returns None. :rtype: `ApplicationStream` or None """ if WaitConnected: self._Connect_Event = threading.Event() self._Connect_Stream = [None] self._Connect_Username = Username self._Connect_ApplicationStreams(self, self.Streams) self._Owner.RegisterEventHandler('ApplicationStreams', self._Connect_ApplicationStreams) self._Alter('CONNECT', Username) self._Connect_Event.wait() self._Owner.UnregisterEventHandler('ApplicationStreams', self._Connect_ApplicationStreams) try: return self._Connect_Stream[0] finally: del self._Connect_Stream, self._Connect_Event, self._Connect_Username else: self._Alter('CONNECT', Username)
Connects application to user. :Parameters: Username : str Name of the user to connect to. WaitConnected : bool If True, causes the method to wait until the connection is established. :return: If ``WaitConnected`` is True, returns the stream which can be used to send the data. Otherwise returns None. :rtype: `ApplicationStream` or None
def count_string_diff(a,b): """Return the number of characters in two strings that don't exactly match""" shortest = min(len(a), len(b)) return sum(a[i] != b[i] for i in range(shortest))
Return the number of characters in two strings that don't exactly match
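For example (mismatches are only counted over the length of the shorter string):

count_string_diff('kitten', 'sitting')   # -> 2  ('k' vs 's', 'e' vs 'i')
count_string_diff('abc', 'abcdef')       # -> 0  (the extra characters are ignored)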
def _check_distributed_corpora_file(self): """Check '~/cltk_data/distributed_corpora.yaml' for any custom, distributed corpora that the user wants to load locally. TODO: write check or try if `cltk_data` dir is not present """ if self.testing: distributed_corpora_fp = os.path.expanduser('~/cltk_data/test_distributed_corpora.yaml') else: distributed_corpora_fp = os.path.expanduser('~/cltk_data/distributed_corpora.yaml') try: with open(distributed_corpora_fp) as file_open: corpora_dict = yaml.safe_load(file_open) except FileNotFoundError: logger.info('`~/cltk_data/distributed_corpora.yaml` file not found.') return [] except yaml.parser.ParserError as parse_err: logger.debug('Yaml parsing error: %s' % parse_err) return [] user_defined_corpora = [] for corpus_name in corpora_dict: about = corpora_dict[corpus_name] if about['language'].lower() == self.language: user_defined_corpus = dict() # user_defined_corpus['git_remote'] = about['git_remote'] user_defined_corpus['origin'] = about['origin'] user_defined_corpus['type'] = about['type'] user_defined_corpus['name'] = corpus_name user_defined_corpora.append(user_defined_corpus) return user_defined_corpora
Check '~/cltk_data/distributed_corpora.yaml' for any custom, distributed corpora that the user wants to load locally. TODO: write check or try if `cltk_data` dir is not present
def parseCmdline(rh, posOpsList, keyOpsList): """ Parse the request command input. Input: Request Handle Positional Operands List. This is a dictionary that contains an array for each subfunction. The array contains a entry (itself an array) for each positional operand. That array contains: - Human readable name of the operand, - Property in the parms dictionary to hold the value, - Is it required (True) or optional (False), - Type of data (1: int, 2: string). Keyword Operands List. This is a dictionary that contains an item for each subfunction. The value for the subfunction is a dictionary that contains a key for each recognized operand. The value associated with the key is an array that contains the following: - the related ReqHandle.parms item that stores the value, - how many values follow the keyword, and - the type of data for those values (1: int, 2: string) Output: Request Handle updated with parsed input. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter generalUtils.parseCmdline") # Handle any positional operands on the line. if rh.results['overallRC'] == 0 and rh.subfunction in posOpsList: ops = posOpsList[rh.subfunction] currOp = 0 # While we have operands on the command line AND # we have more operands in the positional operand list. while rh.argPos < rh.totalParms and currOp < len(ops): key = ops[currOp][1] # key for rh.parms[] opType = ops[currOp][3] # data type if opType == 1: # Handle an integer data type try: rh.parms[key] = int(rh.request[rh.argPos]) except ValueError: # keyword is not an integer msg = msgs.msg['0001'][1] % (modId, rh.function, rh.subfunction, (currOp + 1), ops[currOp][0], rh.request[rh.argPos]) rh.printLn("ES", msg) rh.updateResults(msgs.msg['0001'][0]) break else: rh.parms[key] = rh.request[rh.argPos] currOp += 1 rh.argPos += 1 if (rh.argPos >= rh.totalParms and currOp < len(ops) and ops[currOp][2] is True): # Check for missing required operands. msg = msgs.msg['0002'][1] % (modId, rh.function, rh.subfunction, ops[currOp][0], (currOp + 1)) rh.printLn("ES", msg) rh.updateResults(msgs.msg['0002'][0]) # Handle any keyword operands on the line. if rh.results['overallRC'] == 0 and rh.subfunction in keyOpsList: while rh.argPos < rh.totalParms: if rh.request[rh.argPos] in keyOpsList[rh.subfunction]: keyword = rh.request[rh.argPos] rh.argPos += 1 ops = keyOpsList[rh.subfunction] if keyword in ops: key = ops[keyword][0] opCnt = ops[keyword][1] opType = ops[keyword][2] if opCnt == 0: # Keyword has no additional value rh.parms[key] = True else: # Keyword has values following it. storeIntoArray = False # Assume single word if opCnt < 0: storeIntoArray = True # Property is a list all of the rest of the parms. opCnt = rh.totalParms - rh.argPos if opCnt == 0: # Need at least 1 operand value opCnt = 1 elif opCnt > 1: storeIntoArray = True if opCnt + rh.argPos > rh.totalParms: # keyword is missing its related value operand msg = msgs.msg['0003'][1] % (modId, rh.function, rh.subfunction, keyword) rh.printLn("ES", msg) rh.updateResults(msgs.msg['0003'][0]) break """ Add the expected value to the property. Take into account if there are more than 1. """ if storeIntoArray: # Initialize the list. rh.parms[key] = [] for i in range(0, opCnt): if opType == 1: # convert from string to int and save it. 
try: if not storeIntoArray: rh.parms[key] = ( int(rh.request[rh.argPos])) else: rh.parms[key].append(int( rh.request[rh.argPos])) except ValueError: # keyword is not an integer msg = (msgs.msg['0004'][1] % (modId, rh.function, rh.subfunction, keyword, rh.request[rh.argPos])) rh.printLn("ES", msg) rh.updateResults(msgs.msg['0004'][0]) break else: # Value is a string, save it. if not storeIntoArray: rh.parms[key] = rh.request[rh.argPos] else: rh.parms[key].append(rh.request[rh.argPos]) rh.argPos += 1 if rh.results['overallRC'] != 0: # Upper loop had an error break from loops. break else: # keyword is not in the subfunction's keyword list msg = msgs.msg['0005'][1] % (modId, rh.function, rh.subfunction, keyword) rh.printLn("ES", msg) rh.updateResults(msgs.msg['0005'][0]) break else: # Subfunction does not support keywords msg = (msgs.msg['0006'][1] % (modId, rh.function, rh.subfunction, rh.request[rh.argPos])) rh.printLn("ES", msg) rh.updateResults(msgs.msg['0006'][0]) break rh.printSysLog("Exit generalUtils.parseCmdLine, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC']
Parse the request command input.

Input:
   Request Handle
   Positional Operands List.  This is a dictionary that contains
   an array for each subfunction.  The array contains an entry
   (itself an array) for each positional operand.
   That array contains:
      - Human readable name of the operand,
      - Property in the parms dictionary to hold the value,
      - Is it required (True) or optional (False),
      - Type of data (1: int, 2: string).
   Keyword Operands List.  This is a dictionary that contains
   an item for each subfunction.  The value for the subfunction
   is a dictionary that contains a key for each recognized operand.
   The value associated with the key is an array that contains
   the following:
      - the related ReqHandle.parms item that stores the value,
      - how many values follow the keyword, and
      - the type of data for those values (1: int, 2: string)

Output:
   Request Handle updated with parsed input.
   Return code - 0: ok, non-zero: error
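A hypothetical sketch of what the two operand lists could look like for a subfunction named 'create' (the field layout follows the description above; every name here is made up for illustration):

posOpsList = {
    'create': [
        ['image name', 'image', True, 2],    # required string operand
        ['cpu count', 'cpuCnt', False, 1],   # optional integer operand
    ],
}

keyOpsList = {
    'create': {
        '--memory': ['memory', 1, 1],        # one integer value follows
        '--ipaddr': ['ipaddr', 1, 2],        # one string value follows
        '--verbose': ['verbose', 0, 0],      # flag, no value follows
    },
}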
def create_instance(self, image_id, pem_file, group_ids, instance_type, volume_type='gp2', ebs_optimized=False, instance_monitoring=False, iam_profile='', tag_list=None, auction_bid=0.0): ''' a method for starting an instance on AWS EC2 :param image_id: string with aws id of image for instance :param pem_file: string with path to pem file to access image :param group_ids: list with aws id of security group(s) to attach to instance :param instance_type: string with type of instance resource to use :param volume_type: string with type of on-disk storage :param ebs_optimized: [optional] boolean to activate ebs optimization :param instance_monitoring: [optional] boolean to active instance monitoring :param iam_profile: [optional] string with name of iam instance profile role :param tag_list: [optional] list of single key-pair tags for instance :param auction_bid: [optional] float with dollar amount to bid for instance hour :return: string with id of instance ''' title = '%s.create_instance' % self.__class__.__name__ # validate inputs input_fields = { 'image_id': image_id, 'pem_file': pem_file, 'group_ids': group_ids, 'instance_type': instance_type, 'volume_type': volume_type, 'iam_profile': iam_profile, 'tag_list': tag_list, 'auction_bid': auction_bid } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # print warning about auction if auction_bid: self.iam.printer('[WARNING]: auction bidding is not yet available.') # turn off verbosity self.iam.printer_on = False # verify existence of image try: self.read_image(image_id) except: raise ValueError('Image %s does not exist in EC2 account or permission scope.') # verify existence of security group group_list = self.list_security_groups() for id in group_ids: if id not in group_list: raise ValueError('Security group %s does not exist in EC2 account.' % id) # verify existence of iam profile if iam_profile: if not iam_profile in self.iam.list_roles(): raise ValueError('Iam instance profile %s does not exist in IAM account.' % iam_profile) # validate path to pem file from os import path if not path.exists(pem_file): raise ValueError('%s is not a valid path on localhost.' % pem_file) # verify existence of pem name pem_absolute = path.abspath(pem_file) pem_root, pem_ext = path.splitext(pem_absolute) pem_path, pem_name = path.split(pem_root) if not pem_name in self.list_keypairs(): raise ValueError('Pem file name %s does not exist in EC2 account.' % pem_name) # turn on verbosity self.iam.printer_on = True # create client token and timestamp for instance from labpack.records.id import labID record_id = labID() client_token = 'CT-%s' % record_id.id36 from labpack.records.time import labDT timestamp = labDT.new().zulu() # construct tag list if not tag_list: tag_list = [] for tag in tag_list: if tag['key'] == 'BuildDate': tag['value'] = timestamp # create keyword argument definitions kw_args = { 'DryRun': False, 'ImageId': image_id, 'MinCount': 1, 'MaxCount': 1, 'KeyName': pem_name, 'SecurityGroupIds': group_ids, 'InstanceType': instance_type, 'ClientToken': client_token, 'Monitoring': { 'Enabled': instance_monitoring }, 'EbsOptimized': ebs_optimized, 'BlockDeviceMappings': [] } kw_args['BlockDeviceMappings'].append( { "DeviceName": "/dev/xvda", "Ebs": { "VolumeType": volume_type } } ) if iam_profile: kw_args['IamInstanceProfile'] = { 'Name': iam_profile } # start instance on aws self.iam.printer('Initiating instance of image %s.' 
                     % image_id)
    try:
        response = self.connection.run_instances(**kw_args)
    except Exception as err:
        if 'non-VPC' in str(err):
            self.iam.printer('Default VPC Error Detected!\nAttempting to add Subnet declaration.')
            group_details = self.read_security_group(group_ids[0])
            env_type = ''
            for tag in group_details['tags']:
                if tag['Key'] == 'Env':
                    env_type = tag['Value']
            if env_type:
                subnet_list = self.list_subnets(tag_values=[env_type])
            else:
                subnet_list = self.list_subnets()
            error_msg = '%s requires a Subnet to match the Security Group %s' % (title, group_ids[0])
            if not subnet_list:
                raise AWSConnectionError(error_msg)
            subnet_id = ''
            for subnet in subnet_list:
                subnet_details = self.read_subnet(subnet)
                if subnet_details['vpc_id'] == group_details['vpc_id']:
                    subnet_id = subnet
            if not subnet_id:
                raise AWSConnectionError(error_msg)
            kw_args['SubnetId'] = subnet_id
            try:
                response = self.connection.run_instances(**kw_args)
            except Exception:
                raise AWSConnectionError('%s(%s)' % (title, kw_args))
        else:
            raise AWSConnectionError('%s(%s)' % (title, kw_args))

    # parse instance id from response
    instance_id = ''
    instance_list = response['Instances']
    for i in range(0, len(instance_list)):
        if instance_list[i]['ClientToken'] == client_token:
            instance_id = instance_list[i]['InstanceId']
    if instance_id:
        self.iam.printer('Instance %s has been initiated.' % instance_id)
    else:
        raise Exception('Failure creating instance from image %s.' % image_id)

    # tag instance with instance tags
    self.tag_instance(instance_id, tag_list)

    return instance_id
a method for starting an instance on AWS EC2

:param image_id: string with aws id of image for instance
:param pem_file: string with path to pem file to access image
:param group_ids: list with aws id of security group(s) to attach to instance
:param instance_type: string with type of instance resource to use
:param volume_type: string with type of on-disk storage
:param ebs_optimized: [optional] boolean to activate ebs optimization
:param instance_monitoring: [optional] boolean to activate instance monitoring
:param iam_profile: [optional] string with name of iam instance profile role
:param tag_list: [optional] list of single key-value pair tags for instance
:param auction_bid: [optional] float with dollar amount to bid for instance hour
:return: string with id of instance
def stop(self): """Stop the publisher. """ self.publish.setsockopt(zmq.LINGER, 1) self.publish.close() return self
Stop the publisher.
def cVectorToPython(x):
    """
    Convert the c vector data into the correct python data type
    (numpy arrays or strings)
    :param x:
    :return:
    """
    if isinstance(x[0], bool):
        return numpy.frombuffer(x, dtype=numpy.bool_).copy()
    elif isinstance(x[0], int):
        return numpy.frombuffer(x, dtype=numpy.int32).copy()
    elif isinstance(x[0], float):
        return numpy.frombuffer(x, dtype=numpy.float64).copy()
    elif isinstance(x[0].value, bytes):
        return [toPythonString(y) for y in x]
Convert the c vector data into the correct python data type (numpy arrays or strings) :param x: :return:
def _get_init_args(self): """Creates dict with properties marked as readonly""" args = {} for rop in self.ro_properties: if rop in self.properties: args[rop] = self.properties[rop] return args
Creates dict with properties marked as readonly
def CopyToDict(self): """Copies the event tag to a dictionary. Returns: dict[str, object]: event tag attributes. """ result_dict = { 'labels': self.labels } if self.comment: result_dict['comment'] = self.comment return result_dict
Copies the event tag to a dictionary. Returns: dict[str, object]: event tag attributes.
def install(cert, password, keychain="/Library/Keychains/System.keychain", allow_any=False, keychain_password=None): ''' Install a certificate cert The certificate to install password The password for the certificate being installed formatted in the way described for openssl command in the PASS PHRASE ARGUMENTS section. Note: The password given here will show up as plaintext in the job returned info. keychain The keychain to install the certificate to, this defaults to /Library/Keychains/System.keychain allow_any Allow any application to access the imported certificate without warning keychain_password If your keychain is likely to be locked pass the password and it will be unlocked before running the import Note: The password given here will show up as plaintext in the returned job info. CLI Example: .. code-block:: bash salt '*' keychain.install test.p12 test123 ''' if keychain_password is not None: unlock_keychain(keychain, keychain_password) cmd = 'security import {0} -P {1} -k {2}'.format(cert, password, keychain) if allow_any: cmd += ' -A' return __salt__['cmd.run'](cmd)
Install a certificate cert The certificate to install password The password for the certificate being installed formatted in the way described for openssl command in the PASS PHRASE ARGUMENTS section. Note: The password given here will show up as plaintext in the job returned info. keychain The keychain to install the certificate to, this defaults to /Library/Keychains/System.keychain allow_any Allow any application to access the imported certificate without warning keychain_password If your keychain is likely to be locked pass the password and it will be unlocked before running the import Note: The password given here will show up as plaintext in the returned job info. CLI Example: .. code-block:: bash salt '*' keychain.install test.p12 test123
def event_types(self): """ Raises ------ IndexError When there is no selected rater """ try: events = self.rater.find('events') except AttributeError: raise IndexError('You need to have at least one rater') return [x.get('type') for x in events]
Raises ------ IndexError When there is no selected rater
def find_frequencies(data, freq=44100, bits=16):
    """Convert audio data into a frequency-amplitude table using fast fourier
    transformation.

    Return a list of tuples (frequency, amplitude).

    Data should only contain one channel of audio.
    """
    # Fast fourier transform
    n = len(data)
    p = _fft(data)
    uniquePts = int(numpy.ceil((n + 1) / 2.0))  # must be an int to slice below

    # Scale by the length (n) and square the value to get the amplitude
    p = [(abs(x) / float(n)) ** 2 * 2 for x in p[0:uniquePts]]
    p[0] = p[0] / 2
    if n % 2 == 0:
        p[-1] = p[-1] / 2

    # Generate the frequencies and zip with the amplitudes
    s = freq / float(n)
    freqArray = numpy.arange(0, uniquePts * s, s)
    return zip(freqArray, p)
Convert audio data into a frequency-amplitude table using fast fourier transformation. Return a list of tuples (frequency, amplitude). Data should only contain one channel of audio.
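An illustrative call that recovers the pitch of a pure tone (a sketch that assumes the module-level _fft helper used above is available):

import numpy

rate = 44100
t = numpy.arange(0, 0.1, 1.0 / rate)           # 100 ms of audio
tone = numpy.sin(2 * numpy.pi * 440 * t)       # 440 Hz sine wave

spectrum = list(find_frequencies(tone, freq=rate))
peak = max(spectrum, key=lambda pair: pair[1])
print(round(peak[0]))                          # ~440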
def finalizePrivateLessonRegistration(sender,**kwargs): ''' Once a private lesson registration is finalized, mark the slots that were used to book the private lesson as booked and associate them with the final registration. No need to notify students in this instance because they are already receiving a notification of their registration. ''' finalReg = kwargs.pop('registration') for er in finalReg.eventregistration_set.filter( event__privatelessonevent__isnull=False ): er.event.finalizeBooking(eventRegistration=er,notifyStudent=False)
Once a private lesson registration is finalized, mark the slots that were used to book the private lesson as booked and associate them with the final registration. No need to notify students in this instance because they are already receiving a notification of their registration.
def _delta_kt_prime_dirint(kt_prime, use_delta_kt_prime, times): """ Calculate delta_kt_prime (Perez eqn 2 and eqn 3), or return a default value for use with :py:func:`_dirint_bins`. """ if use_delta_kt_prime: # Perez eqn 2 kt_next = kt_prime.shift(-1) kt_previous = kt_prime.shift(1) # replace nan with values that implement Perez Eq 3 for first and last # positions. Use kt_previous and kt_next to handle series of length 1 kt_next.iloc[-1] = kt_previous.iloc[-1] kt_previous.iloc[0] = kt_next.iloc[0] delta_kt_prime = 0.5 * ((kt_prime - kt_next).abs().add( (kt_prime - kt_previous).abs(), fill_value=0)) else: # do not change unless also modifying _dirint_bins delta_kt_prime = pd.Series(-1, index=times) return delta_kt_prime
Calculate delta_kt_prime (Perez eqn 2 and eqn 3), or return a default value for use with :py:func:`_dirint_bins`.
def initSeasonFactors(self, timeSeries):
    """ Computes the initial season smoothing factors.

    :return:    Returns a list of season vectors of length "seasonLength".
    :rtype:     list
    """
    seasonLength = self.get_parameter("seasonLength")
    try:
        seasonValues = self.get_parameter("seasonValues")
        assert seasonLength == len(seasonValues), "Preset Season Values have to be of season's length"
        return seasonValues
    except KeyError:
        pass

    seasonValues = []
    completeCycles = len(timeSeries) // seasonLength

    A = {}  # cache values for A_j
    for i in xrange(seasonLength):
        c_i = 0
        for j in xrange(completeCycles):
            if j not in A:
                A[j] = self.computeA(j, timeSeries)
            c_i += timeSeries[(seasonLength * j) + i][1] / A[j]  # wikipedia suggests j-1, but we worked with indices in the first place

        c_i /= completeCycles
        seasonValues.append(c_i)

    return seasonValues
Computes the initial season smoothing factors. :return: Returns a list of season vectors of length "seasonLength". :rtype: list
def apply(self, fn, dtype=None, seed=None): """ Transform each row to an :class:`~turicreate.SArray` according to a specified function. Returns a new SArray of ``dtype`` where each element in this SArray is transformed by `fn(x)` where `x` is a single row in the sframe represented as a dictionary. The ``fn`` should return exactly one value which can be cast into type ``dtype``. If ``dtype`` is not specified, the first 100 rows of the SFrame are used to make a guess of the target data type. Parameters ---------- fn : function The function to transform each row of the SFrame. The return type should be convertible to `dtype` if `dtype` is not None. This can also be a toolkit extension function which is compiled as a native shared library using SDK. dtype : dtype, optional The dtype of the new SArray. If None, the first 100 elements of the array are used to guess the target data type. seed : int, optional Used as the seed if a random number generator is included in `fn`. Returns ------- out : SArray The SArray transformed by fn. Each element of the SArray is of type ``dtype`` Examples -------- Concatenate strings from several columns: >>> sf = turicreate.SFrame({'user_id': [1, 2, 3], 'movie_id': [3, 3, 6], 'rating': [4, 5, 1]}) >>> sf.apply(lambda x: str(x['user_id']) + str(x['movie_id']) + str(x['rating'])) dtype: str Rows: 3 ['134', '235', '361'] """ assert callable(fn), "Input must be callable" test_sf = self[:10] dryrun = [fn(row) for row in test_sf] if dtype is None: dtype = SArray(dryrun).dtype if seed is None: seed = abs(hash("%0.20f" % time.time())) % (2 ** 31) nativefn = None try: from .. import extensions as extensions nativefn = extensions._build_native_function_call(fn) except: pass if nativefn is not None: # this is a toolkit lambda. We can do something about it with cython_context(): return SArray(_proxy=self.__proxy__.transform_native(nativefn, dtype, seed)) with cython_context(): return SArray(_proxy=self.__proxy__.transform(fn, dtype, seed))
Transform each row to an :class:`~turicreate.SArray` according to a specified function. Returns a new SArray of ``dtype`` where each element in this SArray is transformed by `fn(x)` where `x` is a single row in the sframe represented as a dictionary. The ``fn`` should return exactly one value which can be cast into type ``dtype``. If ``dtype`` is not specified, the first 100 rows of the SFrame are used to make a guess of the target data type. Parameters ---------- fn : function The function to transform each row of the SFrame. The return type should be convertible to `dtype` if `dtype` is not None. This can also be a toolkit extension function which is compiled as a native shared library using SDK. dtype : dtype, optional The dtype of the new SArray. If None, the first 100 elements of the array are used to guess the target data type. seed : int, optional Used as the seed if a random number generator is included in `fn`. Returns ------- out : SArray The SArray transformed by fn. Each element of the SArray is of type ``dtype`` Examples -------- Concatenate strings from several columns: >>> sf = turicreate.SFrame({'user_id': [1, 2, 3], 'movie_id': [3, 3, 6], 'rating': [4, 5, 1]}) >>> sf.apply(lambda x: str(x['user_id']) + str(x['movie_id']) + str(x['rating'])) dtype: str Rows: 3 ['134', '235', '361']
def get_conn(profile):
    '''
    Return a client object for accessing consul
    '''
    params = {}
    for key in ('host', 'port', 'token', 'scheme', 'consistency', 'dc', 'verify'):
        if key in profile:
            params[key] = profile[key]

    if HAS_CONSUL:
        return consul.Consul(**params)
    else:
        raise CommandExecutionError(
            '(unable to import consul, '
            'module most likely not installed. Please install python-consul)'
        )
Return a client object for accessing consul
def _get_indexing_dispatch_code(key): """Returns a dispatch code for calling basic or advanced indexing functions.""" if isinstance(key, (NDArray, np.ndarray)): return _NDARRAY_ADVANCED_INDEXING elif isinstance(key, list): # TODO(junwu): Add support for nested lists besides integer list for i in key: if not isinstance(i, integer_types): raise TypeError('Indexing NDArray only supports a list of integers as index' ' when key is of list type, received element=%s of type=%s' % (str(i), str(type(i)))) return _NDARRAY_ADVANCED_INDEXING elif isinstance(key, (integer_types, py_slice)): return _NDARRAY_BASIC_INDEXING elif isinstance(key, tuple): for idx in key: if isinstance(idx, (NDArray, np.ndarray, list, tuple)): return _NDARRAY_ADVANCED_INDEXING elif not isinstance(idx, (py_slice, integer_types)): raise ValueError("NDArray does not support slicing with key %s of type %s." % (str(idx), str(type(idx)))) return _NDARRAY_BASIC_INDEXING else: return _NDARRAY_UNSUPPORTED_INDEXING
Returns a dispatch code for calling basic or advanced indexing functions.
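The dispatch behaviour, illustrated with a few keys (the returned values are the module-level flag constants referenced above):

_get_indexing_dispatch_code(3)                       # basic indexing
_get_indexing_dispatch_code(slice(0, 5))             # basic indexing
_get_indexing_dispatch_code((1, slice(None)))        # basic indexing
_get_indexing_dispatch_code([0, 2, 4])               # advanced indexing
_get_indexing_dispatch_code((np.array([0, 1]), 2))   # advanced indexing
_get_indexing_dispatch_code('bad key')               # unsupported indexing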
def attach_socket(self, **kwargs): """ Like :py:meth:`attach`, but returns the underlying socket-like object for the HTTP request. Args: params (dict): Dictionary of request parameters (e.g. ``stdout``, ``stderr``, ``stream``). ws (bool): Use websockets instead of raw HTTP. Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ return self.client.api.attach_socket(self.id, **kwargs)
Like :py:meth:`attach`, but returns the underlying socket-like object for the HTTP request. Args: params (dict): Dictionary of request parameters (e.g. ``stdout``, ``stderr``, ``stream``). ws (bool): Use websockets instead of raw HTTP. Raises: :py:class:`docker.errors.APIError` If the server returns an error.
def wait_for_term(): """ Wait until we get killed by a TERM signal (from someone else). """ class Waiter: def __init__(self): self.sleeping = True import signal #@Reimport self.oldhandler = signal.signal(signal.SIGTERM, self._SIGTERMHandler) def _SIGTERMHandler(self, number, frame): self.sleeping = False def sleep(self): while self.sleeping: time.sleep(0.1) waiter = Waiter() waiter.sleep()
Wait until we get killed by a TERM signal (from someone else).
def redirect(self, pid): """Redirect persistent identifier to another persistent identifier. :param pid: The :class:`invenio_pidstore.models.PersistentIdentifier` where redirect the PID. :raises invenio_pidstore.errors.PIDInvalidAction: If the PID is not registered or is not already redirecting to another PID. :raises invenio_pidstore.errors.PIDDoesNotExistError: If PID is not found. :returns: `True` if the PID is successfully redirect. """ if not (self.is_registered() or self.is_redirected()): raise PIDInvalidAction("Persistent identifier is not registered.") try: with db.session.begin_nested(): if self.is_redirected(): r = Redirect.query.get(self.object_uuid) r.pid = pid else: with db.session.begin_nested(): r = Redirect(pid=pid) db.session.add(r) self.status = PIDStatus.REDIRECTED self.object_type = None self.object_uuid = r.id db.session.add(self) except IntegrityError: raise PIDDoesNotExistError(pid.pid_type, pid.pid_value) except SQLAlchemyError: logger.exception("Failed to redirect to {0}".format( pid), extra=dict(pid=self)) raise logger.info("Redirected PID to {0}".format(pid), extra=dict(pid=self)) return True
Redirect persistent identifier to another persistent identifier. :param pid: The :class:`invenio_pidstore.models.PersistentIdentifier` where redirect the PID. :raises invenio_pidstore.errors.PIDInvalidAction: If the PID is not registered or is not already redirecting to another PID. :raises invenio_pidstore.errors.PIDDoesNotExistError: If PID is not found. :returns: `True` if the PID is successfully redirect.
def encrypt(self, data): """ Encrypt the data. Also, update the cipher iv. This is needed for SSLv3 and TLS 1.0. For TLS 1.1/1.2, it is overwritten in TLS.post_build(). """ if False in six.itervalues(self.ready): raise CipherError(data) encryptor = self._cipher.encryptor() tmp = encryptor.update(data) + encryptor.finalize() self.iv = tmp[-self.block_size:] return tmp
Encrypt the data. Also, update the cipher iv. This is needed for SSLv3 and TLS 1.0. For TLS 1.1/1.2, it is overwritten in TLS.post_build().
def use_schema(schema, list_view=False, locations=None): """View decorator for using a marshmallow schema to (1) parse a request's input and (2) serializing the view's output to a JSON response. """ def decorator(func): @functools.wraps(func) def wrapped(*args, **kwargs): use_args_wrapper = parser.use_args(schema, locations=locations) # Function wrapped with use_args func_with_args = use_args_wrapper(func) ret = func_with_args(*args, **kwargs) # Serialize and jsonify the return value return jsonify(schema.dump(ret, many=list_view).data) return wrapped return decorator
View decorator for using a marshmallow schema to (1) parse a request's input and (2) serializing the view's output to a JSON response.
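A rough usage sketch (assuming a Flask app, webargs' parser, and a marshmallow 2.x schema to match the .data attribute used above; ItemSchema and save_item are made up for illustration):

from marshmallow import Schema, fields

class ItemSchema(Schema):
    name = fields.Str(required=True)
    price = fields.Float()

@app.route('/items', methods=['POST'])
@use_schema(ItemSchema(), locations=('json',))
def create_item(args):
    item = save_item(**args)     # hypothetical persistence helper
    return item                  # serialized and jsonified by the decorator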
def _find_interfaces_ip(mac): ''' Helper to search the interfaces IPs using the MAC address. ''' try: mac = napalm_helpers.convert(napalm_helpers.mac, mac) except AddrFormatError: return ('', '', []) all_interfaces = _get_mine('net.interfaces') all_ipaddrs = _get_mine('net.ipaddrs') for device, device_interfaces in six.iteritems(all_interfaces): if not device_interfaces.get('result', False): continue for interface, interface_details in six.iteritems(device_interfaces.get('out', {})): try: interface_mac = napalm_helpers.convert(napalm_helpers.mac, interface_details.get('mac_address')) except AddrFormatError: continue if mac != interface_mac: continue interface_ipaddrs = all_ipaddrs.get(device, {}).get('out', {}).get(interface, {}) ip_addresses = interface_ipaddrs.get('ipv4', {}) ip_addresses.update(interface_ipaddrs.get('ipv6', {})) interface_ips = ['{0}/{1}'.format(ip_addr, addr_details.get('prefix_length', '32')) for ip_addr, addr_details in six.iteritems(ip_addresses)] return device, interface, interface_ips return ('', '', [])
Helper to search the interfaces IPs using the MAC address.