positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def _analyse_overview_field(content): ''' Split the field in drbd-overview ''' if "(" in content: # Output like "Connected(2*)" or "UpToDate(2*)" return content.split("(")[0], content.split("(")[0] elif "/" in content: # Output like "Primar/Second" or "UpToDa/UpToDa" return content.split("/")[0], content.split("/")[1] return content, ""
Split the field in drbd-overview
def trigger(self, event, *args):
    """Trigger ``event`` by delegating to ``EventHook.trigger``.

    @param event: event to trigger. Any object can be passed as event,
        but string is preferable. If qcore.EnumBase instance is passed,
        its name is used as event key.
    @param args: event arguments forwarded to the hook.
    @return: self, so calls like this can be chained together.
    """
    hook = self.get_or_create(event)
    hook.trigger(*args)
    return self
Triggers the specified event by invoking EventHook.trigger under the hood. @param event: event to trigger. Any object can be passed as event, but string is preferable. If qcore.EnumBase instance is passed, its name is used as event key. @param args: event arguments. @return: self, so calls like this can be chained together.
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'word') and self.word is not None: _dict['word'] = self.word if hasattr(self, 'sounds_like') and self.sounds_like is not None: _dict['sounds_like'] = self.sounds_like if hasattr(self, 'display_as') and self.display_as is not None: _dict['display_as'] = self.display_as return _dict
Return a json dictionary representing this model.
def get_app_ext(self, loops=float('inf')):
    """Return the GIF application extension block controlling loop count.

    The NETSCAPE2.0 application extension specifies how many times the
    animation repeats.  The extension nominally interprets a zero loop
    count as "loop forever", but that encoding does not work reliably,
    so 0 or infinity is encoded as the maximum 16-bit count instead.

    @param loops: requested loop count; 0 or float('inf') means infinite.
    @return: the extension as a byte string.
    """
    # Removed the dead `if True:` scaffold and the commented-out
    # zero-loop encoding experiment from the original; behavior unchanged.
    if loops == 0 or loops == float('inf'):
        loops = 2 ** 16 - 1  # clamp to the largest representable count
    bb = "\x21\xFF\x0B"      # extension introducer, label, block size
    bb += "NETSCAPE2.0"      # application identifier + auth code
    bb += "\x03\x01"         # sub-block size + loop sub-block id
    bb += int_to_bin(loops)  # 16-bit loop count
    bb += '\x00'             # block terminator
    return bb
get_app_ext(loops=float('inf')) Application extension. This part specifies the amount of loops. If loops is 0 or inf, it goes on infinitely.
def sample(self, sample_indices=None, num_samples=1):
    """
    Returns samples according to the KDE.

    Parameters
    ----------
    sample_indices: list of ints
        Indices into the training data used as centers for the samples.
    num_samples: int
        If sample_indices is None, this specifies how many samples
        are drawn.
    """
    if sample_indices is None:
        # Pick random training points to serve as sample centers.
        sample_indices = np.random.choice(self.data.shape[0], size=num_samples)
    samples = self.data[sample_indices]

    # All integer offsets a sample may move by: -(num_values-1) .. num_values-1.
    possible_steps = np.arange(-self.num_values+1,self.num_values)
    # Mask selecting the zero step (float-tolerant comparison).
    idx = (np.abs(possible_steps) < 1e-2)

    # Kernel over steps: mass decays as bw^|step| away from zero, the zero
    # step keeps (1-bw), then normalize to a proper distribution.
    ps = 0.5*(1-self.bw) * np.power(self.bw, np.abs(possible_steps))
    ps[idx] = (1-self.bw)
    ps /= ps.sum()

    delta = np.zeros_like(samples)
    oob_idx = np.arange(samples.shape[0])
    # Rejection-style resampling: any sample that stepped outside
    # [0, num_values-1] has its move reverted and redrawn until all
    # samples land in range.
    while len(oob_idx) > 0:
        samples[oob_idx] -= delta[oob_idx]  # revert move
        delta[oob_idx] = np.random.choice(possible_steps, size=len(oob_idx), p=ps)
        samples[oob_idx] += delta[oob_idx]
        # Keep only the indices that are still out of bounds.
        oob_idx = oob_idx[np.argwhere(np.logical_or(samples[oob_idx] > self.num_values-0.9, samples[oob_idx] < -0.1)).flatten()]

    return(np.rint(samples))
returns samples according to the KDE Parameters ---------- sample_indices: list of ints Indices into the training data used as centers for the samples num_samples: int if sample_indices is None, this specifies how many samples are drawn.
def GetInput(self):
    """Yield aff4 client objects that pinged within ``max_age``.

    Opens all known clients in batches of ``client_chunksize`` and
    yields each VFSGRRClient whose last PING is newer than
    ``max_age`` seconds ago.
    """
    client_list = GetAllClients(token=self.token)
    logging.debug("Got %d clients", len(client_list))
    for client_group in collection.Batch(client_list, self.client_chunksize):
        for fd in aff4.FACTORY.MultiOpen(
                client_group,
                mode="r",
                aff4_type=aff4_grr.VFSGRRClient,
                token=self.token):
            if isinstance(fd, aff4_grr.VFSGRRClient):
                # Skip if older than max_age.  The 1e6 factor converts the
                # cutoff to microseconds — presumably PING is stored in
                # microseconds since epoch; TODO confirm against schema.
                oldest_time = (time.time() - self.max_age) * 1e6
                if fd.Get(aff4_grr.VFSGRRClient.SchemaCls.PING) >= oldest_time:
                    yield fd
Yield client urns.
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_blade_swbd(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status") config = logical_chassis_fwdl_status output = ET.SubElement(logical_chassis_fwdl_status, "output") cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries") fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries") blade_swbd = ET.SubElement(fwdl_entries, "blade-swbd") blade_swbd.text = kwargs.pop('blade_swbd') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def list_all(self):
    """Return every distinct item across all routes as a list."""
    distinct = set()
    for route_items in self._routes.values():
        distinct.update(route_items)
    return list(distinct)
All items
def p_block(p):
    """block : '{' commands '}' """
    # section 3.2: REQUIRE must come before any other command, which means
    # it can never legally appear inside another command's block.
    has_require = any(cmd.RULE_IDENTIFIER == 'REQUIRE'
                      for cmd in p[2].commands)
    if has_require:
        print("REQUIRE command not allowed inside of a block (line %d)"
              % (p.lineno(2)))
        raise SyntaxError
    p[0] = p[2]
block : '{' commands '}'
def printMetaDataFor(archive, location):
    """
    Prints metadata for given location.

    :param archive: CombineArchive instance
    :param location: location within the archive
    :return: None
    """
    desc = archive.getMetadataForLocation(location)
    if desc.isEmpty():
        print(" no metadata for '{0}'".format(location))
        return None

    print(" metadata for '{0}':".format(location))
    print(" Created : {0}".format(desc.getCreated().getDateAsString()))
    for k in range(desc.getNumModified()):
        print(" Modified : {0}".format(desc.getModified(k).getDateAsString()))

    print(" # Creators: {0}".format(desc.getNumCreators()))
    for k in range(desc.getNumCreators()):
        person = desc.getCreator(k)
        print(" {0} {1}".format(person.getGivenName(), person.getFamilyName()))
Prints metadata for given location. :param archive: CombineArchive instance :param location: :return:
def do_parameter(self, args):
    """Print a parameter of the wrapped stack.

    Accepts either an integer index or a name key into the stack's
    parameters and prints the matching parameter's resource status.
    """
    parser = CommandArgumentParser("parameter")
    parser.add_argument(dest="id", help="Parameter to print")
    args = vars(parser.parse_args(args))
    # BUG FIX: the original used a Python 2 print statement here
    # (`print "..."`), which is a syntax error on Python 3 even though the
    # rest of this method already uses the function form.
    print("printing parameter {}".format(args['id']))
    try:
        # Prefer integer indexing; fall back to name lookup.
        index = int(args['id'])
        parameter = self.wrappedStack['resourcesByTypeName']['parameters'][index]
    except ValueError:
        parameter = self.wrappedStack['resourcesByTypeName']['parameters'][args['id']]
    print(parameter.resource_status)
Print a parameter
def configure_app(app):
    """Configure Flask/Celery application.

    * Rio will find environment variable `RIO_SETTINGS` first::

        $ export RIO_SETTINGS=/path/to/settings.cfg
        $ rio worker

    * If `RIO_SETTINGS` is missing, Rio will try to load configuration
      module in `rio.settings` according to another environment variable
      `RIO_ENV`. Default load `rio.settings.dev`. ::

        $ export RIO_ENV=prod
        $ rio worker
    """
    app.config_from_object('rio.settings.default')

    # A settings file takes precedence over any RIO_ENV selection.
    if environ.get('RIO_SETTINGS'):
        app.config_from_envvar('RIO_SETTINGS')
        return

    env_to_module = {
        'dev': 'rio.settings.dev',
        'stag': 'rio.settings.stag',
        'prod': 'rio.settings.prod',
        'test': 'rio.settings.test',
    }
    rio_env = environ.get('RIO_ENV', 'dev')
    # Unknown environments silently fall back to the dev settings.
    app.config_from_object(env_to_module.get(rio_env, env_to_module['dev']))
Configure Flask/Celery application. * Rio will find environment variable `RIO_SETTINGS` first:: $ export RIO_SETTINGS=/path/to/settings.cfg $ rio worker * If `RIO_SETTINGS` is missing, Rio will try to load configuration module in `rio.settings` according to another environment variable `RIO_ENV`. Default load `rio.settings.dev`. $ export RIO_ENV=prod $ rio worker
def windows_install(path_to_python=""):
    """
    Sets the .py extension to be associated with the ftype Python which is
    then set to the python.exe you provide in the path_to_python variable
    or after the -p flag if run as a script. Once the python environment
    is set up the function proceeds to set PATH and PYTHONPATH using setx.

    Parameters
    ----------
    path_to_python : the path to the python.exe you want windows to
        execute when running .py files
    """
    if not path_to_python:
        # No interpreter given: warn, then let the user opt in to continue
        # without the file association being set up.
        print("Please enter the path to your python.exe you wish Windows to use to run python files. If you do not, this script will not be able to set up a full python environment in Windows. If you already have a python environment set up in Windows such that you can run python scripts from command prompt with just a file name then ignore this message. Otherwise, you will need to run dev_setup.py again with the command line option '-p' followed by the correct full path to python.\nRun dev_setup.py with the -h flag for more details")
        print("Would you like to continue? \n[y/N] ")
        ans = input()
        if ans == 'y':
            pass
        else:
            return
    # be sure to add python.exe if the user forgets to include the file name
    if os.path.isdir(path_to_python):
        path_to_python = os.path.join(path_to_python, "python.exe")
    if not os.path.isfile(path_to_python):
        print("The path to python provided is not a full path to the python.exe file or this path does not exist, was given %s.\nPlease run again with the command line option '-p' followed by the correct full path to python.\nRun dev_setup.py with the -h flag for more details" % path_to_python)
        return
    # make windows associate .py with python
    subprocess.check_call('assoc .py=Python', shell=True)
    subprocess.check_call('ftype Python=%s ' % path_to_python + '"%1" %*', shell=True)
    # Collect PmagPy's programs directory and each of its immediate
    # subdirectories so they can be appended to PATH.
    PmagPyDir = os.path.abspath(".")
    ProgramsDir = os.path.join(PmagPyDir, 'programs')
    dirs_to_add = [ProgramsDir]
    for d in next(os.walk(ProgramsDir))[1]:
        dirs_to_add.append(os.path.join(ProgramsDir, d))
    path = str(subprocess.check_output('echo %PATH%', shell=True)).strip('\n')
    # NOTE(review): this checks for the substring "PATH" in the echoed
    # output — presumably to detect an unexpanded "%PATH%" literal when the
    # variable is undefined — TODO confirm; an expanded PATH containing
    # "PATH" anywhere would also be discarded here.
    if "PATH" in path:
        path = ''
    pypath = str(subprocess.check_output(
        'echo %PYTHONPATH%', shell=True)).strip('\n')
    # Same substring heuristic for PYTHONPATH: start fresh if it looks
    # unset, otherwise append to the existing value.
    if "PYTHONPATH" in pypath:
        pypath = PmagPyDir + ';' + ProgramsDir
    else:
        pypath += ';' + PmagPyDir + ';' + ProgramsDir
    for d_add in dirs_to_add:
        path += ';' + d_add
    # De-duplicate both lists while preserving order, stripping any quotes.
    unique_path_list = []
    for p in path.split(';'):
        p = p.replace('"', '')
        if p not in unique_path_list:
            unique_path_list.append(p)
    unique_pypath_list = []
    for p in pypath.split(';'):
        p = p.replace('"', '')
        if p not in unique_pypath_list:
            unique_pypath_list.append(p)
    path = functools.reduce(lambda x, y: x + ';' + y, unique_path_list)
    pypath = functools.reduce(lambda x, y: x + ';' + y, unique_pypath_list)
    # Persist the new values for future shells (setx does not affect the
    # current command prompt, hence the restart notice below).
    print('setx PATH "%s"' % path)
    subprocess.call('setx PATH "%s"' % path, shell=True)
    print('setx PYTHONPATH "%s"' % pypath)
    subprocess.call('setx PYTHONPATH "%s"' % (pypath), shell=True)
    print("Install complete. Please restart the command prompt to complete install")
Sets the .py extension to be associated with the ftype Python which is then set to the python.exe you provide in the path_to_python variable or after the -p flag if run as a script. Once the python environment is set up the function proceeds to set PATH and PYTHONPATH using setx. Parameters ---------- path_to_python : the path the python.exe you want windows to execute when running .py files
def get_item_sh(self, item, roles=None, date_field=None):
    """Add sorting hat enrichment fields for each requested role."""
    eitem_sh = {}
    created = str_to_datetime(date_field)

    def _fill_unknowns(prefix):
        # Replace empty enrichment values with the sentinel unknown value.
        for suffix in ('_org_name', '_name', '_user_name'):
            key = prefix + suffix
            if not eitem_sh[key]:
                eitem_sh[key] = SH_UNKNOWN_VALUE

    for rol in roles:
        identity = self.get_sh_identity(item, rol)
        eitem_sh.update(self.get_item_sh_fields(identity, created, rol=rol))
        _fill_unknowns(rol)

        # The author field is common to all data sources: duplicate this
        # role's fields under the generic "author" prefix.
        if rol == self.get_field_author():
            identity = self.get_sh_identity(item, rol)
            eitem_sh.update(
                self.get_item_sh_fields(identity, created, rol="author"))
            _fill_unknowns('author')

    return eitem_sh
Add sorting hat enrichment fields
def slicer(document, first_page=None, last_page=None, suffix='sliced', tempdir=None):
    """Slice a PDF document to remove pages.

    :param document: path to the source PDF.
    :param first_page: one-based first page to keep (None keeps from start).
    :param last_page: one-based last page to keep (None keeps to end).
    :param suffix: suffix appended to the output name when no tempdir given.
    :param tempdir: directory for a persistent temporary output file.
    :return: path of the written output PDF.
    """
    # Set output file name
    if tempdir:
        with NamedTemporaryFile(suffix='.pdf', dir=tempdir, delete=False) as temp:
            output = temp.name
    elif suffix:
        output = os.path.join(os.path.dirname(document), add_suffix(document, suffix))
    else:
        with NamedTemporaryFile(suffix='.pdf') as temp:
            output = temp.name

    # Reindex the one-based user input to zero-based.
    # BUG FIX: the original read `first_page - 1 if not None else None`;
    # since `not None` is always True it unconditionally evaluated
    # `first_page - 1` and crashed with TypeError on the default None.
    first_page = first_page - 1 if first_page is not None else None

    # Validate page range by comparing selection to number of pages in PDF document
    pages = Info(document).pages
    invalid = ('Number of pages: ' + str(pages) + ' ----> Page Range Input: '
               + str(first_page) + '-' + str(last_page))
    # Substitute open-ended bounds so None selections validate correctly.
    start = first_page if first_page is not None else 0
    stop = last_page if last_page is not None else pages
    assert start <= stop <= pages, invalid

    pdf = PdfFileReader(document)
    writer = PdfFileWriter()

    for page in list(range(pdf.getNumPages()))[first_page:last_page]:
        writer.addPage(pdf.getPage(page))

    with open(output, 'wb') as out:
        writer.write(out)
    return output
Slice a PDF document to remove pages.
def decoded_output_boxes(self):
    """
    Returns:
        Nx#classx4
    """
    # The final cascade stage produces one class-agnostic box per proposal;
    # replicate it across every class to match the expected output shape.
    boxes = self._cascade_boxes[-1]
    boxes = tf.expand_dims(boxes, 1)
    return tf.tile(boxes, [1, self.num_classes, 1])
Returns: Nx#classx4
def release_to_branch(self, release_id):
    """Shortcut to translate a release identifier to a branch name.

    :param release_id: A :attr:`Release.identifier` value (a string).
    :returns: A branch name (a string).
    :raises: :exc:`~exceptions.TypeError` when :attr:`release_scheme`
             isn't 'branches'.
    """
    self.ensure_release_scheme('branches')
    release = self.releases[release_id]
    return release.revision.branch
Shortcut to translate a release identifier to a branch name. :param release_id: A :attr:`Release.identifier` value (a string). :returns: A branch name (a string). :raises: :exc:`~exceptions.TypeError` when :attr:`release_scheme` isn't 'branches'.
def _setup_gc2_framework(self):
    """
    This method establishes the GC2 framework for a multi-segment (and
    indeed multi-typology) case based on the description in Spudich &
    Chiou (2015) - see section on Generalized Coordinate System for
    Multiple Rupture Traces
    """
    # Generate cartesian edge set
    edge_sets = self._get_cartesian_edge_set()
    self.gc2_config = {}
    # Determine furthest two points apart
    endpoint_set = numpy.vstack([cep for cep in self.cartesian_endpoints])
    dmat = squareform(pdist(endpoint_set))
    irow, icol = numpy.unravel_index(numpy.argmax(dmat), dmat.shape)
    # Join further points to form a vector (a_hat in Spudich & Chiou)
    # According to Spudich & Chiou, a_vec should be eastward trending
    if endpoint_set[irow, 0] > endpoint_set[icol, 0]:
        # Row point is to the east of column point
        beginning = endpoint_set[icol, :2]
        ending = endpoint_set[irow, :2]
    else:
        # Column point is to the east of row point
        beginning = endpoint_set[irow, :2]
        ending = endpoint_set[icol, :2]
    # Convert to unit vector
    a_vec = ending - beginning
    self.gc2_config["a_hat"] = a_vec / numpy.linalg.norm(a_vec)
    # Get e_j set: projection of each edge's end-to-end vector onto a_hat
    self.gc2_config["ejs"] = []
    for c_edges in self.cartesian_edges:
        self.gc2_config["ejs"].append(
            numpy.dot(c_edges[-1, :2] - c_edges[0, :2],
                      self.gc2_config["a_hat"]))
    # A "total E" is defined as the sum of the e_j values
    self.gc2_config["e_tot"] = sum(self.gc2_config["ejs"])
    sign_etot = numpy.sign(self.gc2_config["e_tot"])
    b_vec = numpy.zeros(2)
    self.gc2_config["sign"] = []
    for i, c_edges in enumerate(self.cartesian_edges):
        # A segment is concordant when its e_j agrees in sign with e_tot.
        segment_sign = numpy.sign(self.gc2_config["ejs"][i]) * sign_etot
        self.gc2_config["sign"].append(segment_sign)
        if segment_sign < 0:
            # Segment is discordant - reverse the points (mutates both the
            # stored edges and endpoints in place)
            c_edges = numpy.flipud(c_edges)
            self.cartesian_edges[i] = c_edges
            self.cartesian_endpoints[i] = numpy.flipud(
                self.cartesian_endpoints[i])
        b_vec += (c_edges[-1, :2] - c_edges[0, :2])
    # Get unit vector
    self.gc2_config["b_hat"] = b_vec / numpy.linalg.norm(b_vec)
    # Choose the origin p0 so that b_hat points away from it along a_vec.
    if numpy.dot(a_vec, self.gc2_config["b_hat"]) >= 0.0:
        self.p0 = beginning
    else:
        self.p0 = ending
    # To later calculate Ry0 it is necessary to determine the maximum
    # GC2-U coordinate for the fault
    self._get_gc2_coordinates_for_rupture(edge_sets)
This method establishes the GC2 framework for a multi-segment (and indeed multi-typology) case based on the description in Spudich & Chiou (2015) - see section on Generalized Coordinate System for Multiple Rupture Traces
def _read_msg(self): """read message from server""" # # NOTE: # '_recv_socket(nbytes)' was implemented as # 'socket.recv(nbytes, socket.MSG_WAITALL)' # but socket.MSG_WAITALL proved not reliable # def _recv_socket(nbytes): """read nbytes bytes from self.socket""" # # code below is written under the assumption that # 'nbytes' is smallish so that the 'while len(buf) < nbytes' loop # is entered rarerly # try: buf = self.socket.recv(nbytes) except IOError as err: raise ConnError(*err.args) if not buf: raise ShortRead(0, nbytes) while len(buf) < nbytes: try: tmp = self.socket.recv(nbytes - len(buf)) except IOError as err: raise ConnError(*err.args) if not tmp: if self.verbose: print('ee', repr(buf)) raise ShortRead(len(buf), nbytes) buf += tmp assert len(buf) == nbytes, (buf, len(buf), nbytes) return buf data = _recv_socket(_FromServerHeader.header_size) header = _FromServerHeader(data) if self.verbose: print('<-', repr(header)) # error conditions if header.version != 0: raise MalformedHeader('bad version', header) if header.payload > MAX_PAYLOAD: raise MalformedHeader('huge payload, unwilling to read', header) if header.payload > 0: payload = _recv_socket(header.payload) if self.verbose: print('..', repr(payload)) assert header.size <= header.payload payload = payload[:header.size] else: payload = bytes() return header, payload
read message from server
def airline_delay(data_set='airline_delay', num_train=700000, num_test=100000, seed=default_seed):
    """Airline delay data used in Gaussian Processes for Big Data by
    Hensman, Fusi and Lawrence.

    :param data_set: name of the data set to load/download.
    :param num_train: number of training points to keep.
    :param num_test: number of test points to keep.
    :param seed: random seed used for the train/test shuffle.
    :return: data_details_return dict with X/Y train and test arrays plus
        covariate and response names.
    """
    if not data_available(data_set):
        download_data(data_set)
    dir_path = os.path.join(data_path, data_set)
    filename = os.path.join(dir_path, 'filtered_data.pickle')

    # 1. Load the dataset
    import pandas as pd
    data = pd.read_pickle(filename)

    # WARNING: removing year
    data.pop('Year')

    # Get data matrices: ArrDelay is the response, everything else covariates.
    Yall = data.pop('ArrDelay').values[:,None]
    Xall = data.values

    # Subset the data (memory!!) — keep only the first train+test rows.
    all_data = num_train+num_test
    Xall = Xall[:all_data]
    Yall = Yall[:all_data]

    # Get testing points via a seeded shuffle; first num_test indices test,
    # the rest train.
    np.random.seed(seed=seed)
    N_shuffled = permute(Yall.shape[0])
    train, test = N_shuffled[num_test:], N_shuffled[:num_test]
    X, Y = Xall[train], Yall[train]
    Xtest, Ytest = Xall[test], Yall[test]

    covariates = ['month', 'day of month', 'day of week', 'departure time', 'arrival time', 'air time', 'distance to travel', 'age of aircraft / years']
    response = ['delay']

    return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'seed' : seed, 'info': "Airline delay data used for demonstrating Gaussian processes for big data.", 'covariates': covariates, 'response': response}, data_set)
Airline delay data used in Gaussian Processes for Big Data by Hensman, Fusi and Lawrence
def get_job_list(self, project_name):
    """
    Get the list of pending, running and finished jobs of some project.

    :param project_name: the project name
    :return: a dictionary of job names and statuses, for example:
        {"status": "ok",
         "pending": [{"id": "78391cc0fcaf11e1b0090800272a6d06", "spider": "spider1"}],
         "running": [{"id": "422e608f9f28cef127b3d5ef93fe9399", "spider": "spider2",
                      "start_time": "2012-09-12 10:14:03.594664"}],
         "finished": [{"id": "2f16646cfcaf11e1b0090800272a6d06", "spider": "spider3",
                       "start_time": "2012-09-12 10:14:03.594664",
                       "end_time": "2012-09-12 10:24:03.594664"}]}
    """
    command = self.command_set['listjobs']
    url, method = command[0], command[1]
    response = http_utils.request(url,
                                  method_type=method,
                                  data={'project': project_name},
                                  return_type=http_utils.RETURN_JSON)
    if response is None:
        # Fall back to an empty job listing when the request failed.
        logging.warning('%s failure: not found or connection fail'
                        % sys._getframe().f_code.co_name)
        response = JobList().__dict__
    return response
Get the list of pending, running and finished jobs of some project. :param project_name: the project name :return: a dictionary whose lists include job name and status example: {"status": "ok", "pending": [{"id": "78391cc0fcaf11e1b0090800272a6d06", "spider": "spider1"}], "running": [{"id": "422e608f9f28cef127b3d5ef93fe9399", "spider": "spider2", "start_time": "2012-09-12 10:14:03.594664"}], "finished": [{"id": "2f16646cfcaf11e1b0090800272a6d06", "spider": "spider3", "start_time": "2012-09-12 10:14:03.594664", "end_time": "2012-09-12 10:24:03.594664"}]}
def visit_keyword(self, node):
    """return an astroid.Keyword node as string"""
    rendered = node.value.accept(self)
    if node.arg is None:
        # A bare ** unpacking carries no keyword name.
        return "**%s" % rendered
    return "%s=%s" % (node.arg, rendered)
return an astroid.Keyword node as string
def cutoff_filename(prefix, suffix, input_str):
    """
    Cuts off the start and end of a string, as specified by 2 parameters.

    Parameters
    ----------
    prefix : string, if input_str starts with prefix, will cut off prefix
    suffix : string, if input_str ends with suffix, will cut off suffix
    input_str : the string to be processed

    Returns
    -------
    A string, from which the start and end have been cut
    """
    # BUG FIX: the original used `prefix is not ''` / `suffix is not ''`,
    # an identity comparison with a literal (SyntaxWarning on modern
    # Python, and implementation-dependent).  Truthiness is the correct
    # and equivalent check here.
    if prefix and input_str.startswith(prefix):
        input_str = input_str[len(prefix):]
    if suffix and input_str.endswith(suffix):
        input_str = input_str[:-len(suffix)]
    return input_str
Cuts off the start and end of a string, as specified by 2 parameters Parameters ---------- prefix : string, if input_str starts with prefix, will cut off prefix suffix : string, if input_str end with suffix, will cut off suffix input_str : the string to be processed Returns ------- A string, from which the start and end have been cut
def delete_namespace(parsed_xml):
    """
    Identifies the namespace associated with the root node of a XML
    document and removes that namespace from the document.

    :param parsed_xml: lxml.Etree object.
    :return: Returns the source document with the namespace removed.
    """
    root_tag = parsed_xml.getroot().tag
    if root_tag.startswith('{'):
        # Tag has the form "{namespace-uri}localname": extract the URI
        # between the braces and strip it from the whole tree.
        namespace = root_tag[1:root_tag.find('}')]
        remove_namespace(parsed_xml, namespace)
    return parsed_xml
Identifies the namespace associated with the root node of a XML document and removes that names from the document. :param parsed_xml: lxml.Etree object. :return: Returns the sources document with the namespace removed.
def select_many_with_index(
        self,
        collection_selector=IndexedElement,
        result_selector=lambda source_element, collection_element: collection_element):
    '''Projects each element of a sequence to an intermediate new sequence,
    incorporating the index of the element, flattens the resulting sequence
    into one sequence and optionally transforms the flattened sequence
    using a selector function.

    Note: This method uses deferred execution.

    Args:
        collection_selector: A binary function mapping each element of the
            source sequence into an intermediate sequence, by incorporating
            its index in the source sequence. The two positional arguments
            to the function are the zero-based index of the source element
            and the value of the element. The result of the function should
            be an iterable derived from the index and element value. If no
            collection_selector is provided, the elements of the
            intermediate sequence will consist of tuples of (index,
            element) from the source sequence.

        result_selector: An optional binary function mapping the elements
            in the flattened intermediate sequence together with their
            corresponding source elements to elements of the result
            sequence. The two positional arguments of the result_selector
            are, first the source element corresponding to an element from
            the intermediate sequence, and second the actual element from
            the intermediate sequence. The return value should be the
            corresponding value in the result sequence. If no
            result_selector function is provided, the elements of the
            flattened intermediate sequence are returned untransformed.

    Returns:
        A Queryable over a generated sequence whose elements are the result
        of applying the one-to-many collection_selector to each element of
        the source sequence which incorporates both the index and value of
        the source element, concatenating the results into an intermediate
        sequence, and then mapping each of those elements through the
        result_selector into the result sequence.

    Raises:
        ValueError: If this Queryable has been closed.
        TypeError: If projector [and selector] are not callable.
    '''
    # Validate the queryable and both selectors eagerly; only the actual
    # projection is deferred (inside _generate_select_many_with_index).
    if self.closed():
        raise ValueError("Attempt to call select_many_with_index() on a "
                         "closed Queryable.")

    if not is_callable(collection_selector):
        raise TypeError("select_many_with_index() parameter "
                        "projector={0} is not callable".format(repr(collection_selector)))

    if not is_callable(result_selector):
        raise TypeError("select_many_with_index() parameter "
                        "selector={0} is not callable".format(repr(result_selector)))

    return self._create(
        self._generate_select_many_with_index(collection_selector,
                                              result_selector))
Projects each element of a sequence to an intermediate new sequence, incorporating the index of the element, flattens the resulting sequence into one sequence and optionally transforms the flattened sequence using a selector function. Note: This method uses deferred execution. Args: collection_selector: A binary function mapping each element of the source sequence into an intermediate sequence, by incorporating its index in the source sequence. The two positional arguments to the function are the zero-based index of the source element and the value of the element. The result of the function should be an iterable derived from the index and element value. If no collection_selector is provided, the elements of the intermediate sequence will consist of tuples of (index, element) from the source sequence. result_selector: An optional binary function mapping the elements in the flattened intermediate sequence together with their corresponding source elements to elements of the result sequence. The two positional arguments of the result_selector are, first the source element corresponding to an element from the intermediate sequence, and second the actual element from the intermediate sequence. The return value should be the corresponding value in the result sequence. If no result_selector function is provided, the elements of the flattened intermediate sequence are returned untransformed. Returns: A Queryable over a generated sequence whose elements are the result of applying the one-to-many collection_selector to each element of the source sequence which incorporates both the index and value of the source element, concatenating the results into an intermediate sequence, and then mapping each of those elements through the result_selector into the result sequence. Raises: ValueError: If this Queryable has been closed. TypeError: If projector [and selector] are not callable.
def draw(self):
    '''
    Draws samples from the `true` distribution.

    Returns:
        `np.ndarray` of samples.
    '''
    # Sample real images, then apply the conditioning transform.
    sampled_arr = self.__image_true_sampler.draw()
    return self.add_condition(sampled_arr)
Draws samples from the `true` distribution. Returns: `np.ndarray` of samples.
def _scale_back_response(bqm, response, scalar, ignored_interactions, ignored_variables, ignore_offset): """Helper function to scale back the response of sample method""" if len(ignored_interactions) + len( ignored_variables) + ignore_offset == 0: response.record.energy = np.divide(response.record.energy, scalar) else: response.record.energy = bqm.energies((response.record.sample, response.variables)) return response
Helper function to scale back the response of sample method
def file_exists(self, filename, shutit_pexpect_child=None, directory=False,
                note=None, loglevel=logging.DEBUG):
    """Return True if file exists on the target host, else False

    @param filename: Filename to determine the existence of.
    @param shutit_pexpect_child: See send()
    @param directory: Indicate that the file is a directory.
    @param note: See send()

    @type filename: string
    @type directory: boolean
    @rtype: boolean
    """
    shutit_global.shutit_global_object.yield_to_draw()
    # Fall back to the current session's child when none is supplied.
    child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
    session = self.get_shutit_pexpect_session_from_child(child)
    return session.file_exists(filename=filename,
                               directory=directory,
                               note=note,
                               loglevel=loglevel)
Return True if file exists on the target host, else False @param filename: Filename to determine the existence of. @param shutit_pexpect_child: See send() @param directory: Indicate that the file is a directory. @param note: See send() @type filename: string @type directory: boolean @rtype: boolean
def stream(self, status=values.unset, phone_number=values.unset,
           incoming_phone_number_sid=values.unset,
           friendly_name=values.unset, unique_name=values.unset, limit=None,
           page_size=None):
    """
    Streams DependentHostedNumberOrderInstance records from the API as a
    generator stream. This operation lazily loads records as efficiently
    as possible until the limit is reached.

    The results are returned as a generator, so this operation is memory
    efficient.

    :param DependentHostedNumberOrderInstance.Status status: The Status of
        this HostedNumberOrder.
    :param unicode phone_number: An E164 formatted phone number.
    :param unicode incoming_phone_number_sid: IncomingPhoneNumber sid.
    :param unicode friendly_name: A human readable description of this
        resource.
    :param unicode unique_name: A unique, developer assigned name of this
        HostedNumberOrder.
    :param int limit: Upper limit for the number of records to return.
        stream() guarantees to never return more than limit. Default is no
        limit.
    :param int page_size: Number of records to fetch per request, when not
        set will use the default value of 50 records. If no page_size is
        defined but a limit is defined, stream() will attempt to read the
        limit with the most efficient page size, i.e. min(limit, 1000)

    :returns: Generator that will yield up to limit results
    :rtype: list[twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderInstance]
    """
    # Translate limit/page_size into effective read limits.
    limits = self._version.read_limits(limit, page_size)

    # Fetch only the first page here; the version-level stream pages
    # lazily through the rest.
    page = self.page(
        status=status,
        phone_number=phone_number,
        incoming_phone_number_sid=incoming_phone_number_sid,
        friendly_name=friendly_name,
        unique_name=unique_name,
        page_size=limits['page_size'],
    )

    return self._version.stream(page, limits['limit'], limits['page_limit'])
Streams DependentHostedNumberOrderInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. The results are returned as a generator, so this operation is memory efficient. :param DependentHostedNumberOrderInstance.Status status: The Status of this HostedNumberOrder. :param unicode phone_number: An E164 formatted phone number. :param unicode incoming_phone_number_sid: IncomingPhoneNumber sid. :param unicode friendly_name: A human readable description of this resource. :param unicode unique_name: A unique, developer assigned name of this HostedNumberOrder. :param int limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderInstance]
def remover_grupo(self, id_equipamento, id_grupo):
    """Remove the association between an equipment and an equipment group.

    :param id_equipamento: Equipment identifier.
    :param id_grupo: Equipment group identifier.

    :return: None

    :raise EquipamentoGrupoNaoExisteError: The group/equipment association
        is not registered.
    :raise EquipamentoNaoExisteError: Equipment not registered.
    :raise EquipmentDontRemoveError: Failure to remove an association between
        an equipment and a group because the group is related only to a group.
    :raise InvalidParameterError: The equipment and/or group identifier is
        null or invalid.
    :raise DataBaseError: networkapi failed to access the database.
    :raise XMLError: networkapi failed to generate the response XML.
    """
    if not is_valid_int_param(id_equipamento):
        raise InvalidParameterError(
            u'O identificador do equipamento é inválido ou não foi informado.')

    if not is_valid_int_param(id_grupo):
        raise InvalidParameterError(
            u'O identificador do grupo é inválido ou não foi informado.')

    url = 'equipamentogrupo/equipamento/{0}/egrupo/{1}/'.format(
        id_equipamento, id_grupo)

    code, xml = self.submit(None, 'DELETE', url)
    return self.response(code, xml)
Remove a associação de um equipamento com um grupo de equipamento. :param id_equipamento: Identificador do equipamento. :param id_grupo: Identificador do grupo de equipamento. :return: None :raise EquipamentoGrupoNaoExisteError: Associação entre grupo e equipamento não cadastrada. :raise EquipamentoNaoExisteError: Equipamento não cadastrado. :raise EquipmentDontRemoveError: Failure to remove an association between an equipment and a group because the group is related only to a group. :raise InvalidParameterError: O identificador do equipamento e/ou do grupo são nulos ou inválidos. :raise DataBaseError: Falha na networkapi ao acessar o banco de dados. :raise XMLError: Falha na networkapi ao gerar o XML de resposta.
def list_check(*args, func=None):
    """Verify that every positional argument is a list-like sequence.

    Raises ListError naming the calling function (taken from the stack when
    ``func`` is not supplied) for the first non-list argument found.
    """
    caller = func or inspect.stack()[2][3]
    list_like = (list, collections.UserList, collections.abc.MutableSequence)
    for item in args:
        if isinstance(item, list_like):
            continue
        raise ListError(
            f'Function {caller} expected list, {type(item).__name__} got instead.')
Check if arguments are list type.
def _check_wait_input_flag(self):
    """
    Returns a function to stop the search of the investigated node of the
    ArciDispatch algorithm.

    :return:
        A function to stop the search.
    :rtype: (bool, str) -> bool
    """
    wf_pred = self._wf_pred  # Namespace shortcuts.

    # For every node, bind a predicate that checks whether a given mapping of
    # workflow predecessors covers ALL of the node's declared predecessors.
    pred = {k: set(v).issubset for k, v in self._pred.items()}

    if self._wait_in:
        we = self._wait_in.get  # Namespace shortcut.

        def check_wait_input_flag(wait_in, n_id):
            """
            Stops the search of the investigated node of the ArciDispatch
            algorithm, until all inputs are satisfied.

            :param wait_in:
                If True the node is waiting input estimations.
            :type wait_in: bool

            :param n_id:
                Data or function node id.
            :type n_id: str

            :return:
                True if all node inputs are satisfied, otherwise False.
            :rtype: bool
            """
            # The per-node override (self._wait_in) takes precedence over the
            # wait_in flag passed by the caller.
            if we(n_id, wait_in):
                # Stop only while some declared predecessor is still missing
                # from the workflow.
                return not pred[n_id](wf_pred[n_id])
            return False

    else:
        # No per-node overrides: honour the wait_in flag directly.
        def check_wait_input_flag(wait_in, n_id):
            # Return true if the node inputs are satisfied.
            return wait_in and not pred[n_id](wf_pred[n_id])

    return check_wait_input_flag
Returns a function to stop the search of the investigated node of the ArciDispatch algorithm. :return: A function to stop the search. :rtype: (bool, str) -> bool
def process_module(self, yam):
    """Process data nodes, RPCs and notifications in a single module.

    Annotations are handled first; RPC and notification children are then
    dispatched to their handlers and removed from the module's child list
    before the remaining data nodes are processed.
    """
    for ann in yam.search(("ietf-yang-metadata", "annotation")):
        self.process_annotation(ann)

    handlers = {
        "rpc": self.process_rpc,
        "notification": self.process_notification,
    }
    # Iterate over a copy: handled children are removed from the live list.
    for child in list(yam.i_children):
        handler = handlers.get(child.keyword)
        if handler is None:
            continue
        handler(child)
        yam.i_children.remove(child)

    self.process_children(yam, "//nc:*", 1)
Process data nodes, RPCs and notifications in a single module.
def forward_all_signals_async(target_pid, process_name):
    """Install signal handlers that forward all forwardable signals to the given process.

    :param target_pid: PID of the process that should receive the signals.
    :param process_name: human-readable name of the target process (used by
        the forwarding helper, e.g. for logging).
    """
    def forwarding_signal_handler(signum):
        _forward_signal(signum, process_name, forwarding_signal_handler.target_pid)

    # Somehow we get a Python SystemError sometimes if we access target_pid directly from inside function.
    # So stash it as a function attribute instead of closing over it.
    forwarding_signal_handler.target_pid = target_pid

    for signum in _FORWARDABLE_SIGNALS:
        # Need to directly access libc function,
        # the state of the signal module is incorrect due to the clone()
        # (it may think we are in a different thread than the main thread).
        libc.signal(signum, forwarding_signal_handler)

    # Reactivate delivery of signals such that our handler gets called.
    reset_signal_handling()
Install all signal handler that forwards all signals to the given process.
def sudo_required(func):
    """
    Decorator enforcing elevated (sudo) privileges on a view.

    Non-sudo requests are redirected to the sudo page; should likely be
    paired with ``@login_required``.

    >>> @sudo_required
    >>> def secure_page(request):
    >>>     ...
    """
    @wraps(func)
    def wrapped(request, *args, **kwargs):
        if request.is_sudo():
            return func(request, *args, **kwargs)
        return redirect_to_sudo(request.get_full_path())
    return wrapped
Enforces a view to have elevated privileges. Should likely be paired with ``@login_required``. >>> @sudo_required >>> def secure_page(request): >>> ...
def is_expired(self):
    """
    ``True`` if the signature has an expiration date, and is expired.
    Otherwise, ``False``
    """
    expires_at = self.expires_at
    # An expiry equal to the creation timestamp is treated as "no
    # expiration" — presumably the upstream format encodes the absence of an
    # expiry this way; TODO confirm against the producer of expires_at.
    if expires_at is not None and expires_at != self.created:
        # NOTE(review): naive UTC comparison — assumes expires_at is also a
        # naive UTC datetime; verify callers never pass tz-aware values.
        return expires_at < datetime.utcnow()
    return False
``True`` if the signature has an expiration date, and is expired. Otherwise, ``False``
def overlay_gateway_access_lists_ipv6_in_cg_ipv6_acl_in_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels") name_key = ET.SubElement(overlay_gateway, "name") name_key.text = kwargs.pop('name') access_lists = ET.SubElement(overlay_gateway, "access-lists") ipv6 = ET.SubElement(access_lists, "ipv6") in_cg = ET.SubElement(ipv6, "in") ipv6_acl_in_name = ET.SubElement(in_cg, "ipv6-acl-in-name") ipv6_acl_in_name.text = kwargs.pop('ipv6_acl_in_name') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
async def Create(self, notes):
    '''
    notes : str
    Returns -> typing.Union[str, int, _ForwardRef('Number')]
    '''
    # Build the RPC envelope and forward it; the reply is returned verbatim.
    msg = dict(type='Backups',
               request='Create',
               version=2,
               params={'notes': notes})
    return await self.rpc(msg)
notes : str Returns -> typing.Union[str, int, _ForwardRef('Number')]
def destroy(self, names):
    '''
    Destroy the named VMs.

    ``names`` may be a single comma-separated string or a list of VM names.
    '''
    if isinstance(names, six.string_types):
        names = names.split(',')
    mapper = salt.cloud.Map(self._opts_defaults(destroy=True))
    return salt.utils.data.simple_types_filter(mapper.destroy(names))
Destroy the named VMs
def _parse_storage_embedded_health(self, data):
    """Gets the storage data from get_embedded_health

    Parse the get_host_health_data() for essential properties.
    Walks STORAGE -> CONTROLLER -> LOGICAL_DRIVE entries and returns the
    size of the smallest logical drive found, minus one GB (see below).

    :param data: the output returned by get_host_health_data()
    :returns: disk size in GB.
    """
    local_gb = 0
    storage = self.get_value_as_list(data['GET_EMBEDDED_HEALTH_DATA'],
                                     'STORAGE')
    if storage is None:
        # We dont raise exception because this dictionary
        # is available only when RAID is configured.
        # If we raise error here then we will always fail
        # inspection where this module is consumed. Hence
        # as a workaround just return 0.
        return local_gb
    # Track the smallest logical-drive capacity seen; 0 means "none yet".
    minimum = local_gb
    for item in storage:
        cntlr = self.get_value_as_list(item, 'CONTROLLER')
        if cntlr is None:
            continue
        for s in cntlr:
            drive = self.get_value_as_list(s, 'LOGICAL_DRIVE')
            if drive is None:
                continue
            # NOTE(review): this inner loop shadows the outer `item`; safe
            # here because the outer value is not used afterwards.
            for item in drive:
                for key, val in item.items():
                    if key == 'CAPACITY':
                        capacity = val['VALUE']
                        # CAPACITY values look like "500 GB"; strip the
                        # space so string_to_bytes can parse them.
                        local_bytes = (strutils.string_to_bytes(
                            capacity.replace(' ', ''), return_int=True))
                        local_gb = int(local_bytes / (1024 * 1024 * 1024))
                        if minimum >= local_gb or minimum == 0:
                            minimum = local_gb
    # Return disk size 1 less than the actual disk size. This prevents
    # the deploy to fail from Nova when root_gb is same as local_gb
    # in Ironic. When the disk size is used as root_device hints,
    # then it should be given as the actual size i.e.
    # ironic (node.properties['local_gb'] + 1) else root device
    # hint will fail.
    if minimum:
        minimum = minimum - 1
    return minimum
Gets the storage data from get_embedded_health Parse the get_host_health_data() for essential properties :param data: the output returned by get_host_health_data() :returns: disk size in GB.
def full(self, name, fill_value, **kwargs):
    """Create an array pre-filled with *fill_value*.

    Keyword arguments as per :func:`zarr.creation.full`.
    """
    create = self._write_op
    return create(self._full_nosync, name, fill_value, **kwargs)
Create an array. Keyword arguments as per :func:`zarr.creation.full`.
def watch_logfile(self, logfile_path):
    """Analyze queries from the tail of a given log file.

    Follows the file indefinitely, aggregating each parsed query and
    periodically printing a progress report to stderr; a final report goes
    to stdout on exit (including Ctrl-C).

    :param logfile_path: path of the log file to follow.
    :returns: 0 (process exit status).
    """
    self._run_stats['logSource'] = logfile_path
    log_parser = LogParser()

    # For each new line in the logfile ...
    output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
    try:
        first_line = True
        for line in self._tail_file(open(logfile_path),
                                    WATCH_INTERVAL_SECONDS):
            if first_line:
                self._run_stats['timeRange']['start'] = get_line_time(line)
                # BUG FIX: the flag was never cleared, so the start of the
                # time range was overwritten on every line.
                first_line = False
            self._process_query(line, log_parser)
            self._run_stats['timeRange']['end'] = get_line_time(line)
            if time.time() >= output_time:
                self._output_aggregated_report(sys.stderr)
                output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
    except KeyboardInterrupt:
        sys.stderr.write("Interrupt received\n")
    finally:
        # Always emit the final aggregated report, even on interrupt.
        self._output_aggregated_report(sys.stdout)

    return 0
Analyzes queries from the tail of a given log file
def min(self, spec):
    """Adds `min` operator that specifies lower bound for specific index.

    :Parameters:
      - `spec`: a list of field, limit pairs specifying the inclusive
        lower bound for all keys of a specific index in order.

    .. versionadded:: 2.7
    """
    if isinstance(spec, (list, tuple)):
        self.__check_okay_to_chain()
        self.__min = SON(spec)
        return self
    raise TypeError("spec must be an instance of list or tuple")
Adds `min` operator that specifies lower bound for specific index. :Parameters: - `spec`: a list of field, limit pairs specifying the inclusive lower bound for all keys of a specific index in order. .. versionadded:: 2.7
def list_permissions(username=None,
                     resource=None,
                     resource_type='keyspace',
                     permission=None,
                     contact_points=None,
                     port=None,
                     cql_user=None,
                     cql_pass=None):
    '''
    List Cassandra permissions, optionally filtered by user, resource and
    permission name.

    :param username: User to list permissions for; None lists for all users.
    :param resource: The resource (keyspace or table); if None, permissions
        for all resources are listed.
    :param resource_type: Either 'keyspace' (default) or 'table'.
    :param permission: A permission name (e.g. select); if None, all
        permissions are listed.
    :param contact_points: Cassandra cluster addresses, either a string or a
        list of IPs.
    :param port: The Cassandra cluster port, defaults to None.
    :param cql_user: The Cassandra user if authentication is turned on.
    :param cql_pass: The Cassandra user password if authentication is on.
    :return: Dictionary of permissions.
    :rtype: dict

    CLI Example:

    .. code-block:: bash

        salt 'minion1' cassandra_cql.list_permissions

        salt 'minion1' cassandra_cql.list_permissions username=joe \
        resource=test_keyspace permission=select

        salt 'minion1' cassandra_cql.list_permissions username=joe \
        resource=test_table resource_type=table permission=select \
        contact_points=minion1
    '''
    scope = "{0} {1}".format(resource_type, resource) if resource \
        else "all keyspaces"
    perm = "{0} permission".format(permission) if permission \
        else "all permissions"

    query_parts = ["list", perm, "on", scope]
    if username:
        query_parts += ["of", username]
    query = " ".join(query_parts)

    log.debug("Attempting to list permissions with query '%s'", query)

    ret = {}
    try:
        ret = cql_query(query, contact_points, port, cql_user, cql_pass)
    except CommandExecutionError:
        log.critical('Could not list permissions.')
        raise
    except BaseException as e:
        log.critical('Unexpected error while listing permissions: %s', e)
        raise

    return ret
List permissions. :param username: The name of the user to list permissions for. :type username: str :param resource: The resource (keyspace or table), if None, permissions for all resources are listed. :type resource: str :param resource_type: The resource_type (keyspace or table), defaults to 'keyspace'. :type resource_type: str :param permission: A permission name (e.g. select), if None, all permissions are listed. :type permission: str :param contact_points: The Cassandra cluster addresses, can either be a string or a list of IPs. :type contact_points: str | list[str] :param cql_user: The Cassandra user if authentication is turned on. :type cql_user: str :param cql_pass: The Cassandra user password if authentication is turned on. :type cql_pass: str :param port: The Cassandra cluster port, defaults to None. :type port: int :return: Dictionary of permissions. :rtype: dict CLI Example: .. code-block:: bash salt 'minion1' cassandra_cql.list_permissions salt 'minion1' cassandra_cql.list_permissions username=joe resource=test_keyspace permission=select salt 'minion1' cassandra_cql.list_permissions username=joe resource=test_table resource_type=table \ permission=select contact_points=minion1
def key_by(self, to_key: Callable[[T], str]) -> 'TDict[T]':
    """
    Index the elements of this list by a derived key; later elements with
    the same key overwrite earlier ones.

    :param to_key: value -> key

    Usage:

        >>> TList(['a1', 'b2', 'c3']).key_by(lambda x: x[0]).to_json()
        '{"a": "a1","b": "b2","c": "c3"}'
        >>> TList([1, 2, 3, 4, 5]).key_by(lambda x: x % 2).to_json()
        '{"0": 4,"1": 5}'
    """
    keyed = {}
    for element in self:
        keyed[to_key(element)] = element
    return TDict(keyed)
:param to_key: value -> key Usage: >>> TList(['a1', 'b2', 'c3']).key_by(lambda x: x[0]).to_json() '{"a": "a1","b": "b2","c": "c3"}' >>> TList([1, 2, 3, 4, 5]).key_by(lambda x: x % 2).to_json() '{"0": 4,"1": 5}'
def get_copy_token(
        self,
        bucket: str,
        key: str,
        cloud_checksum: str,
) -> typing.Any:
    """
    Given a bucket, key, and the expected cloud-provided checksum, retrieve a
    token that can be passed into :func:`~cloud_blobstore.BlobStore.copy` that
    guarantees the copy refers to the same version of the blob identified by
    the checksum.

    :param bucket: the bucket the object resides in.
    :param key: the key of the object for which checksum is being retrieved.
    :param cloud_checksum: the expected cloud-provided checksum.
    :return: an opaque copy token
    :raises NotImplementedError: always, on this abstract base.
    """
    # Abstract hook: concrete BlobStore backends must override this.
    raise NotImplementedError()
Given a bucket, key, and the expected cloud-provided checksum, retrieve a token that can be passed into :func:`~cloud_blobstore.BlobStore.copy` that guarantees the copy refers to the same version of the blob identified by the checksum. :param bucket: the bucket the object resides in. :param key: the key of the object for which checksum is being retrieved. :param cloud_checksum: the expected cloud-provided checksum. :return: an opaque copy token
def get_index_translog_disable_flush(self):
    """Return a dictionary showing the position of the
    'translog.disable_flush' knob for each index in the cluster.

    The dictionary will look like this:

    {
        "index1": True,      # Autoflushing DISABLED
        "index2": False,     # Autoflushing ENABLED
        "index3": "unknown", # Using default setting (probably enabled)
        ...
    }
    """
    disabled = {}
    settings = self.get('/_settings')
    # The setting may be serialized flat ("index.translog.disable_flush")
    # or nested, depending on the Elasticsearch version; try both layouts.
    setting_getters = [
        lambda s: s['index.translog.disable_flush'],
        lambda s: s['index']['translog']['disable_flush']]

    for idx in settings:
        idx_settings = settings[idx]['settings']
        for getter in setting_getters:
            try:
                disabled[idx] = booleanise(getter(idx_settings))
            except KeyError:
                # This layout is absent for this index; try the next one.
                pass
        if idx not in disabled:
            disabled[idx] = 'unknown'

    return disabled
Return a dictionary showing the position of the 'translog.disable_flush' knob for each index in the cluster. The dictionary will look like this: { "index1": True, # Autoflushing DISABLED "index2": False, # Autoflushing ENABLED "index3": "unknown", # Using default setting (probably enabled) ... }
def get_smtp_mail(self):
    """
    Return the SMTP formatted email, as it may be passed to sendmail.

    The header and CRLF-normalized body are joined with a blank line, and
    the message ends with a trailing CRLF.

    :rtype: string
    :return: The SMTP formatted mail.
    """
    body = self.get_body().replace('\n', '\r\n')
    return '\r\n'.join((self.get_smtp_header(), body, ''))
Returns the SMTP formatted email, as it may be passed to sendmail. :rtype: string :return: The SMTP formatted mail.
def roll_mean(input, window):
    '''Compute a rolling mean over *input* with the given window size.

    NaN entries are excluded from each window's mean; a window containing
    only NaNs yields NaN. Returns an array of length len(input)-window+1.
    '''
    N = len(input)
    if window > N:
        raise ValueError('Out of bound')
    output = np.ndarray(N - window + 1, dtype=input.dtype)

    # Seed the first window, counting only non-NaN observations.
    count = 0
    total = 0.
    for val in input[:window]:
        if val == val:  # NaN-aware: NaN != NaN
            count += 1
            total += val
    output[0] = total / count if count else np.nan

    # Slide the window: drop the trailing value, add the leading one.
    tail = 0
    for pos, val in enumerate(input[window:], start=1):
        old = input[tail]
        tail += 1
        if old == old:
            total -= old
            count -= 1
        if val == val:
            count += 1
            total += val
        output[pos] = total / count if count else np.nan

    return output
Apply a rolling mean function to an array. This is a simple rolling aggregation.
def set_unbind(self):
    """Remove all mouse and keyboard event bindings from this widget."""
    sequences = (
        '<Button-1>', '<Button-3>',
        '<Up>', '<Down>',
        '<Shift-Up>', '<Shift-Down>',
        '<Control-Up>', '<Control-Down>',
        '<Double-Button-1>', '<Double-Button-3>',
        '<Shift-Button-1>', '<Shift-Button-3>',
        '<Control-Button-1>', '<Control-Button-3>',
        '<Enter>',
    )
    for sequence in sequences:
        self.unbind(sequence)
Unsets key bindings.
def clean_storage(block_device):
    '''
    Ensure a block device is clean. That is:

        - unmounted
        - any lvm volume groups are deactivated
        - any lvm physical device signatures removed
        - partition table wiped

    :param block_device: str: Full path of block device to clean.
    '''
    for mount_point, device in mounts():
        if device != block_device:
            continue
        juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
                 (device, mount_point), level=INFO)
        umount(mount_point, persist=True)

    if is_lvm_physical_volume(block_device):
        deactivate_lvm_volume_group(block_device)
        remove_lvm_physical_volume(block_device)
    else:
        zap_disk(block_device)
Ensures a block device is clean. That is: - unmounted - any lvm volume groups are deactivated - any lvm physical device signatures removed - partition table wiped :param block_device: str: Full path to block device to clean.
def _get_row_str(self, i): """ Returns a string representation of the key information in a row """ row_data = ["{:s}".format(self.data['eventID'][i]), "{:g}".format(self.data['year'][i]), "{:g}".format(self.data['month'][i]), "{:g}".format(self.data['day'][i]), "{:g}".format(self.data['hour'][i]), "{:g}".format(self.data['minute'][i]), "{:.1f}".format(self.data['second'][i]), "{:.3f}".format(self.data['longitude'][i]), "{:.3f}".format(self.data['latitude'][i]), "{:.1f}".format(self.data['depth'][i]), "{:.1f}".format(self.data['magnitude'][i])] return " ".join(row_data)
Returns a string representation of the key information in a row
def publish(context):
    """Publish the project.

    Commits the version change, then either creates a GitHub release (with
    built distributions attached) when GitHub integration is enabled, or
    simply tags and pushes.

    :param context: build context carrying module_name, repo_url,
        new_version and the github flag.
    """
    commit_version_change(context)

    if context.github:
        # github token
        project_settings = project_config(context.module_name)
        if not project_settings['gh_token']:
            # First run: interactively acquire and persist a token.
            click.echo('You need a GitHub token for changes to create a release.')
            click.pause(
                'Press [enter] to launch the GitHub "New personal access '
                'token" page, to create a token for changes.'
            )
            click.launch('https://github.com/settings/tokens/new')
            project_settings['gh_token'] = click.prompt('Enter your changes token')
            store_settings(context.module_name, project_settings)
        description = click.prompt('Describe this release')
        upload_url = create_github_release(
            context, project_settings['gh_token'], description
        )
        upload_release_distributions(
            context,
            project_settings['gh_token'],
            build_distributions(context),
            upload_url,
        )
        click.pause('Press [enter] to review and update your new release')
        click.launch(
            '{0}/releases/tag/{1}'.format(context.repo_url, context.new_version)
        )
    else:
        # No GitHub integration: plain git tag + push.
        tag_and_push(context)
Publishes the project
def get_activities_for_objectives(self, objective_ids=None):
    """Gets the activities for the given objectives.

    In plenary mode, the returned list contains all of the activities
    specified in the objective Id list, in the order of the list, including
    duplicates, or an error results if an Id in the supplied list is not
    found or inaccessible. Otherwise, inaccessible Activities may be
    omitted from the list and may present the elements in any order,
    including returning a unique set.

    arg:    objectiveIds (osid.id.IdList): list of objective Ids
    return: (osid.learning.ActivityList) - list of activities
    raise:  NotFound - an objectiveId not found
    raise:  NullArgument - objectiveIdList is null
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    compliance: mandatory - This method is must be implemented.
    """
    if objective_ids is None:
        raise NullArgument()
    # Should also check if objective_id exists?
    activities = []
    for objective_id in objective_ids:
        url_path = construct_url('activities',
                                 bank_id=self._catalog_idstr,
                                 obj_id=objective_id)
        try:
            found = json.loads(self._get_request(url_path))
        except (NotFound, OperationFailed):
            # Non-plenary views tolerate missing/inaccessible objectives.
            if self._activity_view == PLENARY:
                raise
            continue
        if found:
            activities.extend(found)
    return objects.ActivityList(activities)
Gets the activities for the given objectives. In plenary mode, the returned list contains all of the activities specified in the objective Id list, in the order of the list, including duplicates, or an error results if a course offering Id in the supplied list is not found or inaccessible. Otherwise, inaccessible Activities may be omitted from the list and may present the elements in any order including returning a unique set. arg: objectiveIds (osid.id.IdList): list of objective Ids return: (osid.learning.ActivityList) - list of activities raise: NotFound - an objectiveId not found raise: NullArgument - objectiveIdList is null raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure compliance: mandatory - This method is must be implemented.
def clone(self, substitutions, commit=True, **kwargs):
    """
    Clone a DAG, optionally skipping the commit.

    :param substitutions: substitutions to apply while cloning.
    :param commit: when False, skip committing the cloned DAG.
    :param kwargs: forwarded to the underlying store's ``clone``.
    """
    # BUG FIX: `commit` was accepted but silently dropped, so commit=False
    # had no effect despite the documented contract.
    # NOTE(review): assumes self.store.clone accepts a `commit` keyword —
    # confirm against the store implementation.
    return self.store.clone(substitutions, commit=commit, **kwargs)
Clone a DAG, optionally skipping the commit.
def _handle_client_error():
    """
    Handle boto exception and convert to class IO exceptions

    Raises:
        OSError subclasses: IO error.
    """
    # NOTE(review): this generator is presumably decorated with
    # @contextmanager at the definition site (decorator not visible here).
    try:
        yield
    except _ClientError as exception:
        error = exception.response['Error']
        # Translate known service error codes into the matching OSError
        # subclass; anything unrecognized is re-raised unchanged.
        if error['Code'] in _ERROR_CODES:
            raise _ERROR_CODES[error['Code']](error['Message'])
        raise
Handle boto exception and convert to class IO exceptions Raises: OSError subclasses: IO error.
async def system_bus_async(loop = None, **kwargs) :
    "returns a Connection object for the D-Bus system bus."
    # Open a shared (non-private) connection to the system bus, then install
    # the requested additional standard interfaces on it before returning.
    return \
        Connection \
          (
            await dbus.Connection.bus_get_async(DBUS.BUS_SYSTEM, private = False, loop = loop)
          ) \
        .register_additional_standard(**kwargs)
returns a Connection object for the D-Bus system bus.
def from_series(self, series, add_index_column=True):
    """
    Set tabular attributes to the writer from :py:class:`pandas.Series`.

    Following attributes are set by the method:

        - :py:attr:`~.headers`
        - :py:attr:`~.value_matrix`
        - :py:attr:`~.type_hints`

    Args:
        series(pandas.Series):
            Input pandas.Series object.
        add_index_column(bool, optional):
            If |True|, add a column of ``index`` of the ``series``.
            Defaults to |True|.
    """
    self.headers = [series.name if series.name else "value"]
    self.type_hints = [self.__get_typehint_from_dtype(series.dtype)]
    values = series.tolist()

    if add_index_column:
        # Prepend an unnamed index column.
        self.headers = [""] + self.headers
        if self.type_hints:
            self.type_hints = [None] + self.type_hints
        self.value_matrix = [
            [idx, val] for idx, val in zip(series.index.tolist(), values)
        ]
    else:
        self.value_matrix = [[val] for val in values]
Set tabular attributes to the writer from :py:class:`pandas.Series`. Following attributes are set by the method: - :py:attr:`~.headers` - :py:attr:`~.value_matrix` - :py:attr:`~.type_hints` Args: series(pandas.Series): Input pandas.Series object. add_index_column(bool, optional): If |True|, add a column of ``index`` of the ``series``. Defaults to |True|.
def get_v_total_stress_at_depth(self, z):
    """
    Determine the vertical total stress at depth z, where z can be a
    number or an array of numbers.

    :param z: a single depth or an iterable of depths.
    :return: a float for scalar input, otherwise a numpy array.
    """
    if not hasattr(z, "__len__"):
        return self.one_vertical_total_stress(z)
    # Vectorise over an iterable of depths (comprehension replaces the
    # former manual append loop).
    return np.array([self.one_vertical_total_stress(value) for value in z])
Determine the vertical total stress at depth z, where z can be a number or an array of numbers.
def get(self, timeout=None, block=True, throw_dead=True):
    """
    Sleep waiting for a message to arrive on this receiver.

    :param float timeout:
        If not :data:`None`, specifies a timeout in seconds.
    :param bool block:
        If :data:`False`, do not wait for a message; the call is delegated
        to the underlying latch (presumably raising when nothing is
        queued — confirm against Latch.get).
    :param bool throw_dead:
        If :data:`True` (the default), a dead message raises its associated
        exception instead of being returned.

    :raises mitogen.core.ChannelError:
        The remote end indicated the channel should be closed,
        communication with it was lost, or :meth:`close` was called in the
        local process.

    :raises mitogen.core.TimeoutError:
        Timeout was reached.

    :returns:
        :class:`Message` that was received.
    """
    _vv and IOLOG.debug('%r.get(timeout=%r, block=%r)', self, timeout, block)
    try:
        msg = self._latch.get(timeout=timeout, block=block)
    except LatchError:
        # The latch was closed underneath us: surface the reason recorded
        # at close time as a channel-level error.
        raise ChannelError(self.closed_msg)
    if msg.is_dead and throw_dead:
        msg._throw_dead()
    return msg
Sleep waiting for a message to arrive on this receiver. :param float timeout: If not :data:`None`, specifies a timeout in seconds. :raises mitogen.core.ChannelError: The remote end indicated the channel should be closed, communication with it was lost, or :meth:`close` was called in the local process. :raises mitogen.core.TimeoutError: Timeout was reached. :returns: :class:`Message` that was received.
def pairedBEDIterator(inputStreams, mirror=False, mirrorScore=None,
                      ignoreStrand=False, ignoreScore=True, ignoreName=True,
                      sortedby=ITERATOR_SORTED_END, scoreType=float,
                      verbose=False):
    """
    Iterate over multiple BED format files simultaneously and yield lists of
    genomic intervals for each matching set of intervals found. By default,
    regions which are not found in all files will be skipped (mirror =
    false). Optionally (by setting mirror to true) if a file is missing an
    interval, it can be added on-the-fly, and will have the same chrom,
    start and end and name as in other files. The score will be taken from
    the first file in inputStreams if mirrorScore is not set, otherwise that
    value will be used.

    :param inputStreams: a list of input streams in BED format
    :param mirror: if true, add missing elements so all streams contain the
                   same elements. Inserted elements will have the same
    :param ignoreStrand: ignore strand when comparing elements for equality?
    :param ignoreScore: ignore score when comparing elements for equality?
    :param ignoreName: ignore name when comparing elements for equality?
    :param sortedby: must be set to one of the sorting orders for BED
                     streams; we require the streams to be sorted in some
                     fashion.
    :param scoreType: interpret scores as what type? Defaults to float,
                      which is generally the most flexible.
    """
    # let's build our sorting order... the key must mirror the order the
    # input streams are sorted in, so equal elements compare equal.
    sortOrder = ["chrom"]
    if sortedby == ITERATOR_SORTED_START:
        sortOrder.append("start")
        sortOrder.append("end")
    elif sortedby == ITERATOR_SORTED_END:
        sortOrder.append("end")
        sortOrder.append("start")
    if not ignoreStrand:
        sortOrder.append("strand")
    if not ignoreName:
        sortOrder.append("name")
    if not ignoreScore:
        sortOrder.append("score")
    keyFunc = attrgetter(*sortOrder)

    def next_item(iterator):
        """ little internal function to return the next item, or None """
        try:
            return iterator.next()
        except StopIteration:
            return None

    bIterators = [BEDIterator(bfh, verbose=verbose, sortedby=sortedby,
                              scoreType=scoreType) for bfh in inputStreams]
    elements = [next_item(it) for it in bIterators]

    while True:
        assert(len(elements) >= 2)
        if None not in elements and \
           len(set([keyFunc(x) for x in elements])) == 1:
            # All equal -- yield and move on for all streams
            yield [e for e in elements]
            elements = [next_item(it) for it in bIterators]
        else:
            # something wasn't equal.. find the smallest thing, it's about
            # to drop out of range and will never have the chance to match
            # anything again
            minElement = min([x for x in elements if x is not None],
                             key=keyFunc)
            minIndices = [i for i in range(0, len(elements))
                          if elements[i] is not None and
                          keyFunc(elements[i]) == keyFunc(minElement)]
            if mirror:
                # mirror the min item for any streams in which it doesn't
                # match
                score = minElement.score if mirrorScore is None \
                    else mirrorScore
                yield [elements[i] if i in minIndices
                       else GenomicInterval(minElement.chrom,
                                            minElement.start,
                                            minElement.end,
                                            minElement.name,
                                            score,
                                            minElement.strand,
                                            scoreType=scoreType)
                       for i in range(0, len(elements))]

            # move the smallest element onwards now, we're done with it
            for index in minIndices:
                elements[index] = next_item(bIterators[index])

        # stop once all streams are exhausted
        if reduce(lambda x, y: x and y, [e is None for e in elements]):
            break
Iterate over multiple BED format files simultaneously and yield lists of genomic intervals for each matching set of intervals found. By default, regions which are not found in all files will be skipped (mirror = false). Optionally (by setting mirror to true) if a file is missing an interval, it can be added on-the-fly, and will have the same chrom, start and end and name as in other files. The score will be taken from the first file in inputStreams if mirrorScore is not set, otherwise that value will be used. :param inputStreams: a list of input streams in BED format :param mirror: if true, add missing elements so all streams contain the same elements. Inserted elements will have the same :param ignoreStrand: ignore strand when comparing elements for equality? :param ignoreScore: ignore score when comparing elements for equality? :param ignoreScore: ignore name when comparing elements for equality? :param sortedby: must be set to one of the sorting orders for BED streams; we require the streams to be sorted in some fashion. :param scoreType: interpret scores as what type? Defaults to float, which is generally the most flexible.
def parse_annotation(code):
    """Parse an annotation string and return its AST Expr node.

    code: annotation string (excluding '@')
    """
    module = ast.parse(code)
    assert type(module) is ast.Module, 'internal error #1'
    assert len(module.body) == 1, 'Annotation contains more than one expression'
    expr = module.body[0]
    assert type(expr) is ast.Expr, 'Annotation is not expression'
    return expr
Parse an annotation string. Return an AST Expr node. code: annotation string (excluding '@')
def get_outfile(self, outfile, goids=None):
    """Choose the output filename for the GO term plot."""
    # 1. An explicitly user-specified output filename always wins.
    if outfile != self.dflt_outfile:
        return outfile
    # 2. Plotting exactly one GO term: derive a descriptive name from it.
    if goids is not None and len(goids) == 1:
        goid = next(iter(goids))
        term = self.gosubdag.go2obj[goid]
        base = "GO_{NN}_{NM}".format(NN=goid.replace("GO:", ""),
                                     NM=term.name)
        safe = re.sub(r"[\s#'()+,-./:<=>\[\]_}]", '_', base)
        return "{BASE}.png".format(BASE=safe)
    # 3. Fall back to the default filename.
    return self.dflt_outfile
Return output file for GO Term plot.
def completeness(self, delta, method='step'):
    """
    Return the completeness as a function of magnitude.

    Parameters
    ----------
    delta : array_like
        Magnitude offset(s) relative to the limiting magnitude.
    method : str
        One of 'step', 'erf', or 'flemming'.

    Returns
    -------
    numpy.ndarray
        Completeness fraction evaluated at each ``delta``.

    Raises
    ------
    ValueError
        If ``method`` is not one of the recognized names.

    ADW: Eventually want a completeness mask to set overall efficiency.
    """
    delta = np.asarray(delta)
    if method == 'step':
        # Hard cutoff: fully complete for delta > 0, zero otherwise.
        def func(delta):
            return (delta > 0).astype(float)
    elif method == 'erf':
        # Trust the SDSS EDR???
        # 95% completeness:
        def func(delta):
            # Efficiency at bright end (assumed to be 100%)
            e = 1.0
            # EDR says full width is ~0.5 mag
            width = 0.2
            # This should be the halfway point in the curve
            return (e/2.0)*(1/np.sqrt(2*width))*(np.sqrt(2*width)-scipy.special.erf(-delta))
    elif method == 'flemming':
        # Functional form taken from Fleming et al. AJ 109, 1044 (1995)
        # http://adsabs.harvard.edu/abs/1995AJ....109.1044F
        # f = 1/2 [1 - alpha(V - Vlim)/sqrt(1 + alpha^2 (V - Vlim)^2)]
        # CAREFUL: This definition is for Vlim = 50% completeness
        def func(delta):
            alpha = 2.0
            return 0.5 * (1 - (alpha * delta)/np.sqrt(1 + alpha**2 * delta**2))
    else:
        # Fail loudly with the offending method name instead of a bare
        # Exception('...') that tells the caller nothing.
        raise ValueError("unrecognized completeness method: {0!r}".format(method))
    return func(delta)
Return the completeness as a function of magnitude. ADW: Eventually want a completeness mask to set overall efficiency.
def signout(self, redirect_url = "/"):
    """Log the current user out by clearing the login cookie.

    Parameters:
        redirect_url: URL to redirect to afterwards. Pass None to skip the
            redirect (useful for Ajax handlers).
    """
    self.clear_cookie(self._USER_NAME)
    if not redirect_url:
        return
    self.redirect(redirect_url)
注销登录状态 参数: redirect_url 跳转链接,为 None 时不跳转 (Ajax 可能用得到)。
def getAsKmlPngAnimation(self, session, projectFile=None, path=None, documentName=None, colorRamp=None, alpha=1.0, noDataValue=0, drawOrder=0, cellSize=None, resampleMethod='NearestNeighbour'): """ Retrieve the WMS dataset as a PNG time stamped KMZ Args: session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database. projectFile(:class:`gsshapy.orm.ProjectFile`): Project file object for the GSSHA project to which the WMS dataset belongs. path (str, optional): Path to file where KML file will be written. Defaults to None. documentName (str, optional): Name of the KML document. This will be the name that appears in the legend. Defaults to 'Stream Network'. colorRamp (:mod:`mapkit.ColorRampGenerator.ColorRampEnum` or dict, optional): Use ColorRampEnum to select a default color ramp or a dictionary with keys 'colors' and 'interpolatedPoints' to specify a custom color ramp. The 'colors' key must be a list of RGB integer tuples (e.g.: (255, 0, 0)) and the 'interpolatedPoints' must be an integer representing the number of points to interpolate between each color given in the colors list. alpha (float, optional): Set transparency of visualization. Value between 0.0 and 1.0 where 1.0 is 100% opaque and 0.0 is 100% transparent. Defaults to 1.0. noDataValue (float, optional): The value to treat as no data when generating visualizations of rasters. Defaults to 0.0. drawOrder (int, optional): Set the draw order of the images. Defaults to 0. cellSize (float, optional): Define the cell size in the units of the project projection at which to resample the raster to generate the PNG. Defaults to None which will cause the PNG to be generated with the original raster cell size. It is generally better to set this to a size smaller than the original cell size to obtain a higher resolution image. However, computation time increases exponentially as the cell size is decreased. 
resampleMethod (str, optional): If cellSize is set, this method will be used to resample the raster. Valid values include: NearestNeighbour, Bilinear, Cubic, CubicSpline, and Lanczos. Defaults to NearestNeighbour. Returns: (str, list): Returns a KML string and a list of binary strings that are the PNG images. """ # Prepare rasters timeStampedRasters = self._assembleRasterParams(projectFile, self.rasters) # Make sure the raster field is valid converter = RasterConverter(sqlAlchemyEngineOrSession=session) # Configure color ramp if isinstance(colorRamp, dict): converter.setCustomColorRamp(colorRamp['colors'], colorRamp['interpolatedPoints']) else: converter.setDefaultColorRamp(colorRamp) if documentName is None: documentName = self.fileExtension kmlString, binaryPngStrings = converter.getAsKmlPngAnimation(tableName=WMSDatasetRaster.tableName, timeStampedRasters=timeStampedRasters, rasterIdFieldName='id', rasterFieldName='raster', documentName=documentName, alpha=alpha, drawOrder=drawOrder, cellSize=cellSize, noDataValue=noDataValue, resampleMethod=resampleMethod) if path: directory = os.path.dirname(path) archiveName = (os.path.split(path)[1]).split('.')[0] kmzPath = os.path.join(directory, (archiveName + '.kmz')) with ZipFile(kmzPath, 'w') as kmz: kmz.writestr(archiveName + '.kml', kmlString) for index, binaryPngString in enumerate(binaryPngStrings): kmz.writestr('raster{0}.png'.format(index), binaryPngString) return kmlString, binaryPngStrings
Retrieve the WMS dataset as a PNG time stamped KMZ Args: session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database. projectFile(:class:`gsshapy.orm.ProjectFile`): Project file object for the GSSHA project to which the WMS dataset belongs. path (str, optional): Path to file where KML file will be written. Defaults to None. documentName (str, optional): Name of the KML document. This will be the name that appears in the legend. Defaults to 'Stream Network'. colorRamp (:mod:`mapkit.ColorRampGenerator.ColorRampEnum` or dict, optional): Use ColorRampEnum to select a default color ramp or a dictionary with keys 'colors' and 'interpolatedPoints' to specify a custom color ramp. The 'colors' key must be a list of RGB integer tuples (e.g.: (255, 0, 0)) and the 'interpolatedPoints' must be an integer representing the number of points to interpolate between each color given in the colors list. alpha (float, optional): Set transparency of visualization. Value between 0.0 and 1.0 where 1.0 is 100% opaque and 0.0 is 100% transparent. Defaults to 1.0. noDataValue (float, optional): The value to treat as no data when generating visualizations of rasters. Defaults to 0.0. drawOrder (int, optional): Set the draw order of the images. Defaults to 0. cellSize (float, optional): Define the cell size in the units of the project projection at which to resample the raster to generate the PNG. Defaults to None which will cause the PNG to be generated with the original raster cell size. It is generally better to set this to a size smaller than the original cell size to obtain a higher resolution image. However, computation time increases exponentially as the cell size is decreased. resampleMethod (str, optional): If cellSize is set, this method will be used to resample the raster. Valid values include: NearestNeighbour, Bilinear, Cubic, CubicSpline, and Lanczos. Defaults to NearestNeighbour. 
Returns: (str, list): Returns a KML string and a list of binary strings that are the PNG images.
def select_enclosed_points(dataset, surface, tolerance=0.001, inside_out=False,
                           check_surface=True):
    """Mark points as to whether they are inside a closed surface.

    This evaluates all the input points to determine whether they are in an
    enclosed surface. The filter produces a (0,1) mask (in the form of a
    vtkDataArray) that indicates whether points are outside (mask value=0)
    or inside (mask value=1) a provided surface. (The name of the output
    vtkDataArray is "SelectedPointsArray".)

    The filter assumes that the surface is closed and manifold. A boolean
    flag can be set to force the filter to first check whether this is
    true. If false, all points will be marked outside. Note that if this
    check is not performed and the surface is not closed, the results are
    undefined.

    This filter produces an output data array, but does not modify the
    input dataset. If you wish to extract cells or points, various
    threshold filters are available (i.e., threshold the output array).

    Parameters
    ----------
    surface : vtki.PolyData
        Set the surface to be used to test for containment. This must be a
        :class:`vtki.PolyData` object.

    tolerance : float
        The tolerance on the intersection. The tolerance is expressed as a
        fraction of the bounding box of the enclosing surface.

    inside_out : bool
        By default, points inside the surface are marked inside or sent to
        the output. If ``inside_out`` is ``True``, then the points outside
        the surface are marked inside.

    check_surface : bool
        Specify whether to check the surface for closure. If on, then the
        algorithm first checks to see if the surface is closed and manifold.
    """
    alg = vtk.vtkSelectEnclosedPoints()
    alg.SetInputData(dataset)
    alg.SetSurfaceData(surface)
    alg.SetTolerance(tolerance)
    # BUG FIX: inside_out was accepted and documented but never forwarded
    # to the VTK filter, so the flag silently had no effect.
    alg.SetInsideOut(inside_out)
    alg.SetCheckSurface(check_surface)
    alg.Update()
    return _get_output(alg)
Mark points as to whether they are inside a closed surface. This evaluates all the input points to determine whether they are in an enclosed surface. The filter produces a (0,1) mask (in the form of a vtkDataArray) that indicates whether points are outside (mask value=0) or inside (mask value=1) a provided surface. (The name of the output vtkDataArray is "SelectedPointsArray".) The filter assumes that the surface is closed and manifold. A boolean flag can be set to force the filter to first check whether this is true. If false, all points will be marked outside. Note that if this check is not performed and the surface is not closed, the results are undefined. This filter produces an output data array, but does not modify the input dataset. If you wish to extract cells or points, various threshold filters are available (i.e., threshold the output array). Parameters ---------- surface : vtki.PolyData Set the surface to be used to test for containment. This must be a :class:`vtki.PolyData` object. tolerance : float The tolerance on the intersection. The tolerance is expressed as a fraction of the bounding box of the enclosing surface. inside_out : bool By default, points inside the surface are marked inside or sent to the output. If ``inside_out`` is ``True``, then the points outside the surface are marked inside. check_surface : bool Specify whether to check the surface for closure. If on, then the algorithm first checks to see if the surface is closed and manifold.
def pay_deliver_notify(self, **deliver_info):
    """Notify Tencent that the goods for an order have been delivered.

    Typical usage::

        wxclient.pay_delivernotify(
            openid=openid,
            transid=transaction_id,
            out_trade_no=local_order_number,
            deliver_timestamp=int(time.time()),
            deliver_status="1",
            deliver_msg="ok"
        )

    :param deliver_info: keyword arguments that will be signed
    :return: response of the delivery-notification request
    """
    signed_params, signature, _ = self._pay_sign_dict(
        add_noncestr=False,
        add_timestamp=False,
        **deliver_info
    )
    signed_params['app_signature'] = signature
    signed_params['sign_method'] = 'sha1'

    return self.post(
        url="https://api.weixin.qq.com/pay/delivernotify",
        data=signed_params
    )
通知 腾讯发货 一般形式 :: wxclient.pay_delivernotify( openid=openid, transid=transaction_id, out_trade_no=本地订单号, deliver_timestamp=int(time.time()), deliver_status="1", deliver_msg="ok" ) :param 需要签名的的参数 :return: 支付需要的对象
def draw_curve(self,
               grid_characters: BoxDrawCharacterSet,
               *,
               top: bool = False,
               left: bool = False,
               right: bool = False,
               bottom: bool = False,
               crossing_char: Optional[str] = None):
    """Draws lines in the box using the given character set.

    Supports merging the new lines with the lines from a previous call to
    draw_curve, including when they have different character sets (assuming
    there exist characters merging the two).

    Args:
        grid_characters: The character set to draw the curve with.
        top: Draw topward leg?
        left: Draw leftward leg?
        right: Draw rightward leg?
        bottom: Draw downward leg?
        crossing_char: Overrides the all-legs-present character. Useful
            for ascii diagrams, where the + doesn't always look the
            clearest.
    """
    if not any([top, left, right, bottom]):
        return

    # Remember which legs are new, old, or missing.
    # +1: leg requested in this call; -1: leg drawn by an earlier call;
    # 0: leg absent. The sign tells box_draw_character which character
    # set (previous vs current) each leg belongs to.
    sign_top = +1 if top else -1 if self.top else 0
    sign_bottom = +1 if bottom else -1 if self.bottom else 0
    sign_left = +1 if left else -1 if self.left else 0
    sign_right = +1 if right else -1 if self.right else 0

    # Add new segments.
    if top:
        self.top = grid_characters.top_bottom
    if bottom:
        self.bottom = grid_characters.top_bottom
    if left:
        self.left = grid_characters.left_right
    if right:
        self.right = grid_characters.left_right

    # Fill center.
    # The caller's crossing_char override only applies when all four legs
    # are present; otherwise compute the merged crossing character.
    if not all([crossing_char, self.top, self.bottom, self.left,
                self.right]):
        crossing_char = box_draw_character(
            self._prev_curve_grid_chars,
            grid_characters,
            top=sign_top,
            bottom=sign_bottom,
            left=sign_left,
            right=sign_right)
    self.center = crossing_char or ''

    # Record the character set so the next draw_curve call can merge with it.
    self._prev_curve_grid_chars = grid_characters
Draws lines in the box using the given character set. Supports merging the new lines with the lines from a previous call to draw_curve, including when they have different character sets (assuming there exist characters merging the two). Args: grid_characters: The character set to draw the curve with. top: Draw topward leg? left: Draw leftward leg? right: Draw rightward leg? bottom: Draw downward leg? crossing_char: Overrides the all-legs-present character. Useful for ascii diagrams, where the + doesn't always look the clearest.
def add_tags(self, archive_name, tags):
    '''
    Add tags to an archive

    Parameters
    ----------
    archive_name: str
        Name of archive

    tags: list or tuple of strings
        tags to add to the archive

    '''
    merged = list(self._get_tags(archive_name))
    # Append only tags that are not already present, preserving order and
    # skipping duplicates within the incoming list as well.
    for candidate in tags:
        if candidate not in merged:
            merged.append(candidate)
    self._set_tags(archive_name, merged)
Add tags to an archive Parameters ---------- archive_name: str Name of archive tags: list or tuple of strings tags to add to the archive
def add_session(session=None):
    r"""Add a session to the SessionActivity table.

    :param session: Flask Session object to add. If None, ``session`` is
        used. The object is expected to have a dictionary entry named
        ``"user_id"`` and a field ``sid_s``
    """
    # NOTE(review): despite the docstring, there is no fallback when
    # ``session is None`` -- the subscript below would raise a TypeError.
    # Confirm whether callers always pass a session or whether a fallback
    # to the request-scoped session is missing.
    user_id, sid_s = session['user_id'], session.sid_s
    with db.session.begin_nested():
        # Record the session along with IP-derived country and parsed
        # User-Agent details.
        session_activity = SessionActivity(
            user_id=user_id,
            sid_s=sid_s,
            ip=request.remote_addr,
            country=_ip2country(request.remote_addr),
            **_extract_info_from_useragent(
                request.headers.get('User-Agent', '')
            )
        )
        # merge(): insert, or update the existing row with the same key.
        db.session.merge(session_activity)
r"""Add a session to the SessionActivity table. :param session: Flask Session object to add. If None, ``session`` is used. The object is expected to have a dictionary entry named ``"user_id"`` and a field ``sid_s``
def get_term_freq_df(self, label_append=' freq'):
    '''
    Build a term-frequency table.

    Parameters
    -------
    label_append : str
        Suffix appended to each category name to form the column labels.

    Returns
    -------
    pd.DataFrame indexed on terms, with columns giving frequencies
    for each category
    '''
    freq_mat = self.get_term_freq_mat()
    term_index = pd.Series(self.get_terms(), name='term')
    column_labels = [category + label_append for category in self.get_categories()]
    return pd.DataFrame(freq_mat, index=term_index, columns=column_labels)
Parameters ------- label_append : str Returns ------- pd.DataFrame indexed on terms, with columns giving frequencies for each
def loadJSON(self, json_string):
    """Populate the run-parameter widgets from the 'user' section of a
    JSON document."""
    g = get_root(self).globals
    user = json.loads(json_string)['user']

    def fill(widget, key):
        # Only overwrite the widget when the JSON actually carries a value.
        value = user.get(key)
        if value is not None:
            widget.set(value)

    fill(self.prog_ob.obid, 'OB')
    fill(self.target, 'target')
    fill(self.prog_ob.progid, 'ID')
    fill(self.pi, 'PI')
    fill(self.observers, 'Observers')
    fill(self.comment, 'comment')
    fill(self.filter, 'filters')
    fill(g.observe.rtype, 'flags')
Sets the values of the run parameters given an JSON string
def _read_name_text( self, bufr, platform_id, encoding_id, strings_offset, name_str_offset, length): """ Return the unicode name string at *name_str_offset* or |None| if decoding its format is not supported. """ raw_name = self._raw_name_string( bufr, strings_offset, name_str_offset, length ) return self._decode_name(raw_name, platform_id, encoding_id)
Return the unicode name string at *name_str_offset* or |None| if decoding its format is not supported.
def data_not_in(db_data, user_data):
    """Validate data not in user data.

    Args:
        db_data (str): The data store in Redis.
        user_data (list): The user provided data.

    Returns:
        bool: True if the data passed validation.
    """
    # Non-list user data always fails validation.
    return isinstance(user_data, list) and db_data not in user_data
Validate data not in user data. Args: db_data (str): The data store in Redis. user_data (list): The user provided data. Returns: bool: True if the data passed validation.
def tokenize(self, text):
    '''
    Split *text* on spaces and tabs, returning one dict per token with the
    token text and its character span.
    '''
    tokens = []
    begin = None  # start index of the token currently being scanned
    for pos, ch in enumerate(text):
        if ch == ' ' or ch == '\t':
            if begin is not None:
                piece = text[begin:pos]
                tokens.append({
                    'word': piece,
                    'original_text': piece,
                    'char_begin': begin,
                    'char_end': pos})
                begin = None
        elif begin is None:
            begin = pos
    # Flush a token that runs to the end of the text.
    if begin is not None:
        piece = text[begin:]
        tokens.append({
            'word': piece,
            'original_text': piece,
            'char_begin': begin,
            'char_end': len(text)
        })
    return tokens
tokenize function in Tokenizer.
def _readbytes(self, length, start):
    """Read bytes and return them. Note that length is in bits."""
    # Length must be a whole number of bytes and fit inside the bitstring.
    assert length % 8 == 0
    assert start + length <= self.len
    if not (start + self._offset) % 8:
        # Byte-aligned fast path: slice the backing datastore directly.
        return bytes(self._datastore.getbyteslice((start + self._offset) // 8, (start + self._offset + length) // 8))
    # Unaligned: fall back to a bit-level slice and convert to bytes.
    return self._slice(start, start + length).tobytes()
Read bytes and return them. Note that length is in bits.
def visitSenseFlags(self, ctx: ShExDocParser.SenseFlagsContext):
    """ '!' '^'? | '^' '!'? """
    flags_text = ctx.getText()
    if '!' in flags_text:
        self.expression.negated = True
    if '^' in flags_text:
        self.expression.inverse = True
'!' '^'? | '^' '!'?
def setExpertLevel(self):
    """ Set expert level """
    g = get_root(self).globals
    level = g.cpars['expert_level']

    # All five action buttons toggle together between the two modes.
    buttons = (self.load, self.save, self.unfreeze, self.start, self.stop)

    # now set whether buttons are permanently enabled or not
    if level in (0, 1):
        for button in buttons:
            button.setNonExpert()
    elif level == 2:
        for button in buttons:
            button.setExpert()
Set expert level
def get_position(self, dt):
    """Given dt in [0, 1], return the current position of the tile."""
    x = self.sx + self.dx * dt
    y = self.sy + self.dy * dt
    return x, y
Given dt in [0, 1], return the current position of the tile.
def _guess_iface_name(netif):
    """
    We attempt to guess the name of interfaces that are truncated from the
    output of ifconfig -l.
    If there is only one possible candidate matching the interface name then
    we return it.
    If there are none or more, then we return None.
    """
    with os.popen('%s -l' % conf.prog.ifconfig) as fdesc:
        all_ifaces = fdesc.readline().strip().split(' ')
    candidates = [name for name in all_ifaces if name.startswith(netif)]
    # Only an unambiguous single match is trustworthy.
    return candidates[0] if len(candidates) == 1 else None
We attempt to guess the name of interfaces that are truncated from the output of ifconfig -l. If there is only one possible candidate matching the interface name then we return it. If there are none or more, then we return None.
def sasdata2dataframe(self, table: str, libref: str = '', dsopts: dict = None, method: str = 'MEMORY',
                      **kwargs) -> 'pd.DataFrame':
    """
    Export a SAS Data Set to a Pandas Data Frame and return the Data Frame object.

    :param table: the name of the SAS Data Set to export
    :param libref: the libref for the SAS Data Set
    :param dsopts: a dictionary of SAS data set options (where, drop, keep, obs,
        firstobs, format):

        - where is a string
        - keep are strings or list of strings
        - drop are strings or list of strings
        - obs is a number - either string or int
        - firstobs is a number - either string or int
        - format is a string or dictionary { var: format }

        .. code-block:: python

            {'where'    : 'msrp < 20000 and make = "Ford"',
             'keep'     : 'msrp enginesize Cylinders Horsepower Weight',
             'drop'     : ['msrp', 'enginesize', 'Cylinders', 'Horsepower', 'Weight'],
             'obs'      : 10,
             'firstobs' : '12',
             'format'   : {'money': 'dollar10', 'time': 'tod5.'}}

    :param method: defaults to MEMORY, the original method; CSV uses an
        intermediary csv file and is faster for large data
    :param kwargs: passed through to the IO layer
    :return: Pandas data frame, or None if the data set does not exist or
        code submission is disabled
    """
    opts = dsopts if dsopts is not None else {}

    if self.exist(table, libref) == 0:
        print('The SAS Data Set ' + libref + '.' + table + ' does not exist')
        return None

    if self.nosub:
        print("too complicated to show the code, read the source :), sorry.")
        return None

    return self._io.sasdata2dataframe(table, libref, opts, method=method, **kwargs)
This method exports the SAS Data Set to a Pandas Data Frame, returning the Data Frame object. SASdata object that refers to the Sas Data Set you want to export to a Pandas Data Frame :param table: the name of the SAS Data Set you want to export to a Pandas Data Frame :param libref: the libref for the SAS Data Set. :param dsopts: a dictionary containing any of the following SAS data set options(where, drop, keep, obs, firstobs): - where is a string - keep are strings or list of strings. - drop are strings or list of strings. - obs is a numbers - either string or int - first obs is a numbers - either string or int - format is a string or dictionary { var: format } .. code-block:: python {'where' : 'msrp < 20000 and make = "Ford"' 'keep' : 'msrp enginesize Cylinders Horsepower Weight' 'drop' : ['msrp', 'enginesize', 'Cylinders', 'Horsepower', 'Weight'] 'obs' : 10 'firstobs' : '12' 'format' : {'money': 'dollar10', 'time': 'tod5.'} } :param method: defaults to MEMORY; the original method. CSV is the other choice which uses an intermediary csv file; faster for large data :param kwargs: dictionary :return: Pandas data frame
def history(self):
    """Returns an SQLAlchemy query of the object's history (previous
    versions). If the class does not support history/versioning, returns
    None.
    """
    hist_cls = self.history_class
    if not hist_cls:
        return None
    return self.session.query(hist_cls).filter(hist_cls.id == self.id)
Returns an SQLAlchemy query of the object's history (previous versions). If the class does not support history/versioning, returns None.
def get_field(name, data, default="object", document_object_field=None, is_document=False):
    """
    Return a valid Field by given data
    """
    if isinstance(data, AbstractField):
        return data
    data = keys_to_string(data)
    _type = data.get('type', default)

    # Leaf field types map one-to-one onto their field classes. These are
    # checked first, before any document/object handling, matching the
    # original elif-chain order.
    simple_fields = {
        "string": StringField,
        "binary": BinaryField,
        "boolean": BooleanField,
        "byte": ByteField,
        "short": ShortField,
        "integer": IntegerField,
        "long": LongField,
        "float": FloatField,
        "double": DoubleField,
        "ip": IpField,
        "date": DateField,
        "multi_field": MultiField,
        "geo_point": GeoPointField,
        "attachment": AttachmentField,
    }
    field_cls = simple_fields.get(_type)
    if field_cls is not None:
        return field_cls(name=name, **data)

    if is_document or _type == "document":
        if document_object_field:
            return document_object_field(name=name, **data)
        data.pop("name", None)
        return DocumentObjectField(name=name, **data)

    if _type == "object":
        # Presence of root-document markers promotes an object to a document.
        if '_timestamp' in data or "_all" in data:
            if document_object_field:
                return document_object_field(name=name, **data)
            return DocumentObjectField(name=name, **data)
        return ObjectField(name=name, **data)

    if _type == "nested":
        return NestedObject(name=name, **data)

    raise RuntimeError("Invalid type: %s" % _type)
Return a valid Field by given data
def is_valid_bucket_notification_config(notifications):
    """
    Validate the notifications config structure

    :param notifications: Dictionary with specific structure.
    :return: True if input is a valid bucket notifications structure.
       Raise :exc:`InvalidArgumentError` otherwise.
    """
    # check if notifications is a dict.
    if not isinstance(notifications, dict):
        raise TypeError('notifications configuration must be a dictionary')

    if len(notifications) == 0:
        raise InvalidArgumentError(
            'notifications configuration may not be empty'
        )

    # Top-level keys accepted by S3-style bucket notification configs.
    VALID_NOTIFICATION_KEYS = set([
        "TopicConfigurations",
        "QueueConfigurations",
        "CloudFunctionConfigurations",
    ])

    # Keys accepted inside each individual service configuration entry.
    VALID_SERVICE_CONFIG_KEYS = set([
        'Id',
        'Arn',
        'Events',
        'Filter',
    ])

    # Event names recognized by the service.
    NOTIFICATION_EVENTS = set([
        's3:ReducedRedundancyLostObject',
        's3:ObjectCreated:*',
        's3:ObjectCreated:Put',
        's3:ObjectCreated:Post',
        's3:ObjectCreated:Copy',
        's3:ObjectCreated:CompleteMultipartUpload',
        's3:ObjectRemoved:*',
        's3:ObjectRemoved:Delete',
        's3:ObjectRemoved:DeleteMarkerCreated',
    ])

    for key, value in notifications.items():
        # check if key names are valid
        if key not in VALID_NOTIFICATION_KEYS:
            raise InvalidArgumentError((
                '{} is an invalid key '
                'for notifications configuration').format(key))

        # check if config values conform
        # first check if value is a list
        if not isinstance(value, list):
            raise InvalidArgumentError((
                'The value for key "{}" in the notifications '
                'configuration must be a list.').format(key))

        for service_config in value:
            # check type matches
            if not isinstance(service_config, dict):
                raise InvalidArgumentError((
                    'Each service configuration item for "{}" must be a '
                    'dictionary').format(key))

            # check keys are valid
            for skey in service_config.keys():
                if skey not in VALID_SERVICE_CONFIG_KEYS:
                    raise InvalidArgumentError((
                        '{} is an invalid key for a service '
                        'configuration item').format(skey))

            # check for required keys
            arn = service_config.get('Arn', '')
            if arn == '':
                raise InvalidArgumentError(
                    'Arn key in service config must be present and has to be '
                    'non-empty string'
                )
            events = service_config.get('Events', [])
            if len(events) < 1:
                raise InvalidArgumentError(
                    'At least one event must be specified in a service config'
                )
            if not isinstance(events, list):
                raise InvalidArgumentError('"Events" must be a list of strings '
                                           'in a service configuration')

            # check if 'Id' key is present, it should be string or bytes.
            # NOTE(review): ``basestring`` is the Python 2 name; presumably a
            # py2/py3 compatibility alias is defined elsewhere in this module
            # -- confirm before relying on this under Python 3.
            if not isinstance(service_config.get('Id', ''), basestring):
                raise InvalidArgumentError('"Id" key must be a string')

            for event in events:
                if event not in NOTIFICATION_EVENTS:
                    raise InvalidArgumentError(
                        '{} is not a valid event. Valid '
                        'events are: {}'.format(event, NOTIFICATION_EVENTS))

            if 'Filter' in service_config:
                exception_msg = (
                    '{} - If a Filter key is given, it must be a '
                    'dictionary, the dictionary must have the '
                    'key "Key", and its value must be an object, with '
                    'a key named "FilterRules" which must be a non-empty list.'
                ).format(
                    service_config['Filter']
                )
                try:
                    # AttributeError here means one of the nested values was
                    # not a dict; it is translated into the message above.
                    filter_rules = service_config.get('Filter', {}).get(
                        'Key', {}).get('FilterRules', [])
                    if not isinstance(filter_rules, list):
                        raise InvalidArgumentError(exception_msg)
                    if len(filter_rules) < 1:
                        raise InvalidArgumentError(exception_msg)
                except AttributeError:
                    raise InvalidArgumentError(exception_msg)

                for filter_rule in filter_rules:
                    try:
                        name = filter_rule['Name']
                        # NOTE(review): this rebinding shadows the outer loop
                        # variable ``value``; harmless because the active
                        # iterator is unaffected, but fragile to edits.
                        value = filter_rule['Value']
                    except KeyError:
                        raise InvalidArgumentError((
                            '{} - a FilterRule dictionary must have "Name" '
                            'and "Value" keys').format(filter_rule))

                    if name not in ['prefix', 'suffix']:
                        raise InvalidArgumentError((
                            '{} - The "Name" key in a filter rule must be '
                            'either "prefix" or "suffix"').format(name))

    return True
Validate the notifications config structure :param notifications: Dictionary with specific structure. :return: True if input is a valid bucket notifications structure. Raise :exc:`InvalidArgumentError` otherwise.
def render(self, context):
    """Handle the actual rendering.
    """
    user = self._get_value(self.user_key, context)
    feature = self._get_value(self.feature, context)
    # An unresolved feature renders nothing.
    if feature is None:
        return ''
    if show_feature(user, feature):
        return self.nodelist.render(context)
    return ''
Handle the actual rendering.
def to_posix_path(code_path):
    """
    Change the code_path to be of unix-style if running on windows when supplied with an absolute windows path.

    Parameters
    ----------
    code_path : str
        Directory in the host operating system that should be mounted within the container.

    Returns
    -------
    str
        Posix equivalent of absolute windows style path.

    Examples
    --------
    >>> to_posix_path('/Users/UserName/sam-app')
    /Users/UserName/sam-app
    >>> to_posix_path('C:\\\\Users\\\\UserName\\\\AppData\\\\Local\\\\Temp\\\\mydir')
    /c/Users/UserName/AppData/Local/Temp/mydir
    """
    if os.name != "nt":
        # Nothing to translate on POSIX hosts.
        return code_path

    def _lower_drive(match):
        # 'C:' -> '/c'
        return posixpath.sep + match.group().replace(":", "").lower()

    forward_slashed = pathlib.PureWindowsPath(code_path).as_posix()
    return re.sub("^([A-Za-z])+:", _lower_drive, forward_slashed)
Change the code_path to be of unix-style if running on windows when supplied with an absolute windows path. Parameters ---------- code_path : str Directory in the host operating system that should be mounted within the container. Returns ------- str Posix equivalent of absolute windows style path. Examples -------- >>> to_posix_path('/Users/UserName/sam-app') /Users/UserName/sam-app >>> to_posix_path('C:\\\\Users\\\\UserName\\\\AppData\\\\Local\\\\Temp\\\\mydir') /c/Users/UserName/AppData/Local/Temp/mydir
def fwdl_status_output_fwdl_entries_blade_slot(self, **kwargs):
    """Auto Generated Code

    Builds the XML payload for the fwdl-status RPC's blade-slot entry and
    sends it through the configured callback.
    """
    config = ET.Element("config")
    fwdl_status = ET.Element("fwdl_status")
    # NOTE(review): this reassignment discards the "config" wrapper element
    # created above; the callback receives the bare "fwdl_status" element.
    # Presumably intentional for this auto-generated RPC, but worth
    # confirming against the generator template.
    config = fwdl_status
    output = ET.SubElement(fwdl_status, "output")
    fwdl_entries = ET.SubElement(output, "fwdl-entries")
    blade_slot = ET.SubElement(fwdl_entries, "blade-slot")
    # 'blade_slot' is a required kwarg; pop() raises KeyError if missing.
    blade_slot.text = kwargs.pop('blade_slot')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def forward_for_single_feature_map(self, anchors, objectness, box_regression):
    """
    Arguments:
        anchors: list[BoxList]
        objectness: tensor of size N, A, H, W
        box_regression: tensor of size N, A * 4, H, W

    Returns:
        list[BoxList]: per-image proposals after top-k selection, clipping,
        small-box removal and NMS.
    """
    device = objectness.device
    N, A, H, W = objectness.shape

    # put in the same format as anchors
    objectness = permute_and_flatten(objectness, N, A, 1, H, W).view(N, -1)
    objectness = objectness.sigmoid()

    box_regression = permute_and_flatten(box_regression, N, A, 4, H, W)

    num_anchors = A * H * W

    # Keep only the highest-scoring anchors (per image) before NMS.
    pre_nms_top_n = min(self.pre_nms_top_n, num_anchors)
    objectness, topk_idx = objectness.topk(pre_nms_top_n, dim=1, sorted=True)

    # batch_idx broadcasts against topk_idx to gather per-image selections.
    batch_idx = torch.arange(N, device=device)[:, None]
    box_regression = box_regression[batch_idx, topk_idx]

    image_shapes = [box.size for box in anchors]
    concat_anchors = torch.cat([a.bbox for a in anchors], dim=0)
    concat_anchors = concat_anchors.reshape(N, -1, 4)[batch_idx, topk_idx]

    # Decode regression deltas against the selected anchors into xyxy boxes.
    proposals = self.box_coder.decode(
        box_regression.view(-1, 4), concat_anchors.view(-1, 4)
    )

    proposals = proposals.view(N, -1, 4)

    result = []
    for proposal, score, im_shape in zip(proposals, objectness, image_shapes):
        boxlist = BoxList(proposal, im_shape, mode="xyxy")
        boxlist.add_field("objectness", score)
        # Clamp boxes to the image and drop degenerate boxes before NMS.
        boxlist = boxlist.clip_to_image(remove_empty=False)
        boxlist = remove_small_boxes(boxlist, self.min_size)
        boxlist = boxlist_nms(
            boxlist,
            self.nms_thresh,
            max_proposals=self.post_nms_top_n,
            score_field="objectness",
        )
        result.append(boxlist)
    return result
Arguments: anchors: list[BoxList] objectness: tensor of size N, A, H, W box_regression: tensor of size N, A * 4, H, W
def _estimate_param_scan_worker(estimator, params, X, evaluate, evaluate_args, failfast, return_exceptions): """ Method that runs estimation for several parameter settings. Defined as a worker for parallelization """ # run estimation model = None try: # catch any exception estimator.estimate(X, **params) model = estimator.model except KeyboardInterrupt: # we want to be able to interactively interrupt the worker, no matter of failfast=False. raise except: e = sys.exc_info()[1] if isinstance(estimator, Loggable): estimator.logger.warning("Ignored error during estimation: %s" % e) if failfast: raise # re-raise elif return_exceptions: model = e else: pass # just return model=None # deal with results res = [] # deal with result if evaluate is None: # we want full models res.append(model) # we want to evaluate function(s) of the model elif _types.is_iterable(evaluate): values = [] # the function values the model for ieval, name in enumerate(evaluate): # get method/attribute name and arguments to be evaluated #name = evaluate[ieval] args = () if evaluate_args is not None: args = evaluate_args[ieval] # wrap single arguments in an iterable again to pass them. if _types.is_string(args): args = (args, ) # evaluate try: # try calling method/property/attribute value = _call_member(estimator.model, name, failfast, *args) # couldn't find method/property/attribute except AttributeError as e: if failfast: raise e # raise an AttributeError else: value = None # we just ignore it and return None values.append(value) # if we only have one value, unpack it if len(values) == 1: values = values[0] res.append(values) else: raise ValueError('Invalid setting for evaluate: ' + str(evaluate)) if len(res) == 1: res = res[0] return res
Method that runs estimation for several parameter settings. Defined as a worker for parallelization
def _serialize_parameters(parameters): """Serialize some parameters to match python native types with formats specified in google api docs like: * True/False -> "true"/"false", * {"a": 1, "b":2} -> "a:1|b:2" :type parameters: dict oif query parameters """ for key, value in parameters.items(): if isinstance(value, bool): parameters[key] = "true" if value else "false" elif isinstance(value, dict): parameters[key] = "|".join( ("%s:%s" % (k, v) for k, v in value.items())) elif isinstance(value, (list, tuple)): parameters[key] = "|".join(value) return parameters
Serialize some parameters to match python native types with formats specified in google api docs like: * True/False -> "true"/"false", * {"a": 1, "b":2} -> "a:1|b:2" :type parameters: dict oif query parameters
def ndtr(a):
    """
    Return the area under the standard normal probability density,
    integrated from minus infinity to ``a``.

    Mirrors scipy.special.ndtr:
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.ndtr.html#scipy.special.ndtr
    """
    root_half = math.sqrt(2) / 2
    x = float(a) * root_half

    # near zero erf is well-conditioned; use it directly
    if abs(x) < root_half:
        return 0.5 + 0.5 * math.erf(x)

    # in the tails use erfc of the magnitude and mirror for positive x
    tail = 0.5 * math.erfc(abs(x))
    return 1 - tail if x > 0 else tail
Returns the area under the Gaussian probability density function, integrated from minus infinity to x. See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.ndtr.html#scipy.special.ndtr
def _runUnrealBuildTool(self, target, platform, configuration, args, capture=False):
    """
    Invokes UnrealBuildTool with the specified parameters.

    When ``capture`` is True the process output is captured and returned;
    otherwise the tool runs attached to the console and nothing is returned.
    """
    platform = self._transformBuildToolPlatform(platform)
    invocation = [self.getBuildScript(), target, platform, configuration] + args
    engine_root = self.getEngineRoot()

    # strict `== True` comparison kept from the original implementation
    if capture == True:
        return Utility.capture(invocation, cwd=engine_root, raiseOnError=True)

    Utility.run(invocation, cwd=engine_root, raiseOnError=True)
Invokes UnrealBuildTool with the specified parameters
def from_cn(cls, common_name):
    """ Retrieve certificate ids by common name.

    Queries pending/valid certificates twice -- once matching the common
    name, once matching an altname -- and collects the ids of every
    certificate whose cn or altnames contain ``common_name``.

    :param common_name: fqdn to look up.
    :return: list of matching certificate ids (a certificate found by both
        queries appears twice, as in the original behaviour), or None when
        nothing matches.
    """
    # same search executed against both filter keys, cn first
    results = []
    for filter_key in ('cn', 'altname'):
        query = {'status': ['pending', 'valid'],
                 'items_per_page': 500,
                 filter_key: common_name}
        results.extend((cert['id'], [cert['cn']] + cert['altnames'])
                       for cert in cls.list(query))

    # index every fqdn (cn and altnames) to the ids that carry it
    ret = {}
    for id_, fqdns in results:
        for fqdn in fqdns:
            ret.setdefault(fqdn, []).append(id_)

    cert_id = ret.get(common_name)
    if not cert_id:
        return None
    return cert_id
Retrieve a certificate by its common name.
def draw_bounding_boxes(images, annotations, confidence_threshold=0):
    """
    Visualizes bounding boxes (ground truth or predictions) by
    returning annotated copies of the images.

    Parameters
    ----------
    images: SArray or Image
        An `SArray` of type `Image`. A single `Image` instance may also be
        given.

    annotations: SArray or list
        An `SArray` of annotations (either output from the
        `ObjectDetector.predict` function or ground truth). A single list of
        annotations may also be given, provided that it is coupled with a
        single image.

    confidence_threshold: float
        Confidence threshold can limit the number of boxes to draw. By
        default, this is set to 0, since the prediction may have already pruned
        with an appropriate confidence threshold.

    Returns
    -------
    annotated_images: SArray or Image
        Similar to the input `images`, except the images are decorated with
        boxes to visualize the object instances.

    See also
    --------
    unstack_annotations
    """
    _numeric_param_check_range('confidence_threshold', confidence_threshold, 0.0, 1.0)
    from PIL import Image

    def draw_single_image(row):
        image = row['image']
        anns = row['annotations']
        # normalize annotations: missing -> [], single dict -> [dict]
        if anns is None:
            anns = []
        elif isinstance(anns, dict):
            anns = [anns]
        pil_img = Image.fromarray(image.pixel_data)
        _annotate_image(pil_img, anns,
                        confidence_threshold=confidence_threshold)
        image = _np.array(pil_img)
        # RAW pixel format enum expected by the Image constructor
        FORMAT_RAW = 2
        annotated_image = _tc.Image(_image_data=image.tobytes(),
                                    _width=image.shape[1],
                                    _height=image.shape[0],
                                    _channels=image.shape[2],
                                    _format_enum=FORMAT_RAW,
                                    _image_data_size=image.size)
        return annotated_image

    if isinstance(images, _tc.Image) and isinstance(annotations, list):
        # single image + single annotation list -> single annotated image
        return draw_single_image({'image': images, 'annotations': annotations})
    else:
        return (_tc.SFrame({'image': images, 'annotations': annotations})
                .apply(draw_single_image))
Visualizes bounding boxes (ground truth or predictions) by returning annotated copies of the images. Parameters ---------- images: SArray or Image An `SArray` of type `Image`. A single `Image` instance may also be given. annotations: SArray or list An `SArray` of annotations (either output from the `ObjectDetector.predict` function or ground truth). A single list of annotations may also be given, provided that it is coupled with a single image. confidence_threshold: float Confidence threshold can limit the number of boxes to draw. By default, this is set to 0, since the prediction may have already pruned with an appropriate confidence threshold. Returns ------- annotated_images: SArray or Image Similar to the input `images`, except the images are decorated with boxes to visualize the object instances. See also -------- unstack_annotations
def unregister(self, command):
    """
    Unregisters an existing command, so that this command is no longer
    available on the command line interface.

    This function is mainly used during plugin deactivation.

    :param command: Name of the command
    """
    if command not in self._commands:
        self.log.warning("Can not unregister command %s" % command)
        return

    # Click does not have any kind of a function to
    # unregister/remove/deactivate already added commands, so we delete the
    # related object manually from the click internal commands dictionary of
    # our root command.
    del self._click_root_command.commands[command]
    # Finally delete the command from our internal dictionary too.
    del self._commands[command]
    self.log.debug("Command %s got unregistered" % command)
Unregisters an existing command, so that this command is no longer available on the command line interface. This function is mainly used during plugin deactivation. :param command: Name of the command
def enable_category(self, category: str) -> None:
    """
    Enable an entire category of commands.

    :param category: the category to enable
    """
    # Snapshot the mapping first: enable_command() mutates
    # self.disabled_commands while we iterate.
    for cmd_name, disabled_cmd in list(self.disabled_commands.items()):
        func = disabled_cmd.command_function
        # single getattr with default replaces the hasattr/getattr pair;
        # equivalent because category is a (non-None) string
        if getattr(func, HELP_CATEGORY, None) == category:
            self.enable_command(cmd_name)
Enable an entire category of commands :param category: the category to enable
def update_registration(self, regr, uri=None):
    """
    Submit a registration to the server to update it.

    :param ~acme.messages.RegistrationResource regr: The registration to
        update.  Can be a :class:`~acme.messages.NewRegistration` instead,
        in order to create a new registration.
    :param str uri: The url to submit to.  Must be specified if a
        :class:`~acme.messages.NewRegistration` is provided.
    :return: The updated registration resource.
    :rtype: Deferred[`~acme.messages.RegistrationResource`]
    """
    if uri is None:
        uri = regr.uri
    # a full resource carries its body; wrap it as an update message,
    # otherwise the caller already passed a bare registration message
    if isinstance(regr, messages.RegistrationResource):
        message = messages.UpdateRegistration(**dict(regr.body))
    else:
        message = regr
    action = LOG_ACME_UPDATE_REGISTRATION(uri=uri, registration=message)
    with action.context():
        # POST, parse the returned registration, verify it against the
        # one we sent, and log the result before finishing the action
        return (
            DeferredContext(self._client.post(uri, message))
            .addCallback(self._parse_regr_response, uri=uri)
            .addCallback(self._check_regr, regr)
            .addCallback(
                tap(lambda r: action.add_success_fields(registration=r)))
            .addActionFinish())
Submit a registration to the server to update it. :param ~acme.messages.RegistrationResource regr: The registration to update. Can be a :class:`~acme.messages.NewRegistration` instead, in order to create a new registration. :param str uri: The url to submit to. Must be specified if a :class:`~acme.messages.NewRegistration` is provided. :return: The updated registration resource. :rtype: Deferred[`~acme.messages.RegistrationResource`]
def _iter_lexerclasses(plugins=True):
    """Yield every known lexer class: builtins first (in sorted key
    order, loaded lazily into the cache), then plugin lexers when
    *plugins* is true."""
    for key in sorted(LEXERS):
        module_name, name = LEXERS[key][:2]
        if name not in _lexer_cache:
            _load_lexers(module_name)
        yield _lexer_cache[name]
    for plugin_lexer in (find_plugin_lexers() if plugins else ()):
        yield plugin_lexer
Return an iterator over all lexer classes.
def remove(name=None, pkgs=None, **kwargs):
    '''
    Remove the passed package(s) from the system using winrepo

    .. versionadded:: 0.16.0

    Args:
        name (str): The name(s) of the package(s) to be uninstalled. Can be a
            single package or a comma delimited list of packages, no spaces.

        pkgs (list):
            A list of packages to delete. Must be passed as a python list. The
            ``name`` parameter will be ignored if this option is passed.

    Kwargs:

        version (str):
            The version of the package to be uninstalled. If this option is
            used to uninstall multiple packages, then this version will be
            applied to all targeted packages. Recommended using only when
            uninstalling a single package. If this parameter is omitted, the
            latest version will be uninstalled.

        saltenv (str): Salt environment. Default ``base``

        refresh (bool): Refresh package metadata. Default ``False``

    Returns:
        dict: Returns a dict containing the changes.

        If the package is removed by ``pkg.remove``:

            {'<package>': {'old': '<old-version>',
                           'new': '<new-version>'}}

        If the package is already uninstalled:

            {'<package>': {'current': 'not installed'}}

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.remove <package name>
        salt '*' pkg.remove <package1>,<package2>,<package3>
        salt '*' pkg.remove pkgs='["foo", "bar"]'
    '''
    saltenv = kwargs.get('saltenv', 'base')
    refresh = salt.utils.data.is_true(kwargs.get('refresh', False))
    # no need to call _refresh_db_conditional as list_pkgs will do it
    ret = {}

    # Make sure name or pkgs is passed
    if not name and not pkgs:
        return 'Must pass a single package or a list of packages'

    # Get package parameters (mapping of package name -> requested version)
    pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs, **kwargs)[0]

    # Get a list of currently installed software for comparison at the end
    old = list_pkgs(saltenv=saltenv, refresh=refresh, versions_as_list=True)

    # Loop through each package
    changed = []  # list of changed package names
    for pkgname, version_num in six.iteritems(pkg_params):

        # Load winrepo metadata for the package
        pkginfo = _get_package_info(pkgname, saltenv=saltenv)

        # Make sure pkginfo was found
        if not pkginfo:
            msg = 'Unable to locate package {0}'.format(pkgname)
            log.error(msg)
            ret[pkgname] = msg
            continue

        # Check to see if package is installed on the system
        if pkgname not in old:
            log.debug('%s %s not installed',
                      pkgname, version_num if version_num else '')
            ret[pkgname] = {'current': 'not installed'}
            continue

        removal_targets = []
        # Only support a single version number
        if version_num is not None:
            # Using the salt cmdline with version=5.3 might be interpreted
            # as a float it must be converted to a string in order for
            # string matching to work.
            version_num = six.text_type(version_num)

        # At least one version of the software is installed.
        if version_num is None:
            # No version requested: target every installed version; fall back
            # to the 'latest' definition when the installed version has no
            # winrepo entry of its own
            for ver_install in old[pkgname]:
                if ver_install not in pkginfo and 'latest' in pkginfo:
                    log.debug('%s %s using package latest entry to to remove', pkgname, version_num)
                    removal_targets.append('latest')
                else:
                    removal_targets.append(ver_install)
        else:
            if version_num in pkginfo:
                # we know how to remove this version
                if version_num in old[pkgname]:
                    removal_targets.append(version_num)
                else:
                    log.debug('%s %s not installed', pkgname, version_num)
                    ret[pkgname] = {'current': '{0} not installed'.format(version_num)}
                    continue
            elif 'latest' in pkginfo:
                # we do not have version entry, assume software can self
                # upgrade and use latest
                log.debug('%s %s using package latest entry to to remove', pkgname, version_num)
                removal_targets.append('latest')

        if not removal_targets:
            log.error('%s %s no definition to remove this version', pkgname, version_num)
            ret[pkgname] = {
                'current': '{0} no definition, cannot removed'.format(version_num)
            }
            continue

        for target in removal_targets:
            # Get the uninstaller
            uninstaller = pkginfo[target].get('uninstaller', '')
            cache_dir = pkginfo[target].get('cache_dir', False)
            uninstall_flags = pkginfo[target].get('uninstall_flags', '')

            # If no uninstaller found, use the installer with uninstall flags
            if not uninstaller and uninstall_flags:
                uninstaller = pkginfo[target].get('installer', '')

            # If still no uninstaller found, fail
            if not uninstaller:
                log.error(
                    'No installer or uninstaller configured for package %s',
                    pkgname,
                )
                ret[pkgname] = {'no uninstaller defined': target}
                continue

            # Where is the uninstaller
            if uninstaller.startswith(('salt:', 'http:', 'https:', 'ftp:')):

                # Check for the 'cache_dir' parameter in the .sls file
                # If true, the entire directory will be cached instead of the
                # individual file. This is useful for installations that are
                # not single files
                if cache_dir and uninstaller.startswith('salt:'):
                    path, _ = os.path.split(uninstaller)
                    __salt__['cp.cache_dir'](path,
                                             saltenv,
                                             False,
                                             None,
                                             'E@init.sls$')

                # Check to see if the uninstaller is cached
                cached_pkg = __salt__['cp.is_cached'](uninstaller, saltenv)
                if not cached_pkg:
                    # It's not cached. Cache it, mate.
                    cached_pkg = __salt__['cp.cache_file'](uninstaller, saltenv)

                    # Check if the uninstaller was cached successfully
                    if not cached_pkg:
                        log.error('Unable to cache %s', uninstaller)
                        ret[pkgname] = {'unable to cache': uninstaller}
                        continue

                # Compare the hash of the cached installer to the source only
                # if the file is hosted on salt:
                # TODO cp.cache_file does cache and hash checking? So why do it again?
                if uninstaller.startswith('salt:'):
                    if __salt__['cp.hash_file'](uninstaller, saltenv) != \
                            __salt__['cp.hash_file'](cached_pkg):
                        try:
                            cached_pkg = __salt__['cp.cache_file'](
                                uninstaller, saltenv)
                        except MinionError as exc:
                            return '{0}: {1}'.format(exc, uninstaller)

                        # Check if the installer was cached successfully
                        if not cached_pkg:
                            log.error('Unable to cache %s', uninstaller)
                            ret[pkgname] = {'unable to cache': uninstaller}
                            continue
            else:
                # Run the uninstaller directly
                # (not hosted on salt:, https:, etc.)
                cached_pkg = os.path.expandvars(uninstaller)

            # Fix non-windows slashes
            cached_pkg = cached_pkg.replace('/', '\\')
            cache_path, _ = os.path.split(cached_pkg)

            # os.path.expandvars is not required as we run everything through
            # cmd.exe /s /c
            if kwargs.get('extra_uninstall_flags'):
                uninstall_flags = '{0} {1}'.format(
                    uninstall_flags, kwargs.get('extra_uninstall_flags', ''))

            # Compute msiexec string
            use_msiexec, msiexec = _get_msiexec(pkginfo[target].get('msiexec', False))
            cmd_shell = os.getenv('ComSpec', '{0}\\system32\\cmd.exe'.format(os.getenv('WINDIR')))

            # Build cmd and arguments
            # cmd and arguments must be separated for use with the task
            # scheduler
            if use_msiexec:
                # Check if uninstaller is set to {guid}, if not we assume its
                # a remote msi file, which has already been downloaded.
                arguments = '"{0}" /X "{1}"'.format(msiexec, cached_pkg)
            else:
                arguments = '"{0}"'.format(cached_pkg)

            if uninstall_flags:
                arguments = '{0} {1}'.format(arguments, uninstall_flags)

            # Uninstall the software
            # NOTE(review): pkgname is appended to `changed` here AND again in
            # the success branches below -- duplicates are harmless for the
            # `all(name in difference ...)` check, but worth confirming.
            changed.append(pkgname)
            # Check Use Scheduler Option
            if pkginfo[target].get('use_scheduler', False):
                # Create Scheduled Task
                __salt__['task.create_task'](name='update-salt-software',
                                             user_name='System',
                                             force=True,
                                             action_type='Execute',
                                             cmd=cmd_shell,
                                             arguments='/s /c "{0}"'.format(arguments),
                                             start_in=cache_path,
                                             trigger_type='Once',
                                             start_date='1975-01-01',
                                             start_time='01:00',
                                             ac_only=False,
                                             stop_if_on_batteries=False)

                # Run Scheduled Task
                if not __salt__['task.run_wait'](name='update-salt-software'):
                    log.error('Failed to remove %s', pkgname)
                    log.error('Scheduled Task failed to run')
                    ret[pkgname] = {'uninstall status': 'failed'}
            else:
                # Launch the command
                result = __salt__['cmd.run_all'](
                    '"{0}" /s /c "{1}"'.format(cmd_shell, arguments),
                    output_loglevel='trace',
                    python_shell=False,
                    redirect_stderr=True)
                if not result['retcode']:
                    ret[pkgname] = {'uninstall status': 'success'}
                    changed.append(pkgname)
                elif result['retcode'] == 3010:
                    # 3010 is ERROR_SUCCESS_REBOOT_REQUIRED
                    # NOTE(review): kwargs.pop here means only the first
                    # package hitting 3010 sees a caller-supplied False --
                    # subsequent iterations fall back to the default True.
                    report_reboot_exit_codes = kwargs.pop(
                        'report_reboot_exit_codes', True)
                    if report_reboot_exit_codes:
                        __salt__['system.set_reboot_required_witnessed']()
                    ret[pkgname] = {'uninstall status': 'success, reboot required'}
                    changed.append(pkgname)
                elif result['retcode'] == 1641:
                    # 1641 is ERROR_SUCCESS_REBOOT_INITIATED
                    ret[pkgname] = {'uninstall status': 'success, reboot initiated'}
                    changed.append(pkgname)
                else:
                    log.error('Failed to remove %s', pkgname)
                    log.error('retcode %s', result['retcode'])
                    log.error('uninstaller output: %s', result['stdout'])
                    ret[pkgname] = {'uninstall status': 'failed'}

    # Get a new list of installed software
    new = list_pkgs(saltenv=saltenv, refresh=False)

    # Take the "old" package list and convert the values to strings in
    # preparation for the comparison below.
    __salt__['pkg_resource.stringify'](old)

    # Check for changes in the registry
    difference = salt.utils.data.compare_dicts(old, new)
    found_chgs = all(name in difference for name in changed)
    end_t = time.time() + 3  # give it 3 seconds to catch up.
    while not found_chgs and time.time() < end_t:
        time.sleep(0.5)
        new = list_pkgs(saltenv=saltenv, refresh=False)
        difference = salt.utils.data.compare_dicts(old, new)
        found_chgs = all(name in difference for name in changed)

    if not found_chgs:
        log.warning('Expected changes for package removal may not have occured')

    # Compare the software list before and after
    # Add the difference to ret
    ret.update(difference)

    return ret
Remove the passed package(s) from the system using winrepo .. versionadded:: 0.16.0 Args: name (str): The name(s) of the package(s) to be uninstalled. Can be a single package or a comma delimited list of packages, no spaces. pkgs (list): A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. Kwargs: version (str): The version of the package to be uninstalled. If this option is used to to uninstall multiple packages, then this version will be applied to all targeted packages. Recommended using only when uninstalling a single package. If this parameter is omitted, the latest version will be uninstalled. saltenv (str): Salt environment. Default ``base`` refresh (bool): Refresh package metadata. Default ``False`` Returns: dict: Returns a dict containing the changes. If the package is removed by ``pkg.remove``: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} If the package is already uninstalled: {'<package>': {'current': 'not installed'}} CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]'