positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def list_packets(self, name=None, start=None, stop=None, page_size=500, descending=False):
    """
    Reads packet information between the specified start and stop time.

    Packets are sorted by generation time and sequence number.

    :param str name: Optional packet-name filter; when given it is passed
        as the ``name`` query parameter. assumes the server filters by
        archived packet name — TODO confirm
    :param ~datetime.datetime start: Minimum generation time of the returned
                                     packets (inclusive)
    :param ~datetime.datetime stop: Maximum generation time of the returned
                                    packets (exclusive)
    :param int page_size: Page size of underlying requests. Higher values imply
                          less overhead, but risk hitting the maximum message
                          size limit.
    :param bool descending: If set to ``True`` packets are fetched in reverse
                            order (most recent first).
    :rtype: ~collections.Iterable[.Packet]
    """
    params = {
        'order': 'desc' if descending else 'asc',
    }
    if name is not None:
        params['name'] = name
    if page_size is not None:
        params['limit'] = page_size
    if start is not None:
        params['start'] = to_isostring(start)
    if stop is not None:
        params['stop'] = to_isostring(stop)
    # Lazy iterator: pages are fetched on demand as the caller consumes items.
    return pagination.Iterator(
        client=self._client,
        path='/archive/{}/packets'.format(self._instance),
        params=params,
        response_class=rest_pb2.ListPacketsResponse,
        items_key='packet',
        item_mapper=Packet,
    )
Reads packet information between the specified start and stop time. Packets are sorted by generation time and sequence number. :param ~datetime.datetime start: Minimum generation time of the returned packets (inclusive) :param ~datetime.datetime stop: Maximum generation time of the returned packets (exclusive) :param int page_size: Page size of underlying requests. Higher values imply less overhead, but risk hitting the maximum message size limit. :param bool descending: If set to ``True`` packets are fetched in reverse order (most recent first). :rtype: ~collections.Iterable[.Packet]
def delete(self, ids):
    """Delete equipment records given their identifiers.

    :param ids: Identifiers of equipments
    :return: None
    """
    target_url = build_uri_with_ids('api/v4/equipment/%s/', ids)
    return super(ApiV4Equipment, self).delete(target_url)
Method to delete equipments by their id's :param ids: Identifiers of equipments :return: None
def logical_chassis_fwdl_status_output_overall_status(self, **kwargs):
    """Auto Generated Code

    Build the ``logical_chassis_fwdl_status`` XML document with an
    ``output/overall-status`` leaf and hand it to the callback.

    :param overall_status: text placed in the ``overall-status`` element
        (required keyword argument; popped from ``kwargs``)
    :param callback: optional callable invoked with the built element;
        defaults to ``self._callback``
    :return: whatever the callback returns
    """
    # The original assigned ``config = ET.Element("config")`` and then
    # immediately overwrote it; the dead assignment has been removed.
    logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
    config = logical_chassis_fwdl_status
    output = ET.SubElement(logical_chassis_fwdl_status, "output")
    overall_status = ET.SubElement(output, "overall-status")
    overall_status.text = kwargs.pop('overall_status')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def generate_pdf(self):
    """Generate a PDF from the displayed content and open it externally.

    Renders the widget's current contents to an A4, high-resolution,
    colour PDF at a unique temporary path, then asks the desktop
    environment to open the resulting file.
    """
    printer = QtGui.QPrinter(QtGui.QPrinter.HighResolution)
    printer.setPageSize(QtGui.QPrinter.A4)
    printer.setColorMode(QtGui.QPrinter.Color)
    printer.setOutputFormat(QtGui.QPrinter.PdfFormat)
    # unique_filename avoids clobbering a previously generated report
    report_path = unique_filename(suffix='.pdf')
    printer.setOutputFileName(report_path)
    # self.print_ renders the widget content into the configured printer
    self.print_(printer)
    url = QtCore.QUrl.fromLocalFile(report_path)
    # noinspection PyTypeChecker,PyCallByClass,PyArgumentList
    QtGui.QDesktopServices.openUrl(url)
Generate a PDF from the displayed content.
def update(self):
    """Update |KB| based on |EQB| and |TInd|.

    >>> from hydpy.models.lland import *
    >>> parameterstep('1d')
    >>> eqb(10.0)
    >>> tind.value = 10.0
    >>> derived.kb.update()
    >>> derived.kb
    kb(100.0)
    """
    control = self.subpars.pars.control
    self(control.eqb * control.tind)
Update |KB| based on |EQB| and |TInd|. >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> eqb(10.0) >>> tind.value = 10.0 >>> derived.kb.update() >>> derived.kb kb(100.0)
def list(self, path=None, with_metadata=False, include_partitions=False):
    '''Get a list of all of bundle files in the cache.

    Does not return partition files.
    '''
    import json

    if path:
        sub_path = self.prefix + '/' + path.strip('/')
    else:
        sub_path = self.prefix

    entries = {}
    for e in self.bucket.list(sub_path):
        rel = e.name.replace(self.prefix, '', 1).strip('/')
        if rel.startswith(('_', 'meta')):
            continue
        # TODO 'include_partitions' doesn't make any sense outside of ambry
        if not include_partitions and rel.count('/') > 1:
            continue  # partition files
        if with_metadata:
            meta = self.metadata(rel)
            if meta and 'identity' in meta:
                meta['identity'] = json.loads(meta['identity'])
        else:
            meta = {}
        meta['caches'] = [self.repo_id]
        if rel:
            entries[rel] = meta
    return entries
Get a list of all of bundle files in the cache. Does not return partition files
def try_get_dn_string(subject, shorten=False):
    """Return the distinguished name (DN) of *subject* as a readable string.

    :param subject: cryptography x509 name (iterable of name attributes)
    :param shorten: if True, use short aliases (CN, O, OU, ...) for known OIDs
    :return: comma-separated ``name: value`` pairs, or ``'N/A'`` on error
    """
    try:
        from cryptography.x509.oid import NameOID
        from cryptography.x509 import ObjectIdentifier
        # Map OIDs to short aliases. getattr guards against older
        # cryptography versions that lack a given NameOID constant.
        oid_names = {
            getattr(NameOID, 'COMMON_NAME', ObjectIdentifier("2.5.4.3")): "CN",
            getattr(NameOID, 'COUNTRY_NAME', ObjectIdentifier("2.5.4.6")): "C",
            getattr(NameOID, 'LOCALITY_NAME', ObjectIdentifier("2.5.4.7")): "L",
            getattr(NameOID, 'STATE_OR_PROVINCE_NAME', ObjectIdentifier("2.5.4.8")): "ST",
            getattr(NameOID, 'STREET_ADDRESS', ObjectIdentifier("2.5.4.9")): "St",
            getattr(NameOID, 'ORGANIZATION_NAME', ObjectIdentifier("2.5.4.10")): "O",
            getattr(NameOID, 'ORGANIZATIONAL_UNIT_NAME', ObjectIdentifier("2.5.4.11")): "OU",
            getattr(NameOID, 'SERIAL_NUMBER', ObjectIdentifier("2.5.4.5")): "SN",
            getattr(NameOID, 'USER_ID', ObjectIdentifier("0.9.2342.19200300.100.1.1")): "userID",
            getattr(NameOID, 'DOMAIN_COMPONENT', ObjectIdentifier("0.9.2342.19200300.100.1.25")): "domainComponent",
            getattr(NameOID, 'EMAIL_ADDRESS', ObjectIdentifier("1.2.840.113549.1.9.1")): "emailAddress",
            getattr(NameOID, 'POSTAL_CODE', ObjectIdentifier("2.5.4.17")): "ZIP",
        }

        ret = []
        try:
            for attribute in subject:
                oid = attribute.oid
                # NOTE: oid._name is a private attribute of cryptography's
                # ObjectIdentifier; it is used here as the long-form label.
                oid_name = oid_names[oid] if shorten and oid in oid_names else oid._name
                val = attribute.value
                ret.append('%s: %s' % (oid_name, val))
        except Exception:
            # Best-effort: keep whatever attributes were parsed so far.
            # (Was a bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt.)
            pass
        return ', '.join(ret)

    except Exception as e:
        logger.warning('Unexpected error: %s' % e)
        return 'N/A'
Returns DN as a string :param subject: :param shorten: :return:
def find_region_end(self, lines):
    """Find the end of the region started with start and end markers.

    :param lines: remaining source lines of the file being parsed
    :return: a 3-tuple ``(end_of_cell, start_of_next, explicit_end_found)``
        of line indices into ``lines`` plus a flag telling whether an
        explicit end marker terminated the region
    """
    # The cell type may have been captured in the metadata of the cell header.
    if self.metadata and 'cell_type' in self.metadata:
        self.cell_type = self.metadata.pop('cell_type')
    else:
        self.cell_type = 'code'

    # StringParser tracks whether we are inside a (possibly multiline) string,
    # so markers inside string literals are not mistaken for cell boundaries.
    parser = StringParser(self.language or self.default_language)
    for i, line in enumerate(lines):
        # skip cell header
        if self.metadata is not None and i == 0:
            continue

        if parser.is_quoted():
            parser.read_line(line)
            continue

        parser.read_line(line)

        # New code region
        # Simple code pattern in LightScripts must be preceded with a blank line
        if self.start_code_re.match(line) or (
                self.simple_start_code_re and self.simple_start_code_re.match(line)
                and (self.cell_marker_start or i == 0 or _BLANK_LINE.match(lines[i - 1]))):

            if self.explicit_end_marker_required:
                # Metadata here was conditioned on finding an explicit end marker
                # before the next start marker. So we dismiss it.
                self.metadata = None
                self.language = None

            # Trailing blank lines before the next start marker belong to the
            # separator, not to this cell; back up over at most two of them.
            if i > 0 and _BLANK_LINE.match(lines[i - 1]):
                if i > 1 and _BLANK_LINE.match(lines[i - 2]):
                    return i - 2, i, False
                return i - 1, i, False
            return i, i, False

        if not self.ignore_end_marker and self.end_code_re:
            if self.end_code_re.match(line):
                # Explicit end marker: the marker line itself is consumed.
                return i, i + 1, True
        elif _BLANK_LINE.match(line):
            # A blank line ends the region unless the following code is
            # indented (i.e. still part of the same statement block).
            if not next_code_is_indented(lines[i:]):
                if i > 0:
                    return i, i + 1, False
                if len(lines) > 1 and not _BLANK_LINE.match(lines[1]):
                    return 1, 1, False
                return 1, 2, False

    # No terminator found: the region extends to the end of the input.
    return len(lines), len(lines), False
Find the end of the region started with start and end markers
def ignore_rules_for_url(spider, url):
    """Return the ignore rules from *spider* that are relevant to *url*.

    Rules live in the spider's ``pa11y_ignore_rules`` mapping of URL glob
    patterns to rule lists; every list whose glob matches *url* is chained
    into a single flat iterable.
    """
    rules_by_glob = getattr(spider, "pa11y_ignore_rules", {}) or {}
    matching_lists = (
        rules
        for glob, rules in rules_by_glob.items()
        if fnmatch.fnmatch(url, glob)
    )
    return itertools.chain.from_iterable(matching_lists)
Returns a list of ignore rules from the given spider, that are relevant to the given URL.
def started_tasks(self, task_registry_id=None, task_cls=None):
    """ Return tasks that were started. Result may be filtered by the given arguments.

    :param task_registry_id: if it is specified, then try to return single task which id is the same as \
    this value.
    :param task_cls: if it is specified then result will consist of this subclass only
    :return: None or WTask or tuple of WTask
    """
    if task_registry_id is not None:
        # Linear scan; if several tasks share a tag, the *last* match wins.
        task = None
        for registered_task in self.__started:
            if registered_task.__registry_tag__ == task_registry_id:
                task = registered_task
        if task_cls is not None and task is not None:
            # Apply the class filter to the single found task.
            if isinstance(task, task_cls) is True:
                return task
            return None
        return task

    # No id given: return every started task, optionally filtered by class.
    result = filter(lambda x: x is not None, self.__started)
    if task_cls is not None:
        result = filter(lambda x: isinstance(x, task_cls), result)
    return tuple(result)
Return tasks that were started. Result may be filtered by the given arguments. :param task_registry_id: if it is specified, then try to return single task which id is the same as \ this value. :param task_cls: if it is specified then result will consist of this subclass only :return: None or WTask or tuple of WTask
def sanitize_text(text):
    r"""Make a safe representation of a string.

    Whitespace characters are replaced by their escaped repr (a tab becomes
    the two characters ``\t``), the result is decoded to unicode if needed,
    and finally NFC-normalized.

    Note: the `\s` special character matches any whitespace character.
    This is equivalent to the set [\t\n\r\f\v] as well as ` ` (whitespace).
    """
    # First replace characters that have specific effects with their repr
    # (raw string avoids the invalid-escape-sequence DeprecationWarning)
    text = re.sub(r"(\s)", lambda m: repr(m.group(0)).strip("'"), text)
    # Make it a unicode string (the try supports python 2 and 3)
    try:
        text = text.decode('utf-8')
    except AttributeError:
        pass
    # Normalize it (was followed by a garbled character in the original)
    text = unicodedata.normalize('NFC', text)
    return text
Make a safe representation of a string. Note: the `\s` special character matches any whitespace character. This is equivalent to the set [\t\n\r\f\v] as well as ` ` (whitespace).
def normaliseWV(wV, normFac=1.0):
    """Scale the weight vector so that its entries sum to ``normFac``."""
    scale = sum(wV) / normFac
    return [weight / scale for weight in wV]
make char probs divisible by one
def DeviceFactory(id, lib=None):
    """Create the correct device instance based on device type and return it.

    :return: a :class:`Device` or :class:`DeviceGroup` instance.
    """
    if lib is None:
        lib = Library()
    if lib.tdGetDeviceType(id) == const.TELLSTICK_TYPE_GROUP:
        return DeviceGroup(id, lib=lib)
    return Device(id, lib=lib)
Create the correct device instance based on device type and return it. :return: a :class:`Device` or :class:`DeviceGroup` instance.
def start(self, positionals=None):
    '''Start the helper flow.

    Helper system configurations are checked to determine the components
    that should be collected for the submission. Any extra (positional)
    arguments from the user can be passed along here as a list.
    '''
    bot.info('[helpme|%s]' %(self.name))
    self.speak()
    self._start(positionals)
start the helper flow. We check helper system configurations to determine components that should be collected for the submission. This is where the client can also pass on any extra (positional) arguments in a list from the user.
def dsort(fname, order, has_header=True, frow=0, ofname=None):
    r"""
    Sort file data.

    :param fname: Name of the comma-separated values file to sort
    :type  fname: FileNameExists_

    :param order: Sort order
    :type  order: :ref:`CsvColFilter`

    :param has_header: Flag that indicates whether the comma-separated
                       values file to sort has column headers in its first
                       line (True) or not (False)
    :type  has_header: boolean

    :param frow: First data row (starting from 1). If 0 the row where data
                 starts is auto-detected as the first row that has a number
                 (integer of float) in at least one of its columns
    :type  frow: NonNegativeInteger_

    :param ofname: Name of the output comma-separated values file, the file
                   that will contain the sorted data. If None the sorting is
                   done "in place"
    :type  ofname: FileName_ or None

    .. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]]
    .. Auto-generated exceptions documentation for pcsv.dsort.dsort

    :raises:
     * OSError (File *[fname]* could not be found)

     * RuntimeError (Argument \`fname\` is not valid)

     * RuntimeError (Argument \`frow\` is not valid)

     * RuntimeError (Argument \`has_header\` is not valid)

     * RuntimeError (Argument \`ofname\` is not valid)

     * RuntimeError (Argument \`order\` is not valid)

     * RuntimeError (Column headers are not unique in file *[fname]*)

     * RuntimeError (File *[fname]* has no valid data)

     * RuntimeError (File *[fname]* is empty)

     * RuntimeError (Invalid column specification)

     * ValueError (Column *[column_identifier]* not found)

    .. [[[end]]]
    """
    target = ofname if ofname is not None else fname
    csv_obj = CsvFile(fname=fname, has_header=has_header, frow=frow)
    csv_obj.dsort(order)
    csv_obj.write(fname=target, header=has_header, append=False)
r""" Sort file data. :param fname: Name of the comma-separated values file to sort :type fname: FileNameExists_ :param order: Sort order :type order: :ref:`CsvColFilter` :param has_header: Flag that indicates whether the comma-separated values file to sort has column headers in its first line (True) or not (False) :type has_header: boolean :param frow: First data row (starting from 1). If 0 the row where data starts is auto-detected as the first row that has a number (integer of float) in at least one of its columns :type frow: NonNegativeInteger_ :param ofname: Name of the output comma-separated values file, the file that will contain the sorted data. If None the sorting is done "in place" :type ofname: FileName_ or None .. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]] .. Auto-generated exceptions documentation for pcsv.dsort.dsort :raises: * OSError (File *[fname]* could not be found) * RuntimeError (Argument \`fname\` is not valid) * RuntimeError (Argument \`frow\` is not valid) * RuntimeError (Argument \`has_header\` is not valid) * RuntimeError (Argument \`ofname\` is not valid) * RuntimeError (Argument \`order\` is not valid) * RuntimeError (Column headers are not unique in file *[fname]*) * RuntimeError (File *[fname]* has no valid data) * RuntimeError (File *[fname]* is empty) * RuntimeError (Invalid column specification) * ValueError (Column *[column_identifier]* not found) .. [[[end]]]
def _mib_register(ident, value, the_mib, unresolved):
    """Internal function used to register an OID and its name in a MIBDict.

    :param ident: symbolic name being registered
    :param value: list of OID components; each is either a numeric string
        or a name that must itself resolve to OID components
    :param the_mib: dict of fully resolved names -> numeric component lists
    :param unresolved: dict of names whose components could not all be
        resolved yet
    :return: True if *ident* ended up fully resolved, False otherwise
    """
    # Already known (either resolved or pending): report which.
    if ident in the_mib or ident in unresolved:
        return ident in the_mib
    resval = []
    not_resolved = 0
    for v in value:
        if _mib_re_integer.match(v):
            # Numeric component: keep as-is.
            resval.append(v)
        else:
            # Symbolic component: substitute its known expansion if any.
            v = fixname(plain_str(v))
            if v not in the_mib:
                not_resolved = 1
            if v in the_mib:
                v = the_mib[v]
            elif v in unresolved:
                v = unresolved[v]
            if isinstance(v, list):
                resval += v
            else:
                resval.append(v)
    if not_resolved:
        # At least one component unknown: park under unresolved for later.
        unresolved[ident] = resval
        return False
    else:
        the_mib[ident] = resval
        # Registering ident may unblock previously unresolved entries;
        # retry them until a full pass makes no progress (i = 0 restarts
        # the scan after every success).
        keys = list(unresolved)
        i = 0
        while i < len(keys):
            k = keys[i]
            if _mib_register(k, unresolved[k], the_mib, {}):
                del(unresolved[k])
                del(keys[i])
                i = 0
            else:
                i += 1
        return True
Internal function used to register an OID and its name in a MIBDict
def find_donor_catchments(self, include_subject_catchment='auto'):
    """
    Find list of suitable donor catchments, ranked by hydrological similarity
    distance measure. This method is implicitly called when calling the
    :meth:`.growth_curve` method unless the attribute :attr:`.donor_catchments`
    is set manually.

    The results are stored in :attr:`.donor_catchments`. The (list of)
    :class:`floodestimation.entities.Catchment` will have an additional
    attribute :attr:`similarity_dist`.

    :param include_subject_catchment: - `auto`: include subject catchment if
                                        suitable for pooling and if
                                        urbext2000 < 0.03
                                      - `force`: always include subject
                                        catchment
                                      - `exclude`: do not include the subject
                                        catchment
    :type include_subject_catchment: str
    """
    # Only if we have access to db with gauged catchment data
    if not self.gauged_cachments:
        self.donor_catchments = []
        return
    self.donor_catchments = self.gauged_cachments.most_similar_catchments(
        subject_catchment=self.catchment,
        similarity_dist_function=lambda c1, c2: self._similarity_distance(c1, c2),
        include_subject_catchment=include_subject_catchment)
Find list of suitable donor catchments, ranked by hydrological similarity distance measure. This method is implicitly called when calling the :meth:`.growth_curve` method unless the attribute :attr:`.donor_catchments` is set manually. The results are stored in :attr:`.donor_catchments`. The (list of) :class:`floodestimation.entities.Catchment` will have an additional attribute :attr:`similarity_dist`. :param include_subject_catchment: - `auto`: include subject catchment if suitable for pooling and if urbext2000 < 0.03 - `force`: always include subject catchment - `exclude`: do not include the subject catchment :type include_subject_catchment: str
def sample(self, multiplicity):
    r"""
    Randomly sample azimuthal angles `\phi`.

    :param int multiplicity: Number to sample.
    :returns: Array of sampled angles.
    """
    # No flow coefficients: the PDF is flat, sample uniformly.
    if self._n is None:
        return self._uniform_phi(multiplicity)

    # Since the flow PDF does not have an analytic inverse CDF, I use a
    # simple accept-reject sampling algorithm.  This is reasonably
    # efficient since for normal-sized vn, the PDF is close to flat.  Now
    # due to the overhead of Python functions, it's desirable to minimize
    # the number of calls to the random number generator.  Therefore I
    # sample numbers in chunks; most of the time only one or two chunks
    # should be needed.  Eventually, I might rewrite this with Cython, but
    # it's fast enough for now.

    N = 0  # number of phi that have been sampled
    phi = np.empty(multiplicity)  # allocate array for phi
    pdf_max = 1 + 2*self._vn.sum()  # sampling efficiency ~ 1/pdf_max

    while N < multiplicity:
        n_remaining = multiplicity - N
        # 1.03 oversampling factor compensates for expected rejections.
        n_to_sample = int(1.03*pdf_max*n_remaining)
        phi_chunk = self._uniform_phi(n_to_sample)
        # Accept-reject: keep candidates whose PDF value beats a uniform draw.
        phi_chunk = phi_chunk[self._pdf(phi_chunk) >
                              np.random.uniform(0, pdf_max, n_to_sample)]
        K = min(phi_chunk.size, n_remaining)  # number of phi to take
        phi[N:N+K] = phi_chunk[:K]
        N += K

    return phi
r""" Randomly sample azimuthal angles `\phi`. :param int multiplicity: Number to sample. :returns: Array of sampled angles.
def init_db(drop_all=False, bind=engine):
    """Initialize the database, optionally dropping existing tables.

    :param drop_all: when True, drop all tables before creating them
    :param bind: engine or connection the DDL runs against
    :return: the module-level ``session``
    :raises OperationalError: re-raised after printing a hint when password
        authentication for the "dallinger" database user failed
    """
    try:
        if drop_all:
            Base.metadata.drop_all(bind=bind)
        Base.metadata.create_all(bind=bind)
    except OperationalError as err:
        msg = 'password authentication failed for user "dallinger"'
        # str(err) works on Python 2 and 3; the original used the
        # Python 2-only ``err.message`` attribute, which raises
        # AttributeError on Python 3 and masks the real error.
        if msg in str(err):
            sys.stderr.write(db_user_warning)
        raise

    return session
Initialize the database, optionally dropping existing tables.
def _populate_stub(self, name, stub, table): """ Populate the placeholders in the migration stub. :param name: The name of the migration :type name: str :param stub: The stub :type stub: str :param table: The table name :type table: str :rtype: str """ stub = stub.replace('DummyClass', self._get_class_name(name)) if table is not None: stub = stub.replace('dummy_table', table) return stub
Populate the placeholders in the migration stub. :param name: The name of the migration :type name: str :param stub: The stub :type stub: str :param table: The table name :type table: str :rtype: str
def connect_proxy(self, proxy_host='localhost', proxy_port=0, proxy_type=socks.HTTP,
                  host='localhost', port=0):
    """Connect to a host on a given port via proxy server

    If the hostname ends with a colon (`:') followed by a number, and
    there is no port specified, that suffix will be stripped off and the
    number interpreted as the port number to use.

    Note: This method is automatically invoked by __init__, if a host and
    proxy server are specified during instantiation.

    :param proxy_host: Hostname of proxy server
    :type proxy_host: string

    :param proxy_port: Port of proxy server, by default port for specified
        proxy type is used
    :type proxy_port: int

    :param proxy_type: Proxy type to use (see socks.PROXY_TYPES for details)
    :type proxy_type: int

    :param host: Hostname of SMTP server
    :type host: string

    :param port: Port of SMTP server, by default smtplib.SMTP_PORT is used
    :type port: int

    :return: Tuple of (code, msg)
    :rtype: tuple

    :raises NotSupportedProxyType: if ``proxy_type`` is not a key of
        ``socks.DEFAULT_PORTS``
    """
    if proxy_type not in socks.DEFAULT_PORTS.keys():
        raise NotSupportedProxyType
    # Resolve host:port suffixes and fall back to the proxy type's default port.
    (proxy_host, proxy_port) = self._parse_host(host=proxy_host, port=proxy_port)
    if not proxy_port:
        proxy_port = socks.DEFAULT_PORTS[proxy_type]
    (host, port) = self._parse_host(host=host, port=port)

    if self.debuglevel > 0:
        self._print_debug('connect: via proxy', proxy_host, proxy_port)
    # socksocket transparently tunnels the TCP connection through the proxy.
    s = socks.socksocket()
    s.set_proxy(proxy_type=proxy_type, addr=proxy_host, port=proxy_port)
    s.settimeout(self.timeout)
    if self.source_address is not None:
        s.bind(self.source_address)
    s.connect((host, port))
    # todo
    # Send CRLF in order to get first response from destination server.
    # Probably it's needed only for HTTP proxies. Further investigation required.
    s.sendall(bCRLF)
    self.sock = s
    (code, msg) = self.getreply()
    if self.debuglevel > 0:
        self._print_debug('connect:', repr(msg))
    return code, msg
Connect to a host on a given port via proxy server If the hostname ends with a colon (`:') followed by a number, and there is no port specified, that suffix will be stripped off and the number interpreted as the port number to use. Note: This method is automatically invoked by __init__, if a host and proxy server are specified during instantiation. :param proxy_host: Hostname of proxy server :type proxy_host: string :param proxy_port: Port of proxy server, by default port for specified proxy type is used :type proxy_port: int :param proxy_type: Proxy type to use (see socks.PROXY_TYPES for details) :type proxy_type: int :param host: Hostname of SMTP server :type host: string :param port: Port of SMTP server, by default smtplib.SMTP_PORT is used :type port: int :return: Tuple of (code, msg) :rtype: tuple
def validate_bucket(self):
    """
    Do a quick check to see if the s3 bucket is valid
    :return:

    Shells out to ``aws s3 ls`` with the configured profile and first
    region; exits the process with status 5 when the bucket cannot be
    listed (missing bucket or insufficient permissions).
    NOTE: this is Python 2 code (print statements).
    """
    s3_check_cmd = "aws s3 ls s3://{} --profile '{}' --region '{}'".format(self.bucket_name, self.aws_project, self.aws_regions[0])
    print "Checking for s3 bucket"
    try:
        subprocess.check_output(shlex.split(s3_check_cmd))
    except subprocess.CalledProcessError as e:
        print "Error: {}".format(e)
        print "Unable to query s3 bucket: {}. Validate that it exists, and your user has sufficient permissions"\
            .format(self.bucket_name)
        sys.exit(5)
Do a quick check to see if the s3 bucket is valid :return:
def get_available_symbols(**kwargs):
    """
    MOVED to iexfinance.refdata.get_symbols
    """
    import warnings
    warnings.warn(WNG_MSG % ("get_available_symbols", "refdata.get_symbols"))
    _ALL_SYMBOLS_URL = "https://api.iextrading.com/1.0/ref-data/symbols"
    handler = _IEXBase(**kwargs)
    response = handler._execute_iex_query(_ALL_SYMBOLS_URL)
    if not response:
        raise IEXQueryError("Could not download all symbols")
    return response
MOVED to iexfinance.refdata.get_symbols
def flush_and_refresh(self, index):
    """Flush and refresh one or more indices.

    .. warning::

       Do not call this method unless you know what you are doing. This
       method is only intended to be called during tests.
    """
    indices = self.client.indices
    indices.flush(wait_if_ongoing=True, index=index)
    indices.refresh(index=index)
    self.client.cluster.health(
        wait_for_status='yellow', request_timeout=30)
    return True
Flush and refresh one or more indices. .. warning:: Do not call this method unless you know what you are doing. This method is only intended to be called during tests.
def encode(self):
    """Encode the field's value into an XML element.

    Also performs the nil=true handling when applicable.

    :return: the encoded element
    :rtype: xml.etree.ElementTree.Element
    """
    encoded = ElementTree.Element(self.name)
    return self._set_nil(encoded, lambda value: str(value))
Encodes the value of the field and put it in the element also make the check for nil=true if there is one :return: returns the encoded element :rtype: xml.etree.ElementTree.Element
def destroyCommit(self, varBind, **context):
    """Destroy Managed Object Instance.

    Implements the second (*commit*) step of the multi-step workflow
    similar to the SNMP SET command processing (:RFC:`1905#section-4.2.5`):
    the requested Managed Object Instance is actually removed from the MIB
    tree here.  If any single *commit* step fails, the system transitions
    into the *undo* state for each Managed Object Instance being processed
    at once; the removed object is therefore stashed so *undo* can restore
    it.

    Parameters
    ----------
    varBind: :py:class:`~pysnmp.smi.rfc1902.ObjectType` object representing
        the Managed Object Instance to destroy

    Other Parameters
    ----------------
    \*\*context:

        Query parameters:

        * `cbFun` (callable) - user-supplied callable that is invoked to
          pass the new value of the Managed Object Instance or an error.

        * `instances` (dict): user-supplied dict for temporarily holding
          Managed Objects Instances being destroyed.

        * `idx` (int): var-bind index within the request, used to key the
          stashed object.

    Notes
    -----
    The callback functions (e.g. `cbFun`) have the same signature as this
    method where `varBind` contains the new Managed Object Instance value.
    In case of an error, the `error` key in the `context` dict will contain
    an exception object.
    """
    name, val = varBind

    (debug.logger & debug.FLAG_INS and
     debug.logger('%s: destroyCommit(%s, %r)' % (self, name, val)))

    instances = context['instances'].setdefault(self.name, {self.ST_CREATE: {}, self.ST_DESTROY: {}})
    idx = context['idx']

    # NOTE: multiple names are possible in a single PDU, that could collide
    # Therefore let's keep old object indexed by (negative) var-bind index
    try:
        instances[self.ST_DESTROY][-idx - 1] = self._vars.pop(name)

    except KeyError:
        # Nothing to remove: the instance did not exist; commit is a no-op.
        pass

    cbFun = context['cbFun']
    cbFun(varBind, **context)
Destroy Managed Object Instance. Implements the second of the multi-step workflow similar to the SNMP SET command processing (:RFC:`1905#section-4.2.5`). The goal of the second phase is to actually remove requested Managed Object Instance from the MIB tree. When multiple Managed Objects Instances are destroyed/modified at once (likely coming all in one SNMP PDU), each of them has to run through the second (*commit*) phase successfully for the system to transition to the third (*cleanup*) phase. If any single *commit* step fails, the system transitions into the *undo* state for each of Managed Objects Instances being processed at once. The role of this object in the MIB tree is non-terminal. It does not access the actual Managed Object Instance, but just traverses one level down the MIB tree and hands off the query to the underlying objects. Parameters ---------- varBind: :py:class:`~pysnmp.smi.rfc1902.ObjectType` object representing new Managed Object Instance value to destroy Other Parameters ---------------- \*\*context: Query parameters: * `cbFun` (callable) - user-supplied callable that is invoked to pass the new value of the Managed Object Instance or an error. * `instances` (dict): user-supplied dict for temporarily holding Managed Objects Instances being destroyed. Notes ----- The callback functions (e.g. `cbFun`) have the same signature as this method where `varBind` contains the new Managed Object Instance value. In case of an error, the `error` key in the `context` dict will contain an exception object.
async def serialize(self, native=False):
    '''
    Returns a serialized form of the model taking into account projection
    rules and ``@serialize`` decorated methods.

    :param native: Determines if data is serialized to Python native types
        or primitive form. Defaults to ``False``
    '''
    data = {}
    # iterate through all fields
    for field_name, field in self._fields.items():
        # serialize field data
        raw_data = self._data.get(field_name)
        # add field's data to model data based on projection settings
        if field._projection != None:  # noqa E711
            field_data = await field.serialize(raw_data, native)
            if field_data:
                data[field_name] = field_data
            # NOTE(review): falsy-but-valid values (0, '', False) fall
            # through to the elif and are emitted as None when the
            # projection is True — confirm this is intended.
            elif field._projection == True:  # noqa E711
                data[field_name] = None
    # iterate through all export methods
    for name, func in self._serialize_methods.items():
        data[name] = await func(self)
    return data
Returns a serialized form of the model taking into account projection rules and ``@serialize`` decorated methods. :param native: Determines if data is serialized to Python native types or primitive form. Defaults to ``False``
def vsan_add_disks(host, username, password, protocol=None, port=None, host_names=None):
    '''
    Add any VSAN-eligible disks to the VSAN System for the given host or
    list of host_names.

    host
        The location of the host.

    username
        The username used to login to the host, such as ``root``.

    password
        The password used to login to the host.

    protocol
        Optionally set to alternate protocol if the host is not using the default
        protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the host is not using the default port.
        Default port is ``443``.

    host_names
        List of ESXi host names. When the host, username, and password credentials
        are provided for a vCenter Server, the host_names argument is required to
        tell vCenter which hosts need to add any VSAN-eligible disks to the host's
        VSAN system.

        If host_names is not provided, VSAN-eligible disks will be added to the
        hosts's VSAN system for the ``host`` location instead. This is useful for
        when service instance connection information is used for a single ESXi host.

    :return: dict keyed by host name, each value holding either
        ``'Disks Added'`` or ``'Error'``.

    CLI Example:

    .. code-block:: bash

        # Used for single ESXi host connection information
        salt '*' vsphere.vsan_add_disks my.esxi.host root bad-password

        # Used for connecting to a vCenter Server
        salt '*' vsphere.vsan_add_disks my.vcenter.location root bad-password \
        host_names='[esxi-1.host.com, esxi-2.host.com]'
    '''
    service_instance = salt.utils.vmware.get_service_instance(host=host,
                                                              username=username,
                                                              password=password,
                                                              protocol=protocol,
                                                              port=port)
    host_names = _check_hosts(service_instance, host, host_names)
    response = _get_vsan_eligible_disks(service_instance, host, host_names)

    ret = {}
    for host_name, value in six.iteritems(response):
        host_ref = _get_host_ref(service_instance, host, host_name=host_name)
        vsan_system = host_ref.configManager.vsanSystem

        # We must have a VSAN Config in place before we can manipulate it.
        if vsan_system is None:
            msg = 'VSAN System Config Manager is unset for host \'{0}\'. ' \
                  'VSAN configuration cannot be changed without a configured ' \
                  'VSAN System.'.format(host_name)
            log.debug(msg)
            ret.update({host_name: {'Error': msg}})
        else:
            eligible = value.get('Eligible')
            error = value.get('Error')

            if eligible and isinstance(eligible, list):
                # If we have eligible, matching disks, add them to VSAN.
                try:
                    task = vsan_system.AddDisks(eligible)
                    salt.utils.vmware.wait_for_task(task, host_name, 'Adding disks to VSAN', sleep_seconds=3)
                except vim.fault.InsufficientDisks as err:
                    log.debug(err.msg)
                    ret.update({host_name: {'Error': err.msg}})
                    continue
                except Exception as err:
                    msg = '\'vsphere.vsan_add_disks\' failed for host {0}: {1}'.format(host_name, err)
                    log.debug(msg)
                    ret.update({host_name: {'Error': msg}})
                    continue

                log.debug(
                    'Successfully added disks to the VSAN system for host \'%s\'.',
                    host_name
                )
                # We need to return ONLY the disk names, otherwise
                # Message Pack can't deserialize the disk objects.
                disk_names = []
                for disk in eligible:
                    disk_names.append(disk.canonicalName)
                ret.update({host_name: {'Disks Added': disk_names}})
            elif eligible and isinstance(eligible, six.string_types):
                # If we have a string type in the eligible value, we don't
                # have any VSAN-eligible disks. Pull the message through.
                ret.update({host_name: {'Disks Added': eligible}})
            elif error:
                # If we hit an error, populate the Error return dict for state functions.
                ret.update({host_name: {'Error': error}})
            else:
                # If we made it this far, we somehow have eligible disks, but they didn't
                # match the disk list and just got an empty list of matching disks.
                ret.update({host_name: {'Disks Added': 'No new VSAN-eligible disks were found to add.'}})

    return ret
Add any VSAN-eligible disks to the VSAN System for the given host or list of host_names. host The location of the host. username The username used to login to the host, such as ``root``. password The password used to login to the host. protocol Optionally set to alternate protocol if the host is not using the default protocol. Default protocol is ``https``. port Optionally set to alternate port if the host is not using the default port. Default port is ``443``. host_names List of ESXi host names. When the host, username, and password credentials are provided for a vCenter Server, the host_names argument is required to tell vCenter which hosts need to add any VSAN-eligible disks to the host's VSAN system. If host_names is not provided, VSAN-eligible disks will be added to the hosts's VSAN system for the ``host`` location instead. This is useful for when service instance connection information is used for a single ESXi host. CLI Example: .. code-block:: bash # Used for single ESXi host connection information salt '*' vsphere.vsan_add_disks my.esxi.host root bad-password # Used for connecting to a vCenter Server salt '*' vsphere.vsan_add_disks my.vcenter.location root bad-password \ host_names='[esxi-1.host.com, esxi-2.host.com]'
def isinstance(self, class_or_string):
    """Check whether this node is an instance of `class_or_string`.

    Unlike the standard ``isinstance`` builtin, this method accepts either
    a class or a string.  In the string case, the comparison is made
    against ``self.__class__.__name__`` (case-insensitively).
    """
    if class_or_string is None:
        return False
    import inspect
    if not inspect.isclass(class_or_string):
        # String comparison against the class name, ignoring case.
        return self.__class__.__name__.lower() == class_or_string.lower()
    return isinstance(self, class_or_string)
Check whether the node is an instance of `class_or_string`. Unlike the standard isinstance builtin, the method accepts either a class or a string. In the latter case, the string is compared with self.__class__.__name__ (case insensitive).
def launch_process(self, command):
    # type: (Union[bytes,text_type])->None
    """* What you can do

    - It starts process and keep it.
    """
    # NOTE(review): the spawned command line is built from self.command /
    # self.option, while the ``command`` argument is only used for the
    # existence checks below -- confirm this asymmetry is intentional.
    if not self.option is None:
        command_plus_option = self.command + " " + self.option
    else:
        command_plus_option = self.command
    if six.PY3:
        # Python 3: verify the executable is on PATH before spawning.
        if shutil.which(command) is None:
            raise Exception("No command at {}".format(command))
        else:
            # Keep the pexpect-controlled process and its pid on the instance.
            self.process_analyzer = pexpect.spawnu(command_plus_option)
            self.process_id = self.process_analyzer.pid
    else:
        # Python 2: probe the command by piping an empty string into it and
        # checking the shell exit status.
        doc_command_string = "echo '' | {}".format(command)
        command_check = os.system(doc_command_string)
        if not command_check == 0:
            raise Exception("No command at {}".format(command))
        else:
            self.process_analyzer = pexpect.spawnu(command_plus_option)
            self.process_id = self.process_analyzer.pid
* What you can do - It starts a process and keeps it running.
def read(self, size=None):
    """Read up to *size* characters from the response.

    :param size: The number of characters to read, or "None" to read the
                 entire response.
    :type size: ``integer`` or "None"

    Buffered data (e.g. from a previous peek) is consumed first.  When the
    buffer alone satisfies the request, the surplus stays buffered instead
    of being returned -- previously ``size -= len(r)`` could go negative,
    and a negative read size means "read everything", returning more than
    *size* characters.
    """
    r = self._buffer
    self._buffer = b''
    if size is not None:
        if len(r) >= size:
            # Buffer already covers the request; keep the remainder.
            self._buffer = r[size:]
            return r[:size]
        size -= len(r)
    return r + self._response.read(size)
Reads a given number of characters from the response. :param size: The number of characters to read, or "None" to read the entire response. :type size: ``integer`` or "None"
def valid_backbone_bond_lengths(self, atol=0.1):
    """True if all backbone bonds are within atol Angstroms of the expected distance.

    Notes
    -----
    Ideal bond lengths taken from [1].

    References
    ----------
    .. [1] Schulz, G. E, and R. Heiner Schirmer. Principles Of
       Protein Structure. New York: Springer-Verlag, 1979.

    Parameters
    ----------
    atol : float, optional
        Tolerance value in Angstroms for the absolute deviation away from
        ideal backbone bond lengths.
    """
    bond_lengths = self.backbone_bond_lengths
    # Each intra-residue bond type is compared element-wise against the
    # ideal value repeated once per residue.
    a1 = numpy.allclose(bond_lengths['n_ca'],
                        [ideal_backbone_bond_lengths['n_ca']] * len(self),
                        atol=atol)
    a2 = numpy.allclose(bond_lengths['ca_c'],
                        [ideal_backbone_bond_lengths['ca_c']] * len(self),
                        atol=atol)
    a3 = numpy.allclose(bond_lengths['c_o'],
                        [ideal_backbone_bond_lengths['c_o']] * len(self),
                        atol=atol)
    # Inter-residue peptide bonds (C-N): one fewer than the residue count.
    a4 = numpy.allclose(bond_lengths['c_n'],
                        [ideal_backbone_bond_lengths['c_n']] * (len(self) - 1),
                        atol=atol)
    return all([a1, a2, a3, a4])
True if all backbone bonds are within atol Angstroms of the expected distance. Notes ----- Ideal bond lengths taken from [1]. References ---------- .. [1] Schulz, G. E, and R. Heiner Schirmer. Principles Of Protein Structure. New York: Springer-Verlag, 1979. Parameters ---------- atol : float, optional Tolerance value in Angstroms for the absolute deviation away from ideal backbone bond lengths.
def run_job(self, name):
    '''
    Run a schedule job now
    '''
    # Look the job definition up in the schedule; missing -> empty dict.
    data = self._get_schedule().get(name, {})

    # The callable may be stored under any of these legacy keys.
    if 'function' in data:
        func = data['function']
    elif 'func' in data:
        func = data['func']
    elif 'fun' in data:
        func = data['fun']
    else:
        func = None
    # Normalize to a list so multi-function jobs and single ones share a path.
    if not isinstance(func, list):
        func = [func]

    for _func in func:
        if _func not in self.functions:
            # NOTE(review): an unknown function is logged but not skipped;
            # the job is still attempted below -- confirm this is intended.
            log.error(
                'Invalid function: %s in scheduled job %s.',
                _func, name
            )

        if 'name' not in data:
            data['name'] = name
        log.info('Running Job: %s', name)

        # Grab run, assume True
        run = data.get('run', True)
        if run:
            self._run_job(_func, data)
Run a schedule job now
def runBasic(noiseLevel=None, profile=False):
    """
    Runs a basic experiment on continuous locations, learning a few locations on
    four basic objects, and inferring one of them.

    This experiment is mostly used for testing the pipeline, as the learned
    locations are too random and sparse to actually perform inference.

    Parameters:
    ----------------------------
    @param   noiseLevel (float)
             Noise level to add to the locations and features during inference

    @param   profile (bool)
             If True, the network will be profiled after learning and inference
    """
    # Two cortical columns observing the same objects.
    exp = L4L2Experiment(
        "basic_continuous",
        numCorticalColumns=2
    )

    objects = createObjectMachine(
        machineType="continuous",
        numInputBits=21,
        sensorInputSize=1024,
        externalInputSize=1024,
        numCorticalColumns=2,
    )

    objects.addObject(Sphere(radius=20), name="sphere")
    objects.addObject(Cylinder(height=50, radius=20), name="cylinder")
    objects.addObject(Box(dimensions=[10, 20, 30,]), name="box")
    objects.addObject(Cube(width=20), name="cube")

    learnConfig = {
        "sphere": [("surface", 10)],
        # the "box" and "cube" configs below are effectively the same:
        # a cube's features are its faces, edges and vertices.
        "box": [("face", 5), ("edge", 5), ("vertex", 5)],
        "cube": [(feature, 5) for feature in objects["cube"].getFeatures()],
        "cylinder": [(feature, 5) for feature in objects["cylinder"].getFeatures()]
    }

    exp.learnObjects(
        objects.provideObjectsToLearn(learnConfig, plot=True),
        reset=True
    )
    if profile:
        exp.printProfile()

    # Infer the cube from four feature/location sensations per column.
    inferConfig = {
        "numSteps": 4,
        "noiseLevel": noiseLevel,
        "objectName": "cube",
        "pairs": {
            0: ["face", "face", "edge", "edge"],
            1: ["edge", "face", "face", "edge"]
        }
    }

    exp.infer(
        objects.provideObjectToInfer(inferConfig, plot=True),
        objectName="cube",
        reset=True
    )
    if profile:
        exp.printProfile()

    exp.plotInferenceStats(
        fields=["L2 Representation",
                "Overlap L2 with object",
                "L4 Representation"],
    )
Runs a basic experiment on continuous locations, learning a few locations on four basic objects, and inferring one of them. This experiment is mostly used for testing the pipeline, as the learned locations are too random and sparse to actually perform inference. Parameters: ---------------------------- @param noiseLevel (float) Noise level to add to the locations and features during inference @param profile (bool) If True, the network will be profiled after learning and inference
def _insert_data(self, name, value, timestamp, interval, config, **kwargs): '''Helper to insert data into sql.''' conn = self._client.connect() if not self._update_data(name, value, timestamp, interval, config, conn): try: kwargs = { 'name' : name, 'interval' : interval, 'i_time' : config['i_calc'].to_bucket(timestamp), 'value' : value } if not config['coarse']: kwargs['r_time'] = config['r_calc'].to_bucket(timestamp) stmt = self._table.insert().values(**kwargs) result = conn.execute(stmt) except: # TODO: only catch IntegrityError if not self._update_data(name, value, timestamp, interval, config, conn): raise
Helper to insert data into sql.
def process_form(self, instance, field, form, empty_marker=None,
                 emptyReturnsMarker=False):
    """Return a list of dictionaries fit for ReferenceResultsField
    consumption. Only services which have float()able entries in result,min
    and max field will be included. If any of min, max, or result fields
    are blank, the row value is ignored here.
    """
    values = {}

    # Process settings from the reference definition first
    ref_def = form.get("ReferenceDefinition")
    ref_def_uid = ref_def and ref_def[0]
    if ref_def_uid:
        ref_def_obj = api.get_object_by_uid(ref_def_uid)
        ref_results = ref_def_obj.getReferenceResults()
        # store reference results by UID to avoid duplicates
        rr_by_uid = dict(map(lambda r: (r.get("uid"), r), ref_results))
        values.update(rr_by_uid)

    # selected services
    service_uids = form.get("uids", [])

    for uid in service_uids:
        result = self._get_spec_value(form, uid, "result")
        if not result:
            # User has to set a value for result subfield at least
            continue

        # If neither min nor max have been set, assume we only accept a
        # discrete result (like if % of error was 0).
        s_min = self._get_spec_value(form, uid, "min", result)
        s_max = self._get_spec_value(form, uid, "max", result)

        service = api.get_object_by_uid(uid)
        # Form-selected values override any defaults pulled from the
        # reference definition above (same UID key).
        values[uid] = {
            "keyword": service.getKeyword(),
            "uid": uid,
            "result": result,
            "min": s_min,
            "max": s_max
        }

    # NOTE(review): dict.values() is a lazy view on Python 3; callers
    # expecting a list may need list(values.values()) -- confirm the target
    # Python version.
    return values.values(), {}
Return a list of dictionaries fit for ReferenceResultsField consumption. Only services which have float()able entries in result,min and max field will be included. If any of min, max, or result fields are blank, the row value is ignored here.
def touch(fpath, times=None, verbose=True):
    r"""Create the file at ``fpath`` if it does not already exist.

    Args:
        fpath (str): file path
        times (None): passed straight through to :func:`os.utime`
        verbose (bool): print a message while touching

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_path import *  # NOQA
        >>> fpath = '?'
        >>> times = None
        >>> verbose = True
        >>> result = touch(fpath, times, verbose)
        >>> print(result)

    References:
        http://stackoverflow.com/questions/1158076/implement-touch-using-python
    """
    try:
        if verbose:
            print('[util_path] touching %r' % fpath)
        # Append mode creates the file without truncating existing content.
        with open(fpath, 'a'):
            os.utime(fpath, times)
    except Exception as ex:
        import utool
        utool.printex(ex, 'touch %s' % fpath)
        raise
    return fpath
r""" Creates file if it doesnt exist Args: fpath (str): file path times (None): verbose (bool): Example: >>> # DISABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> fpath = '?' >>> times = None >>> verbose = True >>> result = touch(fpath, times, verbose) >>> print(result) References: http://stackoverflow.com/questions/1158076/implement-touch-using-python
def get_channel_names(self):
    """Get list of channel names. Raises a warning if the names are not unique."""
    names_s, names_n = self.channel_names_s, self.channel_names_n

    # Pick the primary naming source according to the configured convention.
    if self._channel_naming == '$PnS':
        primary, alternate = names_s, names_n
    else:
        primary, alternate = names_n, names_s

    # Fall back to the alternate names when the primary list is empty.
    if len(primary) == 0:
        primary = alternate

    # Duplicate names would break downstream analysis: warn and switch.
    if len(set(primary)) != len(primary):
        msg = (u'The default channel names (defined by the {} '
               u'parameter in the FCS file) were not unique. To avoid '
               u'problems in downstream analysis, the channel names '
               u'have been switched to the alternate channel names '
               u'defined in the FCS file. To avoid '
               u'seeing this warning message, explicitly instruct '
               u'the FCS parser to use the alternate channel names by '
               u'specifying the channel_naming parameter.')
        warnings.warn(msg.format(self._channel_naming))
        primary = alternate

    return primary
Get list of channel names. Raises a warning if the names are not unique.
def max(self):
    """ Compute the max across records.

    Reduces ``self.values`` with ``max`` over ``self.baseaxes``;
    ``keepdims=True`` preserves the original dimensionality, and the result
    is wrapped back into this type via ``self._constructor``.
    """
    return self._constructor(self.values.max(axis=self.baseaxes, keepdims=True))
Compute the max across records.
def run(self):  # Thread for receiving data from pilight
    """Receiver thread function called on Client.start()."""
    logging.debug('Pilight receiver thread started')
    if not self.callback:
        raise RuntimeError('No callback function set, cancel readout thread')

    def handle_messages(messages):
        """Call callback on each receive message."""
        for message in messages:  # Loop over received messages
            if message:  # Can be empty due to splitlines
                message_dict = json.loads(message.decode())
                if self.recv_codes_only:
                    # Filter: Only use receiver messages
                    if 'receiver' in message_dict['origin']:
                        if self.veto_repeats:
                            # Forward only the first frame of a repeated code.
                            if message_dict.get('repeats', 1) == 1:
                                self.callback(message_dict)
                        else:
                            self.callback(message_dict)
                else:
                    self.callback(message_dict)

    # NOTE(review): Event.isSet() is a deprecated alias of is_set().
    while not self._stop_thread.isSet():
        try:
            # Read socket in a non blocking call and interpret data
            # Sometimes more than one JSON object is in the stream thus
            # split at \n
            with self._lock:
                messages = self.receive_socket.recv(1024).splitlines()
            handle_messages(messages)
        except (socket.timeout, ValueError):  # No data
            pass
    logging.debug('Pilight receiver thread stopped')
Receiver thread function called on Client.start().
def load_config(cls):
    """ Load global and local configuration files and update if needed."""
    # Global config path lives under the user's home directory.
    config_file = os.path.expanduser(cls.home_config)
    global_conf = cls.load(config_file, 'global')
    # Local (project-level) config; its return value is not needed here.
    cls.load(cls.local_config, 'local')
    # update global configuration if needed
    cls.update_config(config_file, global_conf)
Load global and local configuration files and update if needed.
def get_packages():
    """
    Returns the packages used for HaTeMiLe for Python.

    :return: The packages used for HaTeMiLe for Python.
    :rtype: list(str)
    """
    packages = find_packages(exclude=['tests'])
    # Non-package directories that must still ship with the distribution.
    packages.extend(['', 'js', LOCALES_DIRECTORY])
    # One entry per locale subdirectory.
    packages.extend(
        LOCALES_DIRECTORY + '.' + directory
        for directory in os.listdir(LOCALES_DIRECTORY)
    )
    return packages
Returns the packages used for HaTeMiLe for Python. :return: The packages used for HaTeMiLe for Python. :rtype: list(str)
def get_symbol(x):
    """Retrieve recorded computation history as `Symbol`.

    Parameters
    ----------
    x : NDArray
        Array representing the head of computation graph.

    Returns
    -------
    Symbol
        The retrieved Symbol.
    """
    # Ask the MXNet C API for the autograd graph rooted at ``x``; the
    # result is returned via an out-parameter handle.
    hdl = SymbolHandle()
    check_call(_LIB.MXAutogradGetSymbol(x.handle, ctypes.byref(hdl)))
    return Symbol(hdl)
Retrieve recorded computation history as `Symbol`. Parameters ---------- x : NDArray Array representing the head of computation graph. Returns ------- Symbol The retrieved Symbol.
def verification_start(
    self,
    client,
    mode=None,
    verification_speed=None,
    row_doubling="off",
    phone_number=None,
):
    """
    Start a verification. Uses POST to /verifications interface.

    :Args:
        * *client*: (str) Client's Name
        * *mode*: (str) Verification Mode. Allowed values: "audiopin", "audiopass"
        * *verification_speed*: (int) Allowed values: 0, 25, 50, 75, 100
        * *row_doubling*: (str) Allowed values: "off", "train", "on"
        * *phone_number*: (str) Phone number to call.

    :Returns: (dict) Verification record with animation as discussed `here <https://cloud.knuverse.com/docs/api/#api-Verifications-Start_verification>`_.
    """
    data = {
        "name": client,
        "user_agent": "knuverse-sdk-python-v%s" % self.version
    }
    if mode:
        data["mode"] = mode
    if phone_number:
        data["phone_number"] = phone_number
    # 0 is a documented legal speed but is falsy, so the previous
    # truthiness test silently dropped it; compare against None instead.
    if verification_speed is not None:
        data["verification_speed"] = verification_speed
    if row_doubling:
        data["row_doubling"] = row_doubling

    response = self._post(url.verifications, body=data)
    self._check_response(response, 201)
    return self._create_response(response)
Start a verification. Uses POST to /verifications interface. :Args: * *client*: (str) Client's Name * *mode*: (str) Verification Mode. Allowed values: "audiopin", "audiopass" * *verification_speed*: (int) Allowed values: 0, 25, 50, 75, 100 * *row_doubling*: (str) Allowed values: "off", "train", "on" * *phone_number*: (str) Phone number to call. :Returns: (dict) Verification record with animation as discussed `here <https://cloud.knuverse.com/docs/api/#api-Verifications-Start_verification>`_.
def openfile(filename, mode="rt", *args, expanduser=False, expandvars=False,
             makedirs=False, **kwargs):
    """Open filename and return a corresponding file object.

    ``"-"`` or ``None`` maps to stdin for read modes and stdout otherwise.
    Names ending in ``.gz``/``.bz2``/``.xz``/``.lzma`` are opened with the
    matching compression module; NotImplementedError is raised when that
    module is unavailable.

    :param expanduser: expand a leading ``~`` in *filename*
    :param expandvars: expand environment variables in *filename*
    :param makedirs: create missing parent directories when opening for
        writing or appending
    """
    if filename in ("-", None):
        return sys.stdin if "r" in mode else sys.stdout
    if expanduser:
        filename = os.path.expanduser(filename)
    if expandvars:
        filename = os.path.expandvars(filename)
    if makedirs and ("a" in mode or "w" in mode):
        parentdir = os.path.dirname(filename)
        # A bare filename has no parent directory; os.makedirs('') raises.
        # exist_ok also removes the isdir/makedirs race.
        if parentdir:
            os.makedirs(parentdir, exist_ok=True)
    if filename.endswith(".gz"):
        if gzip is None:
            raise NotImplementedError
        _open = gzip.open
    elif filename.endswith(".bz2"):
        if bz2 is None:
            raise NotImplementedError
        _open = bz2.open
    elif filename.endswith((".xz", ".lzma")):
        if lzma is None:
            raise NotImplementedError
        _open = lzma.open
    else:
        _open = open
    return _open(filename, mode, *args, **kwargs)
Open filename and return a corresponding file object.
def plot(self, color='default', ret=False, ax=None):
    """
    Generates a basic 3D visualization.

    :param color: Polygons color.
    :type color: matplotlib color, 'default' or 't' (transparent)
    :param ret: If True, returns the figure. It can be used to add more
        elements to the plot or to modify it.
    :type ret: bool
    :param ax: If a matplotlib axes given, this method will represent the
        plot on top of this axes. This is used to represent multiple
        plots from multiple geometries, overlapping them recursively.
    :type ax: mplot3d.Axes3D, None
    :returns: None, axes
    :rtype: mplot3d.Axes3D, bool
    """
    import matplotlib.pylab as plt
    import mpl_toolkits.mplot3d as mplot3d

    # Bypass a plot
    # NOTE(review): equality (not identity) comparison -- color == 0 would
    # also bypass the plot; confirm this is acceptable.
    if color == False:
        if ax is None:
            ax = mplot3d.Axes3D(fig=plt.figure())
        return ax

    # Clone and extract the information from the object
    obj = self.__class__(**self.get_seed())
    plotable3d = obj.get_plotable3d()

    # Domain: a cube of side ``bound`` centered on the centroid.
    domain = obj.get_domain()
    bound = np.max(domain[1]-domain[0])
    centroid = obj.get_centroid()
    pos = np.vstack((centroid-bound/2, centroid+bound/2))

    # Cascade plot?
    if ax is None:  # Non cascade
        ax = mplot3d.Axes3D(fig=plt.figure())
    else:
        # Merge the new bounds with the axes' existing bounds so earlier
        # plots remain visible.
        old_pos = np.array([ax.get_xbound(),
                            ax.get_ybound(),
                            ax.get_zbound()]).T
        pos = np.dstack((pos, old_pos))
        pos = np.array([np.min(pos[0, :, :], axis=1),
                        np.max(pos[1, :, :], axis=1)])

    # Plot; 'default' and 't' both map to a fully transparent face color.
    if color == 'default':
        color = 't'
    if color == 't':
        color = (0,0,0,0)
    for polygon in plotable3d:
        polygon.set_facecolor(color)
        polygon.set_edgecolor('k')
        ax.add_collection3d(polygon)

    # Axis limits
    ax.set_xlim3d(left=pos[0,0], right=pos[1,0])
    ax.set_ylim3d(bottom=pos[0,1], top=pos[1,1])
    ax.set_zlim3d(bottom=pos[0,2], top=pos[1,2])

    if ret:
        return ax
Generates a basic 3D visualization. :param color: Polygons color. :type color: matplotlib color, 'default' or 't' (transparent) :param ret: If True, returns the figure. It can be used to add more elements to the plot or to modify it. :type ret: bool :param ax: If a matplotlib axes given, this method will represent the plot on top of this axes. This is used to represent multiple plots from multiple geometries, overlapping them recursively. :type ax: mplot3d.Axes3D, None :returns: None, axes :rtype: mplot3d.Axes3D, bool
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'document_retrieval_strategy' ) and self.document_retrieval_strategy is not None: _dict[ 'document_retrieval_strategy'] = self.document_retrieval_strategy return _dict
Return a json dictionary representing this model.
def _changeSubNos(self, path, subNos, count, action, reverse=False):
    """Implementation of subs add/removal handling.

    Args:
        path: file path associated with model on which work is done
        subNos: list of added/removed subtitle numbers
        count: function which accepts current sync point's subtitle number
            and subNos and returns anything based on these values
        action: action performed for each of sync point's subtitle number.
            Accepts current SyncPoint.subNo, count result, model and row:
            def action(current, count, model, row)
    """
    # No model registered for this path -> nothing to do.
    model = self._models.get(path)
    if model is None:
        return

    syncPoints = _syncPoints(model)
    syncSubNos = [p.subNo for p in syncPoints]
    syncSubNos.sort()

    if len(syncSubNos) == 0:
        return

    # NOTE(review): the ``reverse`` parameter is accepted but never used in
    # this body -- confirm whether iteration order should honor it.
    for current in syncSubNos:
        row = _findRow(current, model)
        action(current, count(current, subNos), model, row)
Implementation of subs add/removal handling. Args: path: file path associated with model on which work is done subNos: list of added/removed subtitle numbers count: function which accepts current sync point's subtitle number and subNos and returns anything based on these values action: action performed for each of sync point's subtitle number. Accepts current SyncPoint.subNo, count result, model and row: def action(current, count, model, row)
async def _readline(self, reader): """ Readline helper """ ret = await reader.readline() if len(ret) == 0 and reader.at_eof(): raise EOFError() return ret
Readline helper
def status(self):
    """Check power status

    Prints either a minimal (name, power state) row or, with the ``extra``
    config flag, a full header + detail row; output format is chosen by the
    ``parserFriendly`` config flag.
    """
    vm = self.get_vm_failfast(self.config['name'])
    extra = self.config['extra']
    parserFriendly = self.config['parserFriendly']
    status_to_print = []
    if extra:
        # Extended report: a header row followed by one data row.
        # NOTE(review): ``str(x) or '0'`` is dead code -- str() is always
        # truthy; likely meant str(x or 0). Confirm.
        status_to_print = \
            [["vmname", "powerstate", "ipaddress", "hostname", "memory", "cpunum", "uuid", "guestid", "uptime"]] + \
            [[vm.name, vm.runtime.powerState,
              vm.summary.guest.ipAddress or '',
              vm.summary.guest.hostName or '',
              str(vm.summary.config.memorySizeMB),
              str(vm.summary.config.numCpu),
              vm.summary.config.uuid,
              vm.summary.guest.guestId,
              str(vm.summary.quickStats.uptimeSeconds) or '0']]
    else:
        status_to_print = [[vm.name, vm.runtime.powerState]]

    if parserFriendly:
        self.print_as_lines(status_to_print)
    else:
        self.print_as_table(status_to_print)
Check power status
def encode(self,
           data: mx.sym.Symbol,
           data_length: mx.sym.Symbol,
           seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
    """
    Encodes data given sequence lengths of individual examples and maximum sequence length.

    :param data: Input data.
    :param data_length: Vector with sequence lengths.
    :param seq_len: Maximum sequence length.
    :return: Encoded versions of input data (data, data_length, seq_len).
    """
    # Thread the triple through every sub-encoder in order.
    encoded, encoded_length, encoded_seq_len = data, data_length, seq_len
    for encoder in self.encoders:
        encoded, encoded_length, encoded_seq_len = encoder.encode(
            encoded, encoded_length, encoded_seq_len)
    return encoded, encoded_length, encoded_seq_len
Encodes data given sequence lengths of individual examples and maximum sequence length. :param data: Input data. :param data_length: Vector with sequence lengths. :param seq_len: Maximum sequence length. :return: Encoded versions of input data (data, data_length, seq_len).
def add_key_filter(self, *args):
    """
    Add a single key filter to the inputs.

    :param args: a filter
    :type args: list
    :rtype: :class:`RiakMapReduce`
    """
    if self._input_mode != 'query':
        self._key_filters.append(args)
        return self
    raise ValueError('Key filters are not supported in a query.')
Add a single key filter to the inputs. :param args: a filter :type args: list :rtype: :class:`RiakMapReduce`
def calcTransferResistance(self, gid, seg_coords):
    """Precompute mapping from segment to electrode locations.

    Parameters
    ----------
    gid : cell id; used as the key when storing the resulting matrix in
        ``self.transferResistances``.
    seg_coords : dict with 'p0'/'p1' arrays of segment end-point
        coordinates (3 x nseg) -- assumed to be in um; TODO confirm units.
    """
    sigma = 0.3  # mS/mm

    # Value used in NEURON extracellular recording example ("extracellular_stim_and_rec")
    # rho = 35.4  # ohm cm, squid axon cytoplasm = 2.8249e-2 S/cm = 0.028 S/cm = 0.0028 S/mm = 2.8 mS/mm
    #   rho_um = 35.4 * 0.01 = 35.4 / 1e6 * 1e4 = 0.354 Mohm um ~= 3 uS / um = 3000 uS / mm = 3 mS /mm
    #   equivalent sigma value (~3) is 10x larger than Allen (0.3)
    # if use same sigma value, results are consistent

    # Segment midpoints and direction vectors.
    r05 = (seg_coords['p0'] + seg_coords['p1'])/2
    dl = seg_coords['p1'] - seg_coords['p0']

    nseg = r05.shape[1]

    tr = np.zeros((self.nsites,nseg))
    # tr_NEURON = np.zeros((self.nsites,nseg))  # used to compare with NEURON extracellular example

    for j in range(self.nsites):  # calculate mapping for each site on the electrode
        rel = np.expand_dims(self.pos[:, j], axis=1)  # coordinates of a j-th site on the electrode
        rel_05 = rel - r05  # distance between electrode and segment centers

        r2 = np.einsum('ij,ij->j', rel_05, rel_05)  # compute dot product column-wise, the resulting array has as many columns as original

        rlldl = np.einsum('ij,ij->j', rel_05, dl)  # compute dot product column-wise, the resulting array has as many columns as original
        dlmag = np.linalg.norm(dl, axis=0)  # length of each segment
        rll = abs(rlldl/dlmag)  # component of r parallel to the segment axis it must be always positive
        rT2 = r2 - rll**2  # square of perpendicular component
        up = rll + dlmag/2
        low = rll - dlmag/2
        num = up + np.sqrt(up**2 + rT2)
        den = low + np.sqrt(low**2 + rT2)
        # Line-source approximation of the potential along each segment.
        tr[j, :] = np.log(num/den)/dlmag  # units of (1/um) use with imemb_ (total seg current)

        # Consistent with NEURON extracellular recording example
        # r = np.sqrt(rel_05[0,:]**2 + rel_05[1,:]**2 + rel_05[2,:]**2)
        # tr_NEURON[j, :] = (rho / 4 / math.pi)*(1/r)*0.01

    tr *= 1/(4*math.pi*sigma)  # units: 1/um / (mS/mm) = mm/um / mS = 1e3 * kOhm = MOhm
    self.transferResistances[gid] = tr
Precompute mapping from segment to electrode locations
def adapters(self, adapters):
    """
    Sets the number of Ethernet adapters for this VMware VM instance.

    :param adapters: number of adapters
    """
    # VMware VMs are limited to 10 adapters
    if adapters > 10:
        raise VMwareError("Number of adapters above the maximum supported of 10")

    # Rebuild the adapter table from scratch with the requested count.
    self._ethernet_adapters.clear()
    for slot in range(adapters):
        self._ethernet_adapters[slot] = EthernetAdapter()
    self._adapters = len(self._ethernet_adapters)

    log.info("VMware VM '{name}' [{id}] has changed the number of Ethernet adapters to {adapters}".format(name=self.name,
                                                                                                          id=self.id,
                                                                                                          adapters=adapters))
Sets the number of Ethernet adapters for this VMware VM instance. :param adapters: number of adapters
def do_work(self):
    """ Do work

    Records start/end timestamps around ``self._mainfunc()``, optionally
    creating the target directory first.
    """
    self._starttime = time.time()

    if not os.path.isdir(self._dir2):
        if self._maketarget:
            if self._verbose:
                self.log('Creating directory %s' % self._dir2)
            try:
                os.makedirs(self._dir2)
                self._numnewdirs += 1
            except Exception as e:
                # Creation failed: log and abort without running _mainfunc.
                self.log(str(e))
                return None

    # NOTE(review): if the target dir is missing and _maketarget is False,
    # execution still falls through to _mainfunc -- confirm intended.

    # All right!
    self._mainfunc()
    self._endtime = time.time()
Do work
def managed(name, data, **kwargs):
    '''
    Manage the device configuration given the input data structured
    according to the YANG models.

    data
        YANG structured data.

    models
        A list of models to be used when generating the config.

    profiles: ``None``
        Use certain profiles to generate the config.
        If not specified, will use the platform default profile(s).

    compliance_report: ``False``
        Return the compliance report in the comment.

        .. versionadded:: 2017.7.3

    test: ``False``
        Dry run? If set as ``True``, will apply the config, discard
        and return the changes. Default: ``False`` and will commit
        the changes on the device.

    commit: ``True``
        Commit? Default: ``True``.

    debug: ``False``
        Debug mode. Will insert a new key under the output dictionary,
        as ``loaded_config`` containing the raw configuration loaded on
        the device.

    replace: ``False``
        Should replace the config with the new generate one?

    State SLS example:

    .. code-block:: jinja

        {%- set expected_config = pillar.get('openconfig_interfaces_cfg') -%}
        interfaces_config:
          napalm_yang.managed:
            - data: {{ expected_config | json }}
            - models:
              - models.openconfig_interfaces
            - debug: true

    Pillar example:

    .. code-block:: yaml

        openconfig_interfaces_cfg:
          _kwargs:
            filter: true
          interfaces:
            interface:
              Et1:
                config:
                  mtu: 9000
              Et2:
                config:
                  description: "description example"
    '''
    models = kwargs.get('models', None)
    if isinstance(models, tuple) and isinstance(models[0], list):
        models = models[0]
    ret = salt.utils.napalm.default_ret(name)
    test = kwargs.get('test', False) or __opts__.get('test', False)
    debug = kwargs.get('debug', False) or __opts__.get('debug', False)
    # BUG FIX: ``kwargs.get('commit', True) or __opts__.get('commit', True)``
    # could never evaluate to False (the first operand defaults to True), so
    # an explicit ``commit: false`` was silently ignored.  Fall back to
    # __opts__ only when the kwarg is absent.
    commit = kwargs.get('commit', __opts__.get('commit', True))
    replace = kwargs.get('replace', False) or __opts__.get('replace', False)
    return_compliance_report = kwargs.get('compliance_report', False) or __opts__.get('compliance_report', False)
    profiles = kwargs.get('profiles', [])
    # Dump the expected config to a temp file so it can be diffed against
    # the parsed device config.
    temp_file = __salt__['temp.file']()
    log.debug('Creating temp file: %s', temp_file)
    if 'to_dict' not in data:
        data = {'to_dict': data}
    data = [data]
    with salt.utils.files.fopen(temp_file, 'w') as file_handle:
        salt.utils.yaml.safe_dump(
            salt.utils.json.loads(salt.utils.json.dumps(data)),
            file_handle,
            encoding='utf-8'
        )
    device_config = __salt__['napalm_yang.parse'](*models,
                                                  config=True,
                                                  profiles=profiles)
    log.debug('Parsed the config from the device:')
    log.debug(device_config)
    compliance_report = __salt__['napalm_yang.compliance_report'](device_config,
                                                                  *models,
                                                                  filepath=temp_file)
    log.debug('Compliance report:')
    log.debug(compliance_report)
    complies = compliance_report.get('complies', False)
    if complies:
        ret.update({
            'result': True,
            'comment': 'Already configured as required.'
        })
        log.debug('All good here.')
        return ret
    log.debug('Does not comply, trying to generate and load config')
    data = data[0]['to_dict']
    if '_kwargs' in data:
        data.pop('_kwargs')
    loaded_changes = __salt__['napalm_yang.load_config'](data,
                                                         *models,
                                                         profiles=profiles,
                                                         test=test,
                                                         debug=debug,
                                                         commit=commit,
                                                         replace=replace)
    log.debug('Loaded config result:')
    log.debug(loaded_changes)
    __salt__['file.remove'](temp_file)
    loaded_changes['compliance_report'] = compliance_report
    return salt.utils.napalm.loaded_ret(ret,
                                        loaded_changes,
                                        test,
                                        debug,
                                        opts=__opts__,
                                        compliance_report=return_compliance_report)
Manage the device configuration given the input data structured according to the YANG models. data YANG structured data. models A list of models to be used when generating the config. profiles: ``None`` Use certain profiles to generate the config. If not specified, will use the platform default profile(s). compliance_report: ``False`` Return the compliance report in the comment. .. versionadded:: 2017.7.3 test: ``False`` Dry run? If set as ``True``, will apply the config, discard and return the changes. Default: ``False`` and will commit the changes on the device. commit: ``True`` Commit? Default: ``True``. debug: ``False`` Debug mode. Will insert a new key under the output dictionary, as ``loaded_config`` containing the raw configuration loaded on the device. replace: ``False`` Should replace the config with the new generate one? State SLS example: .. code-block:: jinja {%- set expected_config = pillar.get('openconfig_interfaces_cfg') -%} interfaces_config: napalm_yang.managed: - data: {{ expected_config | json }} - models: - models.openconfig_interfaces - debug: true Pillar example: .. code-block:: yaml openconfig_interfaces_cfg: _kwargs: filter: true interfaces: interface: Et1: config: mtu: 9000 Et2: config: description: "description example"
async def mailed_confirm(self, **params):
    """Send a news item to the user after an offer is received.

    Accepts:
        - cid
        - buyer_address
        - price
        - offer_type
        - coinid
    """
    if not params:
        return {"error":400, "reason":"Missed required fields"}
    # Check if required fields exists
    cid = params.get("cid")
    buyer_address = params.get("buyer_address")
    price = params.get("price")
    offer_type = params.get("offer_type")
    # BUG FIX: ``params.get("coinid").upper()`` raised AttributeError when
    # coinid was absent, before the missing-fields check below could run.
    coinid = params.get("coinid")
    if isinstance(coinid, str):
        coinid = coinid.upper().replace("TEST", "")
    # Check if required fileds
    if not all([cid, buyer_address, price]):
        return {"error":400, "reason":"Missed required fields"}
    # Get content owner address
    #if coinid in settings.AVAILABLE_COIN_ID:
    #	client_bridge.endpoint = settings.bridges[coinid]
    #else:
    #	return {"error":400, "reason":"Invalid coin ID"}
    #owneraddr = await client_bridge.request(method_name="ownerbycid", cid=cid)
    # Send appropriate mail to seller if exists
    #seller = await getaccountbywallet(wallet=owneraddr)
    #logging.debug(seller)
    #if "error" in seller.keys():
    #	return seller
    #if seller.get("email"):
    #	emaildata = {
    #		"to": seller["email"],
    #		"subject": "Robin8 support",
    #		"optional": "You`ve got a new offer from %s" % seller["public_key"]
    #
    #	}
    #	await client_email.request(method_name="sendmail", **emaildata)
    # Send news for seller
    buyer = await getaccountbywallet(wallet=buyer_address)
    if "error" in buyer.keys():
        buyer["public_key"] = None
    newsdata = {
        "event_type":"made offer",
        "cid": cid,
        "access_string":buyer["public_key"],
        "buyer_pubkey":buyer["public_key"],
        "buyer_address":buyer_address,
        #"owneraddr":owneraddr,
        "price": price,
        "offer_type": offer_type,
        "coinid":coinid
    }
    news = await self.insert_news(**newsdata)
    return {"result":"ok"}
Sends mail to the user after an offer is received. Accepts: - cid - buyer_address - price - offer_type - point - coinid
def stack(datasets):
    """First dataset at the bottom."""
    layers = iter(datasets)
    result = next(layers).copy()
    # Overlay each subsequent layer: keep existing values only where the
    # new layer is null, otherwise take the new layer's values.
    for layer in layers:
        result = result.where(layer.isnull(), layer)
    return result
First dataset at the bottom.
def _make_verb_helper(verb_func, add_groups=False):
    """
    Build a helper that readies a verb before delegating to ``verb_func``

    The returned function attaches the expressions to be evaluated onto
    the verb, then hands the verb to the core verb function.

    Parameters
    ----------
    verb_func : function
        Core verb function, called once the expressions have been built
        and attached. It should be one of the functions that implement
        verbs which evaluate expressions.
    add_groups : bool
        If True, the columns produced by evaluating the expressions are
        also recorded on the verb as its groups.

    Returns
    -------
    out : function
        A function that implements a helper verb.
    """
    @wraps(verb_func)
    def helper(verb):
        expressions, created_columns = build_expressions(verb)
        verb.expressions = expressions
        if add_groups:
            verb.groups = created_columns
        return verb_func(verb)

    return helper
Create function that prepares verb for the verb function The functions created add expressions to be evaluated to the verb, then call the core verb function Parameters ---------- verb_func : function Core verb function. This is the function called after expressions created and added to the verb. The core function should be one of those that implement verbs that evaluate expressions. add_groups : bool If True, a groups attribute is added to the verb. The groups are the columns created after evaluating the expressions. Returns ------- out : function A function that implements a helper verb.
def split_s3_path(url: str) -> Tuple[str, str]:
    """Split a full s3 path into its bucket name and key path.

    Raises:
        ValueError: if the URL lacks a bucket (netloc) or a path.
    """
    parsed = urlparse(url)
    if not (parsed.netloc and parsed.path):
        raise ValueError("bad s3 path {}".format(url))
    key = parsed.path
    # Drop a single leading '/' so the key is relative to the bucket.
    if key.startswith("/"):
        key = key[1:]
    return parsed.netloc, key
Split a full s3 path into the bucket name and path.
def merge(args):
    """
    %prog merge output/*.csv > ahrd.csv

    Merge AHRD results, remove redundant headers, empty lines, etc.

    If there are multiple lines containing the same ID (first column). Then
    whatever comes the first will get retained.
    """
    p = OptionParser(merge.__doc__)
    opts, args = p.parse_args(args)

    if len(args) < 1:
        sys.exit(not p.print_help())

    csvfiles = args
    cf = csvfiles[0]
    # Locate the header row in the first file so it can be printed once
    # and skipped everywhere else. Use a context manager so the handle is
    # closed (the original leaked every file it opened).
    with open(cf) as fp:
        for row in fp:
            if row.startswith("Protein"):
                break
    header = row.rstrip()
    print(header)

    seen = set()
    for cf in csvfiles:
        with open(cf) as fp:
            for row in fp:
                if row[0] == '#':
                    continue
                if row.strip() == "":
                    continue
                if row.strip() == header:
                    continue

                atoms = row.rstrip().split("\t")
                # First column is the ID; renamed from `id` to avoid
                # shadowing the builtin.
                row_id = atoms[0]
                if row_id in seen:
                    logging.error("ID `{0}` ignored.".format(row_id))
                    continue

                seen.add(row_id)
                print(row.strip())
%prog merge output/*.csv > ahrd.csv Merge AHRD results, remove redundant headers, empty lines, etc. If there are multiple lines containing the same ID (first column), whichever comes first is retained.
def get_config_status():
    '''
    Get the status of the current DSC Configuration

    Returns:
        dict: A dictionary representing the status of the current DSC
            Configuration on the machine

    CLI Example:

    .. code-block:: bash

        salt '*' dsc.get_config_status
    '''
    cmd = ('Get-DscConfigurationStatus | '
           'Select-Object -Property HostName, Status, MetaData, '
           '@{Name="StartDate";Expression={Get-Date ($_.StartDate) -Format g}}, '
           'Type, Mode, RebootRequested, NumberofResources')
    try:
        return _pshell(cmd, ignore_retcode=True)
    except CommandExecutionError as exc:
        # A machine that has never had a configuration applied reports no
        # status at all; surface that as a clearer error message.
        if 'No status information available' in exc.info['stderr']:
            raise CommandExecutionError('Not Configured')
        raise
Get the status of the current DSC Configuration Returns: dict: A dictionary representing the status of the current DSC Configuration on the machine CLI Example: .. code-block:: bash salt '*' dsc.get_config_status
def update_warning_box(self):
    """
    Refresh the warning box so it shows the current ``warning_text`` for
    this specimen, or "No Problems" when there is nothing to report.
    """
    self.warning_box.Clear()
    message = "No Problems" if self.warning_text == "" else self.warning_text
    self.warning_box.AppendText(message)
updates the warning box with whatever the warning_text variable contains for this specimen
def set_preshared_key(self, new_key):
    """
    Set the preshared key for this VPN.

    A pre-shared key is only present when the tunnel type is 'VPN' or the
    encryption mode is 'transport'; when no key is present this is a
    no-op.

    :return: None
    """
    if not self.data.get('preshared_key'):
        return
    self.update(preshared_key=new_key)
Set the preshared key for this VPN. A pre-shared key is only present when the tunnel type is 'VPN' or the encryption mode is 'transport'. :return: None
def _get_side2KerningGroups(self): """ Subclasses may override this method. """ found = {} for name, contents in self.items(): if name.startswith("public.kern2."): found[name] = contents return found
Subclasses may override this method.
def _make_env(resultdir=None):
    """Load the env from `resultdir` if not `None` or make a new one.

    An Enos environment handles all specific variables of an experiment.
    Without a `resultdir`, a fresh environment is returned. When
    `resultdir` points to a directory containing a saved Enos
    environment, that environment is loaded and its configuration file
    (the reservation.yaml) is reread and reloaded, letting the user
    update the configuration between phases.

    Args:
        resultdir (str): directory path to load the env from.
    """
    env = {
        "config": {},       # The config
        "resultdir": "",    # Path to the result directory
        "config_file": "",  # The initial config file
        "nodes": {},        # Roles with nodes
        "phase": "",        # Last phase that have been run
        "user": "",         # User id for this job
        "cwd": os.getcwd()  # Current Working Directory
    }

    if not resultdir:
        return env

    env_path = os.path.join(resultdir, "env")
    if os.path.isfile(env_path):
        with open(env_path, "r") as f:
            env.update(yaml.load(f))
        logger.debug("Loaded environment %s", env_path)

    # Reread the configuration file (if any) so edits made between
    # phases are picked up.
    if env.get("config_file") is not None:
        if os.path.isfile(env["config_file"]):
            with open(env["config_file"], "r") as f:
                env["config"].update(yaml.load(f))
            logger.debug("Reloaded config %s", env["config"])

    return env
Loads the env from `resultdir` if not `None` or makes a new one. An Enos environment handles all specific variables of an experiment. This function either generates a new environment or loads a previous one. If the value of `resultdir` is `None`, then this function makes a new environment and return it. If the value is a directory path that contains an Enos environment, then this function loads and returns it. In case of a directory path, this function also rereads the configuration file (the reservation.yaml) and reloads it. This lets the user update his configuration between each phase. Args: resultdir (str): directory path to load the env from.
def distance_stats_sqr(x, y, **kwargs):
    """
    distance_stats_sqr(x, y, *, exponent=1)

    Computes the usual (biased) estimators for the squared distance
    covariance and squared distance correlation between two random
    vectors, and the individual squared distance variances.

    Parameters
    ----------
    x: array_like
        First random vector. The columns correspond with the individual
        random variables while the rows are individual instances of the
        random vector.
    y: array_like
        Second random vector. The columns correspond with the individual
        random variables while the rows are individual instances of the
        random vector.
    exponent: float
        Exponent of the Euclidean distance, in the range :math:`(0, 2)`.
        Equivalently, it is twice the Hurst parameter of fractional
        Brownian motion.

    Returns
    -------
    Stats
        Squared distance covariance, squared distance correlation,
        squared distance variance of the first random vector and squared
        distance variance of the second random vector.

    See Also
    --------
    distance_covariance_sqr
    distance_correlation_sqr

    Notes
    -----
    It is less efficient to compute the statistics separately, rather
    than using this function, because some computations can be shared.

    The algorithm uses the fast distance covariance algorithm proposed in
    :cite:`b-fast_distance_correlation` when possible.

    Examples
    --------
    >>> import numpy as np
    >>> import dcor
    >>> a = np.array([[1, 2, 3, 4],
    ...               [5, 6, 7, 8],
    ...               [9, 10, 11, 12],
    ...               [13, 14, 15, 16]])
    >>> b = np.array([[1], [0], [0], [1]])
    >>> dcor.distance_stats_sqr(a, a) # doctest: +NORMALIZE_WHITESPACE
    Stats(covariance_xy=52.0, correlation_xy=1.0, variance_x=52.0,
    variance_y=52.0)
    >>> dcor.distance_stats_sqr(a, b) # doctest: +NORMALIZE_WHITESPACE
    Stats(covariance_xy=1.0, correlation_xy=0.2773500...,
    variance_x=52.0, variance_y=0.25)
    >>> dcor.distance_stats_sqr(b, b) # doctest: +NORMALIZE_WHITESPACE
    Stats(covariance_xy=0.25, correlation_xy=1.0, variance_x=0.25,
    variance_y=0.25)
    >>> dcor.distance_stats_sqr(a, b, exponent=0.5)
    ... # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    Stats(covariance_xy=0.3705904..., correlation_xy=0.4493308...,
    variance_x=2.7209220..., variance_y=0.25)
    """
    # Use the fast distance covariance algorithm when the inputs allow
    # it; otherwise fall back to the naive implementation.
    if _can_use_fast_algorithm(x, y, **kwargs):
        return _distance_stats_sqr_fast(x, y)
    return _distance_sqr_stats_naive_generic(
        x, y,
        matrix_centered=_distance_matrix,
        product=mean_product,
        **kwargs)
distance_stats_sqr(x, y, *, exponent=1) Computes the usual (biased) estimators for the squared distance covariance and squared distance correlation between two random vectors, and the individual squared distance variances. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. exponent: float Exponent of the Euclidean distance, in the range :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of fractional Brownian motion. Returns ------- Stats Squared distance covariance, squared distance correlation, squared distance variance of the first random vector and squared distance variance of the second random vector. See Also -------- distance_covariance_sqr distance_correlation_sqr Notes ----- It is less efficient to compute the statistics separately, rather than using this function, because some computations can be shared. The algorithm uses the fast distance covariance algorithm proposed in :cite:`b-fast_distance_correlation` when possible. Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> dcor.distance_stats_sqr(a, a) # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=52.0, correlation_xy=1.0, variance_x=52.0, variance_y=52.0) >>> dcor.distance_stats_sqr(a, b) # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=1.0, correlation_xy=0.2773500..., variance_x=52.0, variance_y=0.25) >>> dcor.distance_stats_sqr(b, b) # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=0.25, correlation_xy=1.0, variance_x=0.25, variance_y=0.25) >>> dcor.distance_stats_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS ... 
# doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=0.3705904..., correlation_xy=0.4493308..., variance_x=2.7209220..., variance_y=0.25)
def switch_to_plugin(self):
    """Switch to plugin."""
    # Restore any *other* plugin that is currently maximized before
    # switching away from it.
    last = self.main.last_plugin
    if last is not None and last.ismaximized and last is not self:
        self.main.maximize_dockwidget()

    # Show this plugin only if it was already visible.
    if not self.get_option('visible_if_project_open'):
        return
    if not self.toggle_view_action.isChecked():
        self.toggle_view_action.setChecked(True)
    self.visibility_changed(True)
Switch to plugin.
def _AddPropertiesForRepeatedField(field, cls):
  """Adds a public property for a "repeated" protocol message field.

  Clients can use this property to get the value of the field, which
  will be either a _RepeatedScalarFieldContainer or
  _RepeatedCompositeFieldContainer (see below).

  Note that when clients add values to these containers, we perform
  type-checking in the case of repeated scalar fields, and we also set
  any necessary "has" bits as a side-effect.

  Args:
    field: A FieldDescriptor for this field.
    cls: The class we're constructing.
  """
  proto_field_name = field.name
  property_name = _PropertyName(proto_field_name)

  def getter(self):
    container = self._fields.get(field)
    if container is not None:
      return container
    # Lazily build the container for this field, then atomically publish
    # it. If another thread has preempted us, take that thread's object
    # and discard ours.
    # WARNING: We are relying on setdefault() being atomic. This is true
    # in CPython but we haven't investigated others. This warning appears
    # in several other locations in this file.
    container = field._default_constructor(self)
    return self._fields.setdefault(field, container)

  getter.__module__ = None
  getter.__doc__ = 'Getter for %s.' % proto_field_name

  # The setter exists only to raise a more helpful error message:
  # repeated fields may be mutated in place, never reassigned.
  def setter(self, new_value):
    raise AttributeError('Assignment not allowed to repeated field '
                         '"%s" in protocol message object.' % proto_field_name)

  doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
  setattr(cls, property_name, property(getter, setter, doc=doc))
Adds a public property for a "repeated" protocol message field. Clients can use this property to get the value of the field, which will be either a _RepeatedScalarFieldContainer or _RepeatedCompositeFieldContainer (see below). Note that when clients add values to these containers, we perform type-checking in the case of repeated scalar fields, and we also set any necessary "has" bits as a side-effect. Args: field: A FieldDescriptor for this field. cls: The class we're constructing.
def _print_image(self, line, size):
    """ Print formatted image """
    # NOTE(review): this implementation is Python 2 only -- it relies on
    # str.decode('hex') and on '/' being integer division, both of which
    # were removed/changed in Python 3.
    i = 0
    cont = 0
    buffer = ""

    # Select the raster bit-image print mode.
    self._raw(S_RASTER_N)
    # Build the 4-byte raster header.
    # NOTE(review): ((size[0]/size[1])/8) looks suspicious -- a raster
    # header normally encodes the width in bytes (size[0]/8); confirm
    # against the printer's protocol documentation.
    buffer = "%02X%02X%02X%02X" % (((size[0]/size[1])/8), 0, size[1], 0)
    self._raw(buffer.decode('hex'))
    buffer = ""

    # Pack the bitmap 8 pixels per byte, flushing to the printer every
    # 4 accumulated bytes.
    while i < len(line):
        hex_string = int(line[i:i+8],2)
        buffer += "%02X" % hex_string
        i += 8
        cont += 1
        if cont % 4 == 0:
            self._raw(buffer.decode("hex"))
            buffer = ""
            cont = 0
Print formatted image
def delete_hosting_device_resources(self, context, tenant_id, mgmt_port,
                                    **kwargs):
    """Deletes resources for a hosting device in a plugin specific way."""
    if mgmt_port is None:
        return
    try:
        self._cleanup_hosting_port(context, mgmt_port['id'])
    except n_exc.NeutronException as e:
        # Best effort: log the failure and continue rather than aborting
        # the hosting device deletion.
        LOG.error("Unable to delete port:%(port)s after %(tries)d"
                  " attempts due to exception %(exception)s. "
                  "Skipping it", {'port': mgmt_port['id'],
                                  'tries': DELETION_ATTEMPTS,
                                  'exception': str(e)})
Deletes resources for a hosting device in a plugin specific way.
def overlay_gateway_site_extend_vlan_add(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels") name_key = ET.SubElement(overlay_gateway, "name") name_key.text = kwargs.pop('name') site = ET.SubElement(overlay_gateway, "site") name_key = ET.SubElement(site, "name") name_key.text = kwargs.pop('name') extend = ET.SubElement(site, "extend") vlan = ET.SubElement(extend, "vlan") add = ET.SubElement(vlan, "add") add.text = kwargs.pop('add') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def process(self):
    """Checks whether the paths exists and updates the state accordingly."""
    for candidate in self._paths:
        if not os.path.exists(candidate):
            self.state.add_error(
                'Path {0:s} does not exist'.format(str(candidate)),
                critical=False)
            continue
        # Record (basename, full path) for each path that exists.
        self.state.output.append((os.path.basename(candidate), candidate))

    if not self.state.output:
        self.state.add_error('No valid paths collected, bailing',
                             critical=True)
Checks whether the paths exists and updates the state accordingly.
def exists(cls, excludes_, **filters):
    """
    Return `True` if objects matching the provided filters and excludes
    exist, otherwise `False`.

    Calls the `filter` method by default, but can be overridden for
    better and quicker implementations that may be supported by a
    database.

    :param excludes_: entities without this combination of field name
        and values will be returned
    """
    matching = cls.query.filter(**filters).exclude(**excludes_)
    return bool(matching)
Return `True` if objects matching the provided filters and excludes exist if not return false. Calls the `filter` method by default, but can be overridden for better and quicker implementations that may be supported by a database. :param excludes_: entities without this combination of field name and values will be returned
def write_configuration_file(filepath=_give_default_file_path(), overwrite=False):
    """Create a configuration file.

    Writes the current state of settings into a configuration file.

    .. note:: Since a file is permanently written, this function
        is strictly speaking not side-effect free.

    Args:
        filepath (str): Where to write the file.
            The default is under both UNIX and Windows ``~/.chemcoordrc``.
            NOTE: the default is evaluated once at import time, not per
            call.
        overwrite (bool): Whether to replace an already existing file.

    Returns:
        None:
    """
    config = configparser.ConfigParser()
    config.read_dict(settings)

    if os.path.isfile(filepath) and not overwrite:
        # On Python 3 this raises FileExistsError to the caller. On
        # Python 2 the name FileExistsError does not exist, so the
        # NameError is caught and only a warning is emitted.
        try:
            raise FileExistsError
        except NameError:  # because of python2
            warn('File exists already and overwrite is False (default).')
    else:
        with open(filepath, 'w') as configfile:
            config.write(configfile)
Create a configuration file. Writes the current state of settings into a configuration file. .. note:: Since a file is permanently written, this function is strictly speaking not side-effect free. Args: filepath (str): Where to write the file. The default is under both UNIX and Windows ``~/.chemcoordrc``. overwrite (bool): Returns: None:
def is_byte_range_valid(start, stop, length):
    """Tell whether a given byte content range is valid for the given
    length.

    .. versionadded:: 0.7
    """
    has_start = start is not None
    has_stop = stop is not None
    # A range must specify both endpoints or neither.
    if has_start != has_stop:
        return False
    if not has_start:
        # No range at all: valid unless a negative length is given.
        return length is None or length >= 0
    if length is None:
        # Without a known length, only require a well-formed window.
        return 0 <= start < stop
    return start < stop and 0 <= start < length
Checks if a given byte content range is valid for the given length. .. versionadded:: 0.7
def __get_event(self, block=True, timeout=1):
    """
    Retrieves an event.
    If self._exceeding_event is not None, it'll be returned.
    Otherwise, an event is dequeued from the event buffer.
    If the event which was retrieved is bigger than the permitted batch
    size, it'll be omitted, and the next event in the event buffer is
    returned
    """
    while True:
        # Prefer an event that overflowed a previous batch, if any.
        pending = self._exceeding_event
        if pending:
            self._exceeding_event = None
            event = pending
        else:
            event = self._event_queue.get(block, timeout)

        size = len(event)
        # Skip events that could never fit in a batch. The ( - 2 )
        # accounts for the parentheses enclosing the batch.
        if size - 2 >= self._batch_max_size:
            self._notify(logging.WARNING,
                         consts.LOG_MSG_OMITTED_OVERSIZED_EVENT % size)
        else:
            return event
Retrieves an event. If self._exceeding_event is not None, it'll be returned. Otherwise, an event is dequeued from the event buffer. If The event which was retrieved is bigger than the permitted batch size, it'll be omitted, and the next event in the event buffer is returned
def retry_on_ec2_error(self, func, *args, **kwargs):
    """
    Call the given method with the given arguments, retrying if the call
    failed due to an EC2ResponseError or SSL error.

    Performs up to 6 attempts, sleeping 5 seconds between failures (at
    most 25 seconds of waiting in total). If the final attempt also
    fails, the exception is re-raised.

    :param func: Function to call
    :type func: function
    :return: whatever ``func`` returns
    """
    attempts_left = 6
    while True:
        try:
            return func(*args, **kwargs)
        except (boto.exception.EC2ResponseError, ssl.SSLError):
            attempts_left -= 1
            if attempts_left <= 0:
                # Bare `raise` re-raises with the original traceback
                # intact (`raise msg` discarded it on Python 2).
                raise
            time.sleep(5)
Call the given method with the given arguments, retrying if the call failed due to an EC2ResponseError. This method will wait at most 30 seconds and perform up to 6 retries. If the method still fails, it will propagate the error. :param func: Function to call :type func: function
def _perform_validation(self, path, value, results):
    """
    Validates a given value against the schema and configured validation
    rules.

    :param path: a dot notation path to the value.
    :param value: a value to be validated.
    :param results: a list with validation results to add new results.
    """
    # Extend the dotted path with this property's name. Use `is None`
    # (identity) rather than the non-idiomatic `== None`.
    if path is None or len(path) == 0:
        path = self.name
    else:
        path = path + "." + self.name

    super(PropertySchema, self)._perform_validation(path, value, results)

    self._perform_type_validation(path, self.value_type, value, results)
Validates a given value against the schema and configured validation rules. :param path: a dot notation path to the value. :param value: a value to be validated. :param results: a list with validation results to add new results.
def get_color_mode(mode):
    """Convert PIL mode to ColorMode."""
    key = mode.upper().rstrip('A')  # drop any trailing alpha marker
    aliases = {'1': 'BITMAP', 'L': 'GRAYSCALE'}
    return getattr(ColorMode, aliases.get(key, key))
Convert PIL mode to ColorMode.
def _sanitizeFilename(filename):
    """Sanitizes a filename for use on Windows and other brain-dead
    systems by replacing a number of illegal characters with underscores
    and dropping any leading dot."""
    cleaned = filename.translate(_sanitize_trans)
    # A leading dot would hide the file on UNIX-like systems; drop it.
    if cleaned.startswith('.'):
        cleaned = cleaned[1:]
    return cleaned
Sanitizes filename for use on Windows and other brain-dead systems, by replacing a number of illegal characters with underscores.
def get_state(self, force_update=False):
    """ Returns 0 if off and 1 if on. """
    if not force_update and self._state is not None:
        # Serve the cached value when no refresh was requested.
        return self._state
    return int(self.basicevent.GetBinaryState()['BinaryState'])
Returns 0 if off and 1 if on.
def add(self, logical_id, property, value):
    """
    Add the information that resource with given `logical_id` supports the given `property`,
    and that a reference to `logical_id.property` resolves to given `value.

    Example:

        "MyApi.Deployment" -> "MyApiDeployment1234567890"

    :param logical_id: Logical ID of the resource (Ex: MyLambdaFunction)
    :param property: Property on the resource that can be referenced (Ex: Alias)
    :param value: Value that this reference resolves to.
    :return: nothing
    """
    if not (logical_id and property):
        raise ValueError("LogicalId and property must be a non-empty string")

    if not value or not isinstance(value, string_types):
        raise ValueError("Property value must be a non-empty string")

    # Each logical id maps to a dict of property -> resolved value;
    # a property may only be registered once.
    resource_refs = self._refs.setdefault(logical_id, {})
    if property in resource_refs:
        raise ValueError("Cannot add second reference value to {}.{} property".format(logical_id, property))

    resource_refs[property] = value
Add the information that resource with given `logical_id` supports the given `property`, and that a reference to `logical_id.property` resolves to given `value. Example: "MyApi.Deployment" -> "MyApiDeployment1234567890" :param logical_id: Logical ID of the resource (Ex: MyLambdaFunction) :param property: Property on the resource that can be referenced (Ex: Alias) :param value: Value that this reference resolves to. :return: nothing
def _ycbcr2l(self, mode): """Convert from YCbCr to L. """ self._check_modes(("YCbCr", "YCbCrA")) self.channels = [self.channels[0]] + self.channels[3:] if self.fill_value is not None: self.fill_value = [self.fill_value[0]] + self.fill_value[3:] self.mode = mode
Convert from YCbCr to L.
def calc_xyz2surf(surf, xyz, threshold=20, exponent=None, std=None):
    """Calculate transformation matrix from xyz values to vertices.

    Parameters
    ----------
    surf : instance of wonambi.attr.Surf
        the surface of only one hemisphere.
    xyz : numpy.ndarray
        nChan x 3 matrix, with the locations in x, y, z.
    std : float
        distance in mm of the Gaussian kernel
    exponent : int
        inverse law (1-> direct inverse, 2-> inverse square, 3-> inverse
        cube)
    threshold : float
        distance in mm for a vertex to pick up electrode activity (if
        distance is above the threshold, one electrode does not affect a
        vertex).

    Returns
    -------
    numpy.ndarray
        nVertices X xyz.shape[0] matrix

    Notes
    -----
    This function is a helper when plotting onto brain surface, by creating
    a transformation matrix from the values in space (f.e. at each electrode)
    to the position of the vertices (used to show the brain surface).

    There are many ways to move from values to vertices. The crucial parameter
    is the function at which activity decreases in respect to the distance.
    You can have an inverse relationship by specifying 'exponent'. If
    'exponent' is 2, then the activity will decrease as inverse square of
    the distance. The function can be a Gaussian. With std, you specify the
    width of the gaussian kernel in mm.
    For each vertex, it uses a threshold based on the distance ('threshold'
    value, in mm). Finally, it normalizes the contribution of all the channels
    to 1, so that the sum of the coefficients for each vertex is 1.

    You can also create your own matrix (and skip calc_xyz2surf altogether)
    and pass it as attribute to the main figure.
    Because it's a loop over all the vertices, this function is pretty slow,
    but if you calculate it once, you can reuse it. We take advantage of
    multiprocessing, which speeds it up considerably.
    """
    # Default to direct inverse-distance weighting when neither an
    # exponent nor a gaussian width was requested.
    if exponent is None and std is None:
        exponent = 1

    # Pick the per-vertex weighting function; 'exponent' takes priority.
    if exponent is not None:
        lg.debug('Vertex values based on inverse-law, with exponent ' + str(exponent))
        funct = partial(calc_one_vert_inverse, xyz=xyz, exponent=exponent)
    elif std is not None:
        lg.debug('Vertex values based on gaussian, with s.d. ' + str(std))
        funct = partial(calc_one_vert_gauss, xyz=xyz, std=std)

    # One weight row per vertex, computed in parallel across processes.
    with Pool() as p:
        xyz2surf = p.map(funct, surf.vert)

    xyz2surf = asarray(xyz2surf)

    # Convert the distance threshold into a weight threshold consistent
    # with the chosen weighting function.
    if exponent is not None:
        threshold_value = (1 / (threshold ** exponent))
        external_threshold_value = threshold_value
    elif std is not None:
        threshold_value = gauss(threshold, std)
        external_threshold_value = gauss(std, std)  # this is around 0.607

    lg.debug('Values thresholded at ' + str(threshold_value))

    # Weights below threshold do not contribute at all.
    xyz2surf[xyz2surf < threshold_value] = NaN

    # here we deal with vertices that are within the threshold value but far
    # from a single electrodes, so those remain empty
    sumval = nansum(xyz2surf, axis=1)
    sumval[sumval < external_threshold_value] = NaN

    # normalize by the number of electrodes
    xyz2surf /= atleast_2d(sumval).T
    xyz2surf[isnan(xyz2surf)] = 0

    return xyz2surf
Calculate transformation matrix from xyz values to vertices. Parameters ---------- surf : instance of wonambi.attr.Surf the surface of only one hemisphere. xyz : numpy.ndarray nChan x 3 matrix, with the locations in x, y, z. std : float distance in mm of the Gaussian kernel exponent : int inverse law (1-> direct inverse, 2-> inverse square, 3-> inverse cube) threshold : float distance in mm for a vertex to pick up electrode activity (if distance is above the threshold, one electrode does not affect a vertex). Returns ------- numpy.ndarray nVertices X xyz.shape[0] matrix Notes ----- This function is a helper when plotting onto brain surface, by creating a transformation matrix from the values in space (f.e. at each electrode) to the position of the vertices (used to show the brain surface). There are many ways to move from values to vertices. The crucial parameter is the function at which activity decreases in respect to the distance. You can have an inverse relationship by specifying 'exponent'. If 'exponent' is 2, then the activity will decrease as inverse square of the distance. The function can be a Gaussian. With std, you specify the width of the gaussian kernel in mm. For each vertex, it uses a threshold based on the distance ('threshold' value, in mm). Finally, it normalizes the contribution of all the channels to 1, so that the sum of the coefficients for each vertex is 1. You can also create your own matrix (and skip calc_xyz2surf altogether) and pass it as attribute to the main figure. Because it's a loop over all the vertices, this function is pretty slow, but if you calculate it once, you can reuse it. We take advantage of multiprocessing, which speeds it up considerably.
def update_package(self, package_version_details, feed_id, package_name, package_version):
    """UpdatePackage.
    [Preview API]
    :param :class:`<PackageVersionDetails> <azure.devops.v5_0.npm.models.PackageVersionDetails>` package_version_details:
    :param str feed_id:
    :param str package_name:
    :param str package_version:
    :rtype: :class:`<Package> <azure.devops.v5_0.npm.models.Package>`
    """
    # Serialize each supplied route parameter into the request URL values.
    route_params = (
        ('feedId', 'feed_id', feed_id),
        ('packageName', 'package_name', package_name),
        ('packageVersion', 'package_version', package_version),
    )
    route_values = {
        route_key: self._serialize.url(param_name, param_value, 'str')
        for route_key, param_name, param_value in route_params
        if param_value is not None
    }
    content = self._serialize.body(package_version_details, 'PackageVersionDetails')
    response = self._send(http_method='PATCH',
                          location_id='ed579d62-67c9-4271-be66-9b029af5bcf9',
                          version='5.0-preview.1',
                          route_values=route_values,
                          content=content)
    return self._deserialize('Package', response)
UpdatePackage. [Preview API] :param :class:`<PackageVersionDetails> <azure.devops.v5_0.npm.models.PackageVersionDetails>` package_version_details: :param str feed_id: :param str package_name: :param str package_version: :rtype: :class:`<Package> <azure.devops.v5_0.npm.models.Package>`
def get_namespaces_from_names(name, all_names):
    """Return a generator which yields namespace objects."""
    if all_names:
        selected = configuration_namespaces.keys()
    else:
        selected = [name]
    for namespace_name in selected:
        yield get_namespace(namespace_name)
Return a generator which yields namespace objects.
def get_brokendate_fx_forward_rate(self, asset_manager_id, asset_id, price_date, value_date):
    """
    Calculate a broken-date forward FX rate for the given asset
    (currency) between the price date and the value date.
    """
    self.logger.info('Calculate broken date FX Forward - Asset Manager: %s - Asset (currency): %s - Price Date: %s - Value Date: %s',
                     asset_manager_id, asset_id, price_date, value_date)
    url = '%s/brokendateforward/%s' % (self.endpoint, asset_manager_id)
    params = {'value_date': value_date, 'asset_id': asset_id, 'price_date': price_date}
    response = self.session.get(url=url, params=params)

    if not response.ok:
        # Surface the server's message, then raise the HTTP error.
        self.logger.error(response.text)
        response.raise_for_status()

    forward_rate = response.json()
    self.logger.info('Retrieved broken date FX forward rate %s - %s: %s', asset_id, price_date, value_date)
    return forward_rate
This method takes calculates broken date forward FX rate based on the passed in parameters
def add_sideplot(
    ax,
    along,
    pad=0.,
    *,
    grid=True,
    zero_line=True,
    arrs_to_bin=None,
    normalize_bin=True,
    ymin=0,
    ymax=1.1,
    height=0.75,
    c="C0"
):
    """Add a sideplot to an axis. Sideplots share their corresponding axis.

    Parameters
    ----------
    ax : matplotlib AxesSubplot object
        The axis to add a sideplot along.
    along : {'x', 'y'}
        The dimension to add a sideplot along.
    pad : number (optional)
        Distance between axis and sideplot. Default is 0.
    grid : bool (optional)
        Toggle for plotting grid on sideplot. Default is True.
    zero_line : bool (optional)
        Toggle for plotting black line at zero signal. Default is True.
    arrs_to_bin : list [xi, yi, zi] (optional)
        Bins are plotted if arrays are supplied. Default is None.
    normalize_bin : bool (optional)
        Normalize bin by max value. Default is True.
    ymin : number (optional)
        Bin minimum extent. Default is 0.
    ymax : number (optional)
        Bin maximum extent. Default is 1.1
    height : number (optional)
        Sideplot height (passed to ``append_axes``). Default is 0.75.
    c : string (optional)
        Line color. Default is C0.

    Returns
    -------
    axCorr
        AxesSubplot object
    """
    # divider should only be created once; it is cached on the axis object
    # so repeated calls append further sideplots to the same divider.
    if hasattr(ax, "WrightTools_sideplot_divider"):
        divider = ax.WrightTools_sideplot_divider
    else:
        divider = make_axes_locatable(ax)
        setattr(ax, "WrightTools_sideplot_divider", divider)
    # create sideplot axis, sharing the corresponding axis with `ax` so
    # pan/zoom stay in sync.
    if along == "x":
        axCorr = divider.append_axes("top", height, pad=pad, sharex=ax)
    elif along == "y":
        axCorr = divider.append_axes("right", height, pad=pad, sharey=ax)
    axCorr.autoscale(False)
    axCorr.set_adjustable("box")
    # bin: collapse zi along the non-shared axis and plot the profile.
    if arrs_to_bin is not None:
        xi, yi, zi = arrs_to_bin
        if along == "x":
            b = np.nansum(zi, axis=0) * len(yi)
            if normalize_bin:
                b /= np.nanmax(b)
            axCorr.plot(xi, b, c=c, lw=2)
        elif along == "y":
            b = np.nansum(zi, axis=1) * len(xi)
            if normalize_bin:
                b /= np.nanmax(b)
            axCorr.plot(b, yi, c=c, lw=2)
    # beautify: fix the signal-axis limits and hide its ticks.
    if along == "x":
        axCorr.set_ylim(ymin, ymax)
        axCorr.tick_params(axis="x", which="both", length=0)
    elif along == "y":
        axCorr.set_xlim(ymin, ymax)
        axCorr.tick_params(axis="y", which="both", length=0)
    # NOTE(review): plt.grid / plt.axhline / plt.axvline act on pyplot's
    # *current* axes, which is not necessarily axCorr — confirm this is the
    # intended behavior (axCorr.grid(...) etc. would be explicit).
    plt.grid(grid)
    if zero_line:
        if along == "x":
            plt.axhline(0, c="k", lw=1)
        elif along == "y":
            plt.axvline(0, c="k", lw=1)
    plt.setp(axCorr.get_xticklabels(), visible=False)
    plt.setp(axCorr.get_yticklabels(), visible=False)
    return axCorr
Add a sideplot to an axis. Sideplots share their corresponding axis. Parameters ---------- ax : matplotlib AxesSubplot object The axis to add a sideplot along. along : {'x', 'y'} The dimension to add a sideplot along. pad : number (optional) Distance between axis and sideplot. Default is 0. grid : bool (optional) Toggle for plotting grid on sideplot. Default is True. zero_line : bool (optional) Toggle for plotting black line at zero signal. Default is True. arrs_to_bin : list [xi, yi, zi] (optional) Bins are plotted if arrays are supplied. Default is None. normalize_bin : bool (optional) Normalize bin by max value. Default is True. ymin : number (optional) Bin minimum extent. Default is 0. ymax : number (optional) Bin maximum extent. Default is 1.1 c : string (optional) Line color. Default is C0. Returns ------- axCorr AxesSubplot object
def rate_timestamp(self, rate_timestamp):
    """Coerce the supplied value to a timezone-aware datetime before storing.

    Strings are parsed with dateutil and stamped UTC; bare dates are promoted
    to midnight UTC.  A naive datetime (no tzinfo) is rejected.

    :param rate_timestamp: str, date, datetime or None
    :return:
    """
    if rate_timestamp is None:
        self._rate_timestamp = None
        return
    value = rate_timestamp
    if isinstance(value, (str, type_check)):
        value = parse(value).replace(tzinfo=pytz.utc)
    # Exact type check on purpose: datetime is a subclass of date and must
    # not be promoted here.
    if type(value) is date:
        value = datetime.combine(value, datetime.min.time()).replace(tzinfo=pytz.utc)
    if not value.tzinfo:
        raise ValueError('Cannot set an FX rate timestamp without a timezone')
    self._rate_timestamp = value
Force the rate_timestamp to be a datetime :param rate_timestamp: :return:
def normalize_string(mac_type, resource, content_hash):
    """Serialize mac_type and resource into the canonical HAWK string."""
    # Order and blank entries follow what the Node Hawk lib does.
    parts = [
        'hawk.' + str(HAWK_VER) + '.' + mac_type,
        normalize_header_attr(resource.timestamp),
        normalize_header_attr(resource.nonce),
        normalize_header_attr(resource.method or ''),
        normalize_header_attr(resource.name or ''),
        normalize_header_attr(resource.host),
        normalize_header_attr(resource.port),
        normalize_header_attr(content_hash or ''),
        normalize_header_attr(resource.ext or ''),
    ]
    if resource.app:
        parts.append(normalize_header_attr(resource.app))
        parts.append(normalize_header_attr(resource.dlg or ''))
    # Trailing newline is part of the canonical form.
    parts.append('')
    return '\n'.join(parts)
Serializes mac_type and resource into a HAWK string.
def preserve_channel_dim(func):
    """Decorator: restore a dropped trailing channel axis of size 1.

    If the input image has shape (H, W, 1) and ``func`` returns a 2-D array,
    the result is expanded back to (H, W, 1) so the dummy channel dimension
    survives the call.
    """
    @wraps(func)
    def wrapper(img, *args, **kwargs):
        in_shape = img.shape
        out = func(img, *args, **kwargs)
        had_dummy_channel = len(in_shape) == 3 and in_shape[-1] == 1
        if had_dummy_channel and len(out.shape) == 2:
            out = np.expand_dims(out, axis=-1)
        return out

    return wrapper
Preserve dummy channel dim.
def reqToTxn(req):
    """
    Transform a client request such that it can be stored in the ledger.
    Also this is what will be returned to the client in the reply

    :param req: a Request instance, a dict, or a JSON string of one
    :return: the transaction representation
    """
    # Normalize: JSON string -> dict -> Request instance.
    if isinstance(req, str):
        req = json.loads(req)
    if isinstance(req, dict):
        fields = {
            'identifier': req.get(f.IDENTIFIER.nm, None),
            'reqId': req.get(f.REQ_ID.nm, None),
            'operation': req.get(OPERATION, None),
            'signature': req.get(f.SIG.nm, None),
            'signatures': req.get(f.SIGS.nm, None),
            'protocolVersion': req.get(f.PROTOCOL_VERSION.nm, None),
        }
        req = TxnUtilConfig.client_request_class(**fields)
    if not isinstance(req, Request):
        raise TypeError(
            "Expected dict or str as input, but got: {}".format(type(req)))
    req_data = req.as_dict
    req_data[f.DIGEST.nm] = req.digest
    req_data[f.PAYLOAD_DIGEST.nm] = req.payload_digest
    # Deep-copy so the ledger transaction cannot alias the request's dict.
    req_data = deepcopy(req_data)
    return do_req_to_txn(req_data=req_data, req_op=req_data[OPERATION])
Transform a client request such that it can be stored in the ledger. Also this is what will be returned to the client in the reply :param req: :return:
def load_plugins(config, plugin_kwargs):
    """
    Discover and instantiate plugins.

    Args:
        config (dict): loaded configuration for the Gordon service.
        plugin_kwargs (dict): keyword arguments to give to plugins
            during instantiation.
    Returns:
        Tuple of four elements: list of names of plugins, list of
        instantiated plugin objects, any errors encountered while
        loading/instantiating plugins, and the plugin keyword-argument
        dict (augmented with a ``'metrics'`` entry when a metrics plugin
        is configured). ``([], [], [], None)`` is returned if there are
        no plugins found or activated in gordon config.
    """
    installed_plugins = _gather_installed_plugins()
    # A configured metrics plugin is handed to every other plugin via kwargs.
    metrics_plugin = _get_metrics_plugin(config, installed_plugins)
    if metrics_plugin:
        plugin_kwargs['metrics'] = metrics_plugin

    active_plugins = _get_activated_plugins(config, installed_plugins)
    if not active_plugins:
        return [], [], [], None
    plugin_namespaces = _get_plugin_config_keys(active_plugins)
    plugin_configs = _load_plugin_configs(plugin_namespaces, config)
    plugin_names, plugins, errors = _init_plugins(
        active_plugins, installed_plugins, plugin_configs, plugin_kwargs)
    return plugin_names, plugins, errors, plugin_kwargs
Discover and instantiate plugins. Args: config (dict): loaded configuration for the Gordon service. plugin_kwargs (dict): keyword arguments to give to plugins during instantiation. Returns: Tuple of four elements: list of names of plugins, list of instantiated plugin objects, any errors encountered while loading/instantiating plugins, and the plugin keyword-argument dict. ``([], [], [], None)`` is returned if there are no plugins found or activated in gordon config.
def find_filepath(
        filename,
        basepaths=(os.path.curdir, DATA_PATH, BIGDATA_PATH, BASE_DIR, '~', '~/Downloads',
                   os.path.join('/', 'tmp'), '..')):
    """ Given a filename or path see if it exists in any of the common places datafiles might be

    >>> p = find_filepath('iq_test.csv')
    >>> p == expand_filepath(os.path.join(DATA_PATH, 'iq_test.csv'))
    True
    >>> p[-len('iq_test.csv'):]
    'iq_test.csv'
    >>> find_filepath('exponentially-crazy-filename-2.718281828459045.nonexistent')
    False
    """
    # A path that already resolves wins outright.
    if os.path.isfile(filename):
        return filename
    # Otherwise probe each base directory in order; False signals "not found".
    candidates = (expand_filepath(os.path.join(base, filename)) for base in basepaths)
    return next((path for path in candidates if os.path.isfile(path)), False)
Given a filename or path see if it exists in any of the common places datafiles might be >>> p = find_filepath('iq_test.csv') >>> p == expand_filepath(os.path.join(DATA_PATH, 'iq_test.csv')) True >>> p[-len('iq_test.csv'):] 'iq_test.csv' >>> find_filepath('exponentially-crazy-filename-2.718281828459045.nonexistent') False
def create_comment(self, access_token, video_id, content, reply_id=None, captcha_key=None, captcha_text=None):
    """Post a comment (or reply) on a video.

    doc: http://open.youku.com/docs/doc?id=41
    """
    url = 'https://openapi.youku.com/v2/comments/create.json'
    # None-valued optional fields are stripped before posting.
    payload = remove_none_value({
        'client_id': self.client_id,
        'access_token': access_token,
        'video_id': video_id,
        'content': content,
        'reply_id': reply_id,
        'captcha_key': captcha_key,
        'captcha_text': captcha_text,
    })
    response = requests.post(url, data=payload)
    check_error(response)
    return response.json()['id']
doc: http://open.youku.com/docs/doc?id=41
def evpn_instance_mac_timer_max_count(self, **kwargs):
    """ Add "Duplicate MAC max count" under evpn instance.

    Args:
        evpn_instance_name: Instance name for evpn
        max_count: Duplicate MAC max count.
        enable (bool): If target community needs to be enabled
            or disabled.Default:``True``.
        get (bool) : Get config instead of editing config. (True, False)
        rbridge_id (str): rbridge-id for device. Only required when type is
            `ve`.
        callback (function): A function executed upon completion of the
            method.  The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        Return value of `callback`.

    Raises:
        KeyError: if 'evpn_instance_name' is not passed.
        ValueError: if 'evpn_instance_name' is invalid.

    Examples:
        >>> import pynos.device
        >>> switches = ['10.24.39.211', '10.24.39.203']
        >>> auth = ('admin', 'password')
        >>> for switch in switches:
        ...     conn = (switch, '22')
        ...     with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...         output = dev.interface.evpn_instance_mac_timer_max_count(
        ...             evpn_instance_name='100',
        ...             max_count='10',
        ...             rbridge_id='1')
        ...         output = dev.interface.evpn_instance_mac_timer_max_count(
        ...             get=True,
        ...             evpn_instance_name='100',
        ...             max_count='10',
        ...             rbridge_id='1')
        ...         output = dev.interface.evpn_instance_mac_timer_max_count(
        ...             enable=False,
        ...             evpn_instance_name='101',
        ...             max_count='10',
        ...             rbridge_id='1')
        ...         output = dev.interface.evpn_instance_mac_timer_max_count(
        ...             get=True,
        ...             evpn_instance_name='101',
        ...             rbridge_id='1')
        ...         # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        KeyError
    """
    evpn_instance_name = kwargs.pop('evpn_instance_name', '')
    max_count = kwargs.pop('max_count', '5')  # device default is 5
    enable = kwargs.pop('enable', True)
    get = kwargs.pop('get', False)
    rbridge_id = kwargs.pop('rbridge_id', '1')
    callback = kwargs.pop('callback', self._callback)
    evpn_args = dict(instance_name=evpn_instance_name,
                     max_count=max_count)
    # A get request must not carry an enable/disable operation.
    if get:
        enable = None
    method_name = 'rbridge_id_evpn_instance_duplicate_'\
                  'mac_timer_max_count'
    method_class = self._rbridge
    evpn_args['rbridge_id'] = rbridge_id
    # Look up the generated NETCONF binding and build the config element.
    evpn_instance_mac_timer_max_count = getattr(method_class,
                                                method_name)
    config = evpn_instance_mac_timer_max_count(**evpn_args)
    if get:
        return callback(config, handler='get_config')
    # Disabling is expressed as a NETCONF delete on the timer node.
    if not enable:
        config.find('.//*duplicate-mac-timer').set('operation', 'delete')
    return callback(config)
Add "Duplicate MAC max count" under evpn instance. Args: evpn_instance_name: Instance name for evpn max_count: Duplicate MAC max count. enable (bool): If target community needs to be enabled or disabled.Default:``True``. get (bool) : Get config instead of editing config. (True, False) rbridge_id (str): rbridge-id for device. Only required when type is `ve`. callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if 'evpn_instance_name' is not passed. ValueError: if 'evpn_instance_name' is invalid. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.203'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output=dev.interface.evpn_instance_mac_timer_max_count( ... evpn_instance_name='100', ... max_count='10' ... rbridge_id='1') ... output=dev.interface.evpn_instance_mac_timer_max_count( ... get=True, ... evpn_instance_name='100', ... max_count='10' ... rbridge_id='1') ... output=dev.interface.evpn_instance_mac_timer_max_count( ... enable=False, ... evpn_instance_name='101', ... max_count='10' ... rbridge_id='1') ... output=dev.interface.evpn_instance_mac_timer_max_count( ... get=True, ... evpn_instance_name='101', ... rbridge_id='1') ... # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): KeyError
def extractArgumentsFromCallStr(callStr):
    """
    Parse the argument string via an AST instead of the overly simple
    callStr.split(','). The latter incorrectly splits any string
    parameters that contain commas therein, like ic(1, 'a,b', 2).

    Returns the list of argument source strings, stripped of surrounding
    whitespace and commas.
    """
    def isTuple(ele):
        return classname(ele) == 'Tuple'

    # Strip the call wrapper: everything between the first '(' and last ')'.
    paramsStr = callStr.split('(', 1)[-1].rsplit(')', 1)[0].strip()

    root = ast.parse(paramsStr).body[0].value
    eles = root.elts if isTuple(root) else [root]

    # The ast module parses 'a, b' and '(a, b)' identically. Thus, ast.parse()
    # alone can't tell the difference between
    #
    #   ic(a, b)
    #
    # and
    #
    #   ic((a, b))
    #
    # Detect this situation and preserve whole tuples, e.g. '(a, b)', passed to
    # ic() by creating a new, temporary tuple around the original tuple and
    # parsing that.
    if paramsStr[0] == '(' and paramsStr[-1] == ')' and len(eles) > 1:
        newTupleStr = '(' + paramsStr + ", 'ignored')"
        # Recurse on the wrapped string, then drop the sentinel argument.
        argStrs = extractArgumentsFromCallStr(newTupleStr)[:-1]
        return argStrs

    # Split on each argument's source column; for a tuple back up one column
    # to include its opening parenthesis.
    indices = [
        max(0, e.col_offset - 1) if isTuple(e) else e.col_offset for e in eles]
    argStrs = [s.strip(' ,') for s in splitStringAtIndices(paramsStr, indices)]

    return argStrs
Parse the argument string via an AST instead of the overly simple callStr.split(','). The latter incorrectly splits any string parameters that contain commas therein, like ic(1, 'a,b', 2).
def fget_object(self, bucket_name, object_name, file_path, request_headers=None, sse=None):
    """
    Retrieves an object from a bucket and writes at file_path.

    Examples:
        minio.fget_object('foo', 'bar', 'localfile')

    :param bucket_name: Bucket to read object from.
    :param object_name: Name of the object to read.
    :param file_path: Local file path to save the object.
    :param request_headers: Any additional headers to be added with GET
        request.
    :param sse: Server-side-encryption options passed through to
        stat/get.  (presumably an SSE-C key object — TODO confirm)
    :return: Stat info of the downloaded object.
    """
    is_valid_bucket_name(bucket_name)
    is_non_empty_string(object_name)

    stat = self.stat_object(bucket_name, object_name, sse)

    if os.path.isdir(file_path):
        raise OSError("file is a directory.")

    # Create top level directory if needed.
    top_level_dir = os.path.dirname(file_path)
    if top_level_dir:
        mkdir_p(top_level_dir)

    # Write to a temporary file "file_path.<etag>.part.minio" before saving;
    # keying the part file on the etag lets a partial download resume only
    # against the same object version.
    file_part_path = file_path + stat.etag + '.part.minio'

    # Open file in 'write+append' mode.
    with open(file_part_path, 'ab') as file_part_data:
        # Save current file_part statinfo.
        file_statinfo = os.stat(file_part_path)

        # Get partial object, starting after any bytes already present in
        # the part file (resume); length=0 means "to the end".
        response = self._get_partial_object(bucket_name,
                                            object_name,
                                            offset=file_statinfo.st_size,
                                            length=0,
                                            request_headers=request_headers,
                                            sse=sse)

        # Save content_size to verify if we wrote more data.
        content_size = int(response.headers['content-length'])

        # Save total_written.
        total_written = 0
        for data in response.stream(amt=1024 * 1024):
            file_part_data.write(data)
            total_written += len(data)

        # Release the connection from the response at this point.
        response.release_conn()

        # Verify if we wrote data properly.
        # NOTE(review): the wrapped literals below lack a space between
        # "the" and "specified", so the message reads "...than thespecified..."
        # — cannot be fixed here without changing runtime strings.
        if total_written < content_size:
            msg = 'Data written {0} bytes is smaller than the' \
                'specified size {1} bytes'.format(total_written,
                                                  content_size)
            raise InvalidSizeError(msg)
        if total_written > content_size:
            msg = 'Data written {0} bytes is in excess than the' \
                'specified size {1} bytes'.format(total_written,
                                                  content_size)
            raise InvalidSizeError(msg)

    # Delete existing file to be compatible with Windows
    if os.path.exists(file_path):
        os.remove(file_path)
    # Rename with destination file path
    os.rename(file_part_path, file_path)

    # Return the stat
    return stat
Retrieves an object from a bucket and writes at file_path. Examples: minio.fget_object('foo', 'bar', 'localfile') :param bucket_name: Bucket to read object from. :param object_name: Name of the object to read. :param file_path: Local file path to save the object. :param request_headers: Any additional headers to be added with GET request.
def build_bond(iface, **settings):
    '''
    Create a bond script in /etc/modprobe.d with the passed settings
    and load the bonding kernel module.

    CLI Example:

    .. code-block:: bash

        salt '*' ip.build_bond bond0 mode=balance-alb
    '''
    rh_major = __grains__['osrelease'][:1]

    opts = _parse_settings_bond(settings, iface)
    try:
        template = JINJA.get_template('conf.jinja')
    except jinja2.exceptions.TemplateNotFound:
        log.error('Could not load template conf.jinja')
        return ''
    data = template.render({'name': iface, 'bonding': opts})
    conf_name = '{0}.conf'.format(iface)
    _write_file_iface(iface, data, _RH_NETWORK_CONF_FILES, conf_name)
    path = os.path.join(_RH_NETWORK_CONF_FILES, conf_name)
    if rh_major == '5':
        # RHEL 5 keeps module options in /etc/modprobe.conf: scrub any
        # stale alias/options lines for this interface, then append ours.
        __salt__['cmd.run'](
            'sed -i -e "/^alias\\s{0}.*/d" /etc/modprobe.conf'.format(iface),
            python_shell=False
        )
        __salt__['cmd.run'](
            'sed -i -e "/^options\\s{0}.*/d" /etc/modprobe.conf'.format(iface),
            python_shell=False
        )
        __salt__['file.append']('/etc/modprobe.conf', path)
    __salt__['kmod.load']('bonding')

    # In test mode return the rendered data instead of the written file.
    if settings['test']:
        return _read_temp(data)

    return _read_file(path)
Create a bond script in /etc/modprobe.d with the passed settings and load the bonding kernel module. CLI Example: .. code-block:: bash salt '*' ip.build_bond bond0 mode=balance-alb