code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def create_client_with_manual_poll(api_key, config_cache_class=None, base_url=None):
    """Create a ConfigCatClient configured for Manual Poll mode.

    :param api_key: ConfigCat ApiKey to access your configuration.
    :param config_cache_class: Optional ConfigCache implementation to use
        instead of the client's default InMemoryConfigCache.
    :param base_url: Optional base URL, e.g. to route through a proxy
        server between your application and ConfigCat.
    :raises ConfigCatClientException: if ``api_key`` is None.
    """
    if api_key is None:
        raise ConfigCatClientException('API Key is required.')
    # Manual poll: all polling-related positional settings are zeroed out.
    poll_interval, max_init_wait, change_listener, cache_ttl = 0, 0, None, 0
    return ConfigCatClient(api_key, poll_interval, max_init_wait,
                           change_listener, cache_ttl,
                           config_cache_class, base_url)
Create an instance of ConfigCatClient and setup Manual Poll mode with custom options :param api_key: ConfigCat ApiKey to access your configuration. :param config_cache_class: If you want to use custom caching instead of the client's default InMemoryConfigCache, You can provide an implementation of ConfigCache. :param base_url: You can set a base_url if you want to use a proxy server between your application and ConfigCat
def has_textonly_pdf():
    """Does Tesseract have textonly_pdf capability?

    Available in v4.00.00alpha since January 2017.  Best to parse the
    parameter list produced by ``tesseract --print-parameters``.

    :return: True if the installed tesseract supports ``textonly_pdf``.
    :raises MissingDependencyError: if tesseract cannot be queried.
    """
    args_tess = ['tesseract', '--print-parameters', 'pdf']
    try:
        params = check_output(args_tess, universal_newlines=True,
                              stderr=STDOUT)
    except CalledProcessError as e:
        print("Could not --print-parameters from tesseract", file=sys.stderr)
        raise MissingDependencyError from e
    # Direct membership test replaces the redundant
    # ``if ...: return True / return False``; the ``params = ''``
    # pre-initialization was dead code (the except branch always raises).
    return 'textonly_pdf' in params
Does Tesseract have textonly_pdf capability? Available in v4.00.00alpha since January 2017. Best to parse the parameter list
def Delete(self):
    """Deletes the user via the DeleteGrrUser API call."""
    request_args = user_management_pb2.ApiDeleteGrrUserArgs(
        username=self.username)
    self._context.SendRequest("DeleteGrrUser", request_args)
Deletes the user.
def cast_values_csvs(d, idx, x):
    """Attempt to cast a string to a float; keep it as a string on failure.

    :param dict d: Data, mapping column index -> list of values
    :param int idx: Index number (key into ``d``)
    :param str x: Data value to cast and append
    :return any: the same dictionary, with ``x`` appended under ``idx``
    """
    try:
        value = float(x)
    except ValueError:
        # Not numeric -- keep the raw string.
        value = x
    try:
        d[idx].append(value)
    except KeyError as e:
        logger_misc.warn("cast_values_csv: KeyError: col: {}, {}".format(x, e))
    return d
Attempt to cast string to float. If error, keep as a string. :param dict d: Data :param int idx: Index number :param str x: Data :return any:
def create(self, friendly_name, event_callback_url=values.unset, events_filter=values.unset, multi_task_enabled=values.unset, template=values.unset, prioritize_queue_order=values.unset): data = values.of({ 'FriendlyName': friendly_name, 'EventCallbackUrl': event_callback_url, 'EventsFilter': events_filter, 'MultiTaskEnabled': multi_task_enabled, 'Template': template, 'PrioritizeQueueOrder': prioritize_queue_order, }) payload = self._version.create( 'POST', self._uri, data=data, ) return WorkspaceInstance(self._version, payload, )
Create a new WorkspaceInstance :param unicode friendly_name: Human readable description of this workspace :param unicode event_callback_url: If provided, the Workspace will publish events to this URL. :param unicode events_filter: Use this parameter to receive webhooks on EventCallbackUrl for specific events on a workspace. :param bool multi_task_enabled: Multi tasking allows workers to handle multiple tasks simultaneously. :param unicode template: One of the available template names. :param WorkspaceInstance.QueueOrder prioritize_queue_order: Use this parameter to configure whether to prioritize LIFO or FIFO when workers are receiving Tasks from combination of LIFO and FIFO TaskQueues. :returns: Newly created WorkspaceInstance :rtype: twilio.rest.taskrouter.v1.workspace.WorkspaceInstance
def _get_proc_username(proc): try: return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username) except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError): return None
Returns the username of a Process instance. It's backward compatible with < 2.0 versions of psutil.
def _switch_partition() -> RootPartitions: res = subprocess.check_output(['ot-switch-partitions']) for line in res.split(b'\n'): matches = re.match( b'Current boot partition: ([23]), setting to ([23])', line) if matches: return {b'2': RootPartitions.TWO, b'3': RootPartitions.THREE}[matches.group(2)] else: raise RuntimeError(f'Bad output from ot-switch-partitions: {res}')
Switch the active boot partition using the switch script
def ver_cmp(ver1, ver2):
    """Compare lago versions.

    Args:
        ver1(str): version string
        ver2(str): version string

    Returns:
        int: negative if ver1<ver2, zero if ver1==ver2, positive if
        ver1>ver2.
    """
    parsed1 = pkg_resources.parse_version(ver1)
    parsed2 = pkg_resources.parse_version(ver2)
    # The ``cmp`` builtin was removed in Python 3; this expression is the
    # documented replacement and keeps the same sign convention.
    return (parsed1 > parsed2) - (parsed1 < parsed2)
Compare lago versions Args: ver1(str): version string ver2(str): version string Returns: Return negative if ver1<ver2, zero if ver1==ver2, positive if ver1>ver2.
def get_min_instability(self, min_voltage=None, max_voltage=None):
    """The minimum instability along a path for a specific voltage range.

    Args:
        min_voltage: The minimum allowable voltage.
        max_voltage: The maximum allowable voltage.

    Returns:
        Minimum decomposition energy of all compounds along the insertion
        path (a subset of the path can be chosen by the optional
        arguments), or None if no decomposition energies are available.
    """
    energies = [
        e
        for pair in self._select_in_voltage_range(min_voltage, max_voltage)
        for e in (pair.decomp_e_charge, pair.decomp_e_discharge)
        if e is not None
    ]
    return min(energies) if energies else None
The minimum instability along a path for a specific voltage range. Args: min_voltage: The minimum allowable voltage. max_voltage: The maximum allowable voltage. Returns: Minimum decomposition energy of all compounds along the insertion path (a subset of the path can be chosen by the optional arguments)
def hash(self): return hash( self._shape + ( self.tilewidth, self.tilelength, self.tiledepth, self.bitspersample, self.fillorder, self.predictor, self.extrasamples, self.photometric, self.compression, self.planarconfig))
Return checksum to identify pages in same series.
def _deserialize(cls, key, value, fields): converter = cls._get_converter_for_field(key, None, fields) return converter.deserialize(value)
Marshal incoming data into Python objects.
def calc_qpout_v1(self): der = self.parameters.derived.fastaccess flu = self.sequences.fluxes.fastaccess for idx in range(der.nmb): flu.qpout[idx] = flu.qma[idx]+flu.qar[idx]
Calculate the ARMA results for the different response functions. Required derived parameter: |Nmb| Required flux sequences: |QMA| |QAR| Calculated flux sequence: |QPOut| Examples: Initialize an arma model with three different response functions: >>> from hydpy.models.arma import * >>> parameterstep() >>> derived.nmb(3) >>> fluxes.qma.shape = 3 >>> fluxes.qar.shape = 3 >>> fluxes.qpout.shape = 3 Define the output values of the MA and of the AR processes associated with the three response functions and apply method |calc_qpout_v1|: >>> fluxes.qar = 4.0, 5.0, 6.0 >>> fluxes.qma = 1.0, 2.0, 3.0 >>> model.calc_qpout_v1() >>> fluxes.qpout qpout(5.0, 7.0, 9.0)
def parse_kwargs(kwargs):
    """Convert a list of ``--key=value`` strings into a dictionary.

    Duplicates of the same keyword are collected into a list within the
    dictionary; single occurrences are unwrapped to the bare value.

    >>> parse_kwargs(['--var1=1', '--var2=2', '--var1=3'])
    {'var1': ['1', '3'], 'var2': '2'}

    :param kwargs: iterable of strings of the form ``--key=value``
    :return: dict mapping key to a value, or to a list of values for
        repeated keys
    """
    grouped = defaultdict(list)
    for arg in kwargs:
        key, value = arg.split('=')
        grouped[key.lstrip('-')].append(value)
    # Unwrap single-element lists.  The original also checked
    # ``type(v) is list``, which was always true because every value
    # comes from a ``defaultdict(list)``; that dead check is removed.
    return {k: (v[0] if len(v) == 1 else v) for k, v in grouped.items()}
Convert a list of kwargs into a dictionary. Duplicates of the same keyword get added to a list within the dictionary. >>> parse_kwargs(['--var1=1', '--var2=2', '--var1=3']) {'var1': ['1', '3'], 'var2': '2'}
def get_script_property(value, is_bytes=False): obj = unidata.ascii_scripts if is_bytes else unidata.unicode_scripts if value.startswith('^'): negated = value[1:] value = '^' + unidata.unicode_alias['script'].get(negated, negated) else: value = unidata.unicode_alias['script'].get(value, value) return obj[value]
Get `SC` property.
def connect(self, region, **kw_params): self.ec2 = boto.ec2.connect_to_region(region, **kw_params) if not self.ec2: raise EC2ManagerException('Unable to connect to region "%s"' % region) self.remote_images.clear() if self.images and any(('image_name' in img and 'image_id' not in img) for img in self.images.values()): for img in self.images.values(): if 'image_name' in img and 'image_id' not in img: img['image_id'] = self.resolve_image_name(img.pop('image_name'))
Connect to a EC2. :param region: The name of the region to connect to. :type region: str :param kw_params: :type kw_params: dict
def get_record_types(self): from ..type.objects import TypeList type_list = [] for type_idstr in self._supported_record_type_ids: type_list.append(Type(**self._record_type_data_sets[Id(type_idstr).get_identifier()])) return TypeList(type_list)
Gets the record types available in this object. A record ``Type`` explicitly indicates the specification of an interface to the record. A record may or may not inherit other record interfaces through interface inheritance in which case support of a record type may not be explicit in the returned list. Interoperability with the typed interface to this object should be performed through ``hasRecordType()``. return: (osid.type.TypeList) - the record types available *compliance: mandatory -- This method must be implemented.*
def default(self): output = ensure_unicode(self.git.log( '-1', '-p', '--no-color', '--format=%s', ).stdout) lines = output.splitlines() return u'\n'.join( itertools.chain( lines[:1], itertools.islice( itertools.dropwhile( lambda x: not x.startswith('+++'), lines[1:], ), 1, None, ), ) )
Return last changes in truncated unified diff format
def defaults(): return dict((str(k), str(v)) for k, v in cma_default_options.items())
return a dictionary with default option values and description
def add(self, entity): characteristic = self.extract_traits(entity) if not characteristic.traits: return if characteristic.is_matching: self.add_match(entity, *characteristic.traits) else: self.add_mismatch(entity, *characteristic.traits)
Add entity to index. :param object entity: single object to add to box's index
def zip_dict(a: Dict[str, A], b: Dict[str, B]) \
        -> Dict[str, Tuple[Optional[A], Optional[B]]]:
    """Combine the values within two dictionaries by key.

    :param a: The first dictionary.
    :param b: The second dictionary.
    :return: A dictionary containing all keys that appear in the union of
        a and b.  Values are pairs where the first part is a's value for
        the key and the second part is b's value (``None`` where a key is
        absent from one dictionary).
    """
    combined = {}
    for key in a.keys() | b.keys():
        combined[key] = (a.get(key), b.get(key))
    return combined
Combine the values within two dictionaries by key. :param a: The first dictionary. :param b: The second dictionary. :return: A dictionary containing all keys that appear in the union of a and b. Values are pairs where the first part is a's value for the key, and the second part is b's value.
def get_commits(self, since_sha=None): assert self.tempdir cmd = ['git', 'log', '--first-parent', '--reverse', COMMIT_FORMAT] if since_sha: commits = [self.get_commit(since_sha)] cmd.append('{}..HEAD'.format(since_sha)) else: commits = [] cmd.append('HEAD') output = cmd_output(*cmd, cwd=self.tempdir) for sha, date in chunk_iter(output.splitlines(), 2): commits.append(Commit(sha, int(date))) return commits
Returns a list of Commit objects. Args: since_sha - (optional) A sha to search from
def set_ard_time(self, us): t = int((us / 250) - 1) if (t < 0): t = 0 if (t > 0xF): t = 0xF _send_vendor_setup(self.handle, SET_RADIO_ARD, t, 0, ())
Set the ACK retry delay for radio communication
def _default(self, obj):
    """Return a serialized version of obj or raise a TypeError.

    :param obj: object being serialized
    :return: ``obj.__dict__`` for JsonObj instances; otherwise the result
        of ``json.JSONDecoder().decode(obj)`` (which raises TypeError for
        unsupported types)
    """
    if isinstance(obj, JsonObj):
        return obj.__dict__
    return json.JSONDecoder().decode(obj)
return a serialized version of obj or raise a TypeError :param obj: :return: Serialized version of obj
def request_login(blink, url, username, password, is_retry=False): headers = { 'Host': DEFAULT_URL, 'Content-Type': 'application/json' } data = dumps({ 'email': username, 'password': password, 'client_specifier': 'iPhone 9.2 | 2.2 | 222' }) return http_req(blink, url=url, headers=headers, data=data, json_resp=False, reqtype='post', is_retry=is_retry)
Login request. :param blink: Blink instance. :param url: Login url. :param username: Blink username. :param password: Blink password. :param is_retry: Is this part of a re-authorization attempt?
def get_as_float_with_default(self, index, default_value): value = self[index] return FloatConverter.to_float_with_default(value, default_value)
Converts array element into a float or returns default value if conversion is not possible. :param index: an index of element to get. :param default_value: the default value :return: float value of the element or default value if conversion is not supported.
def _auto_commit(self):
    """Commit if the number of messages consumed since the last commit has
    reached the configured auto-commit threshold."""
    enabled = self.auto_commit and self.auto_commit_every_n is not None
    if enabled and self.count_since_commit >= self.auto_commit_every_n:
        self.commit()
Check if we have to commit based on number of messages and commit
def updateData(self,exten,data): _extnum=self._interpretExten(exten) fimg = fileutil.openImage(self._filename, mode='update', memmap=False) fimg[_extnum].data = data fimg[_extnum].header = self._image[_extnum].header fimg.close()
Write out updated data and header to the original input file for this object.
def languages_column(self, obj): languages = self.get_available_languages(obj) return '<span class="available-languages">{0}</span>'.format( " ".join(languages) )
Adds languages columns.
def ip(self): if not self._ip: if 'ip' in self.config: ip = self.config['ip'] else: ip = self.protocol.transport.get_extra_info('sockname')[0] ip = ip_address(ip) if ip.version == 4: self._ip = ip else: response = urlopen('http://ipv4.icanhazip.com/') ip = response.read().strip().decode() ip = ip_address(ip) self._ip = ip return self._ip
return bot's ip as an ``ip_address`` object
def dicomdir_info(dirpath, *args, **kwargs): dr = DicomReader(dirpath=dirpath, *args, **kwargs) info = dr.dicomdirectory.get_stats_of_series_in_dir() return info
Get information about series in dir
def checkout(self, *args, **kwargs): self._call_helper("Checking out", self.real.checkout, *args, **kwargs)
This function checks out source code.
def get_session_key(self, username, password_hash): params = {"username": username, "authToken": md5(username + password_hash)} request = _Request(self.network, "auth.getMobileSession", params) request.sign_it() doc = request.execute() return _extract(doc, "key")
Retrieve a session key with a username and a md5 hash of the user's password.
def set_level(level): Logger.level = level for logger in Logger.loggers.values(): logger.setLevel(level)
Set level of logging for all loggers. Args: level (int): level of logging.
def pack_value(self, val): if isinstance(val, bytes): val = list(iterbytes(val)) slen = len(val) if self.pad: pad = b'\0\0' * (slen % 2) else: pad = b'' return struct.pack('>' + 'H' * slen, *val) + pad, slen, None
Convert 8-byte string into 16-byte list
def _delete_partition(self, tenant_id, tenant_name): self.dcnm_obj.delete_partition(tenant_name, fw_const.SERV_PART_NAME)
Function to delete a service partition.
def _token_extensions(self): token_provider = self.config['sasl_oauth_token_provider'] if callable(getattr(token_provider, "extensions", None)) and len(token_provider.extensions()) > 0: msg = "\x01".join(["{}={}".format(k, v) for k, v in token_provider.extensions().items()]) return "\x01" + msg else: return ""
Return a string representation of the OPTIONAL key-value pairs that can be sent with an OAUTHBEARER initial request.
def _build_indexes(self): if isinstance(self._data, list): for d in self._data: if not isinstance(d, dict): err = u'Cannot build index for non Dict type.' self._tcex.log.error(err) raise RuntimeError(err) data_obj = DataObj(d) self._master_index.setdefault(id(data_obj), data_obj) for key, value in d.items(): if not isinstance(value, (float, int, str)): self._tcex.log.debug(u'Can only build index String Types.') continue self._indexes.setdefault(key, {}).setdefault(value, []).append(data_obj) else: err = u'Only *List* data type is currently supported' self._tcex.log.error(err) raise RuntimeError(err)
Build indexes from data for fast filtering of data. Building indexes of data when possible. This is only supported when dealing with a List of Dictionaries with String values.
def show_grid(self, **kwargs): kwargs.setdefault('grid', 'back') kwargs.setdefault('location', 'outer') kwargs.setdefault('ticks', 'both') return self.show_bounds(**kwargs)
A wrapped implementation of ``show_bounds`` to change default behaviour to use gridlines and showing the axes labels on the outer edges. This is intended to be similar to ``matplotlib``'s ``grid`` function.
def get_model(with_pipeline=False): model = NeuralNetClassifier(MLPClassifier) if with_pipeline: model = Pipeline([ ('scale', FeatureUnion([ ('minmax', MinMaxScaler()), ('normalize', Normalizer()), ])), ('select', SelectKBest(k=N_FEATURES)), ('net', model), ]) return model
Get a multi-layer perceptron model. Optionally, put it in a pipeline that scales the data.
def print(self): print( '{dim}Identifier:{none} {cyan}{identifier}{none}\n' '{dim}Name:{none} {name}\n' '{dim}Description:{none}\n{description}'.format( dim=Style.DIM, cyan=Fore.CYAN, none=Style.RESET_ALL, identifier=self.identifier, name=self.name, description=pretty_description(self.description, indent=2) ) ) if hasattr(self, 'argument_list') and self.argument_list: print('{dim}Arguments:{none}'.format( dim=Style.DIM, none=Style.RESET_ALL)) for argument in self.argument_list: argument.print(indent=2)
Print self.
def desaturate(self, level): h, s, l = self.__hsl return Color((h, max(s - level, 0), l), 'hsl', self.__a, self.__wref)
Create a new instance based on this one but less saturated. Parameters: :level: The amount by which the color should be desaturated to produce the new one [0...1]. Returns: A grapefruit.Color instance. >>> Color.from_hsl(30, 0.5, 0.5).desaturate(0.25) Color(0.625, 0.5, 0.375, 1.0) >>> Color.from_hsl(30, 0.5, 0.5).desaturate(0.25).hsl (30.0, 0.25, 0.5)
def from_json(cls, json_data): data = json.loads(json_data) result = cls(data) if hasattr(result, "_from_json"): result._from_json() return result
Tries to convert a JSON representation to an object of the same type as self A class can provide a _from_json implementation in order to do specific type checking or other custom implementation details. This method will throw a ValueError for invalid JSON, a TypeError for improperly constructed, but valid JSON, and any custom errors that can be be propagated from class constructors. :param json_data: The JSON string to convert :type json_data: str | unicode :raises: TypeError, ValueError, LanguageMapInitError
def _udp_transact(self, payload, handler, *args, broadcast=False, timeout=TIMEOUT): if self.host in _BUFFER: del _BUFFER[self.host] host = self.host if broadcast: host = '255.255.255.255' retval = None for _ in range(RETRIES): _SOCKET.sendto(bytearray(payload), (host, PORT)) start = time.time() while time.time() < start + timeout: data = _BUFFER.get(self.host, None) if data: retval = handler(data, *args) if retval: return retval
Complete a UDP transaction. UDP is stateless and not guaranteed, so we have to take some mitigation steps: - Send payload multiple times. - Wait for awhile to receive response. :param payload: Payload to send. :param handler: Response handler. :param args: Arguments to pass to response handler. :param broadcast: Send a broadcast instead. :param timeout: Timeout in seconds.
def match_sr(self, svc_ref, cid=None): with self.__lock: our_sr = self.get_reference() if our_sr is None: return False sr_compare = our_sr == svc_ref if cid is None: return sr_compare our_cid = self.get_export_container_id() if our_cid is None: return False return sr_compare and our_cid == cid
Checks if this export registration matches the given service reference :param svc_ref: A service reference :param cid: A container ID :return: True if the service matches this export registration
def _delete_masked_points(*arrs): if any(hasattr(a, 'mask') for a in arrs): keep = ~functools.reduce(np.logical_or, (np.ma.getmaskarray(a) for a in arrs)) return tuple(ma.asarray(a[keep]) for a in arrs) else: return arrs
Delete masked points from arrays. Takes arrays and removes masked points to help with calculations and plotting. Parameters ---------- arrs : one or more array-like source arrays Returns ------- arrs : one or more array-like arrays with masked elements removed
def filter_ignoring_case(self, pattern): return self.filter(re.compile(pattern, re.I))
Like ``filter`` but case-insensitive. Expects a regular expression string without the surrounding ``/`` characters. >>> see().filter('^my', ignore_case=True) MyClass()
def update(name, maximum_version=None, required_version=None): flags = [('Name', name)] if maximum_version is not None: flags.append(('MaximumVersion', maximum_version)) if required_version is not None: flags.append(('RequiredVersion', required_version)) params = '' for flag, value in flags: params += '-{0} {1} '.format(flag, value) cmd = 'Update-Module {0} -Force'.format(params) _pshell(cmd) return name in list_modules()
Update a PowerShell module to a specific version, or the newest :param name: Name of a Powershell module :type name: ``str`` :param maximum_version: The maximum version to install, e.g. 1.23.2 :type maximum_version: ``str`` :param required_version: Install a specific version :type required_version: ``str`` CLI Example: .. code-block:: bash salt 'win01' psget.update PowerPlan
def add_to_obj(obj, dictionary, objs=None, exceptions=None, verbose=0): if exceptions is None: exceptions = [] for item in dictionary: if item in exceptions: continue if dictionary[item] is not None: if verbose: print("process: ", item, dictionary[item]) key, value = get_key_value(dictionary[item], objs, key=item) if verbose: print("assign: ", key, value) try: setattr(obj, key, value) except AttributeError: raise AttributeError("Can't set {0}={1} on object: {2}".format(key, value, obj))
Cycles through a dictionary and adds the key-value pairs to an object. :param obj: :param dictionary: :param objs: :param exceptions: :param verbose: :return:
def imagetransformerpp_base_14l_8h_big_uncond_dr03_dan_p(): hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_l() hparams.num_decoder_layers = 14 hparams.batch_size = 8 hparams.layer_prepostprocess_dropout = 0.2 return hparams
Gets to 2.92 in just under 4 days on 8 p100s.
def make_field(self, **kwargs): kwargs['required'] = False kwargs['allow_null'] = True return self.field_class(**kwargs)
create serializer field
def version(): cmd = 'lvm version' out = __salt__['cmd.run'](cmd).splitlines() ret = out[0].split(': ') return ret[1].strip()
Return LVM version from lvm version CLI Example: .. code-block:: bash salt '*' lvm.version
def get_formatted_string(self, input_string): if isinstance(input_string, str): try: return self.get_processed_string(input_string) except KeyNotInContextError as err: raise KeyNotInContextError( f'Unable to format \'{input_string}\' because {err}' ) from err elif isinstance(input_string, SpecialTagDirective): return input_string.get_value(self) else: raise TypeError(f"can only format on strings. {input_string} is a " f"{type(input_string)} instead.")
Return formatted value for input_string. get_formatted gets a context[key] value. get_formatted_string is for any arbitrary string that is not in the context. Only valid if input_string is a type string. Return a string interpolated from the context dictionary. If input_string='Piping {key1} the {key2} wild' And context={'key1': 'down', 'key2': 'valleys', 'key3': 'value3'} Then this will return string: "Piping down the valleys wild" Args: input_string: string to parse for substitutions. Returns: Formatted string. Raises: KeyNotInContextError: context[key] has {somekey} where somekey does not exist in context dictionary. TypeError: Attempt operation on a non-string type.
def setupTable_vmtx(self): if "vmtx" not in self.tables: return self.otf["vmtx"] = vmtx = newTable("vmtx") vmtx.metrics = {} for glyphName, glyph in self.allGlyphs.items(): height = otRound(glyph.height) if height < 0: raise ValueError( "The height should not be negative: '%s'" % (glyphName)) verticalOrigin = _getVerticalOrigin(self.otf, glyph) bounds = self.glyphBoundingBoxes[glyphName] top = bounds.yMax if bounds else 0 vmtx[glyphName] = (height, verticalOrigin - top)
Make the vmtx table. **This should not be called externally.** Subclasses may override or supplement this method to handle the table creation in a different way if desired.
def get_message(self): result = '' if self._data_struct is not None: result = self._data_struct[KEY_MESSAGE] return result
Return the message embedded in the JSON error response body, or an empty string if the JSON couldn't be parsed.
def list(self):
    """List all the databases on the given path.

    :return: database directory names in reverse lexicographic order
    """
    # sorted(..., reverse=True) is equivalent to list(reversed(sorted(...)))
    # for the string names returned by os.listdir.
    return sorted(os.listdir(self.path), reverse=True)
List all the databases on the given path. :return:
def WriteFromFD(self, src_fd, arcname=None, compress_type=None, st=None): yield self.WriteFileHeader( arcname=arcname, compress_type=compress_type, st=st) while 1: buf = src_fd.read(1024 * 1024) if not buf: break yield self.WriteFileChunk(buf) yield self.WriteFileFooter()
Write a zip member from a file like object. Args: src_fd: A file like object, must support seek(), tell(), read(). arcname: The name in the archive this should take. compress_type: Compression type (zipfile.ZIP_DEFLATED, or ZIP_STORED) st: An optional stat object to be used for setting headers. Raises: ArchiveAlreadyClosedError: If the zip if already closed. Yields: Chunks of binary data.
def get_allowed_methods(self, callback): if hasattr(callback, 'actions'): return [method.upper() for method in callback.actions.keys() if method != 'head'] return [ method for method in callback.cls().allowed_methods if method not in ('OPTIONS', 'HEAD') ]
Return a list of the valid HTTP methods for this endpoint.
def pub(self, topic, message): return self.send(' '.join((constants.PUB, topic)), message)
Publish to a topic
def times(x, y):
    """Run the decorated function a random number of times between x and y.

    :param x: minimum number of invocations (inclusive)
    :param y: maximum number of invocations (inclusive)
    :return: a decorator wrapping the function; the wrapper returns None
    """
    def decorator(fn):
        @functools.wraps(fn)  # preserve the wrapped function's metadata
        def wrapped(*args, **kwargs):
            n = random.randint(x, y)
            # ``range(n)`` runs exactly n times; the original
            # ``range(1, n)`` ran only n - 1 times (off-by-one bug).
            for _ in range(n):
                fn(*args, **kwargs)
        return wrapped
    return decorator
Do something a random amount of times between x & y
def recursive_apply(inval, func):
    """Recursively apply a function to all levels of nested dicts/lists.

    :param inval: the object to run the function on
    :param func: the function that will be run on the inval
    :return: a new structure mirroring ``inval`` with ``func`` applied to
        every non-dict, non-list leaf value
    """
    if isinstance(inval, dict):
        return {key: recursive_apply(value, func)
                for key, value in inval.items()}
    if isinstance(inval, list):
        return [recursive_apply(item, func) for item in inval]
    return func(inval)
Recursively apply a function to all levels of nested iterables :param inval: the object to run the function on :param func: the function that will be run on the inval
def get_account(self, account, use_sis_id=False, **kwargs): if use_sis_id: account_id = account uri_str = 'accounts/sis_account_id:{}' else: account_id = obj_or_id(account, "account", (Account,)) uri_str = 'accounts/{}' response = self.__requester.request( 'GET', uri_str.format(account_id), _kwargs=combine_kwargs(**kwargs) ) return Account(self.__requester, response.json())
Retrieve information on an individual account. :calls: `GET /api/v1/accounts/:id \ <https://canvas.instructure.com/doc/api/accounts.html#method.accounts.show>`_ :param account: The object or ID of the account to retrieve. :type account: int, str or :class:`canvasapi.account.Account` :param use_sis_id: Whether or not account_id is an sis ID. Defaults to `False`. :type use_sis_id: bool :rtype: :class:`canvasapi.account.Account`
def validate(self): validate_url = "https://api.pushover.net/1/users/validate.json" payload = { 'token': self.api_token, 'user': self.user, } return requests.post(validate_url, data=payload)
Validate the user and token, returns the Requests response.
async def close(self, code: int = 1000, reason: str = "") -> None: try: await asyncio.wait_for( self.write_close_frame(serialize_close(code, reason)), self.close_timeout, loop=self.loop, ) except asyncio.TimeoutError: self.fail_connection() try: await asyncio.wait_for( self.transfer_data_task, self.close_timeout, loop=self.loop ) except (asyncio.TimeoutError, asyncio.CancelledError): pass await asyncio.shield(self.close_connection_task)
This coroutine performs the closing handshake. It waits for the other end to complete the handshake and for the TCP connection to terminate. As a consequence, there's no need to await :meth:`wait_closed`; :meth:`close` already does it. :meth:`close` is idempotent: it doesn't do anything once the connection is closed. It's safe to wrap this coroutine in :func:`~asyncio.create_task` since errors during connection termination aren't particularly useful. ``code`` must be an :class:`int` and ``reason`` a :class:`str`.
def lrange(self, name, start, stop): with self.pipe as pipe: f = Future() res = pipe.lrange(self.redis_key(name), start, stop) def cb(): f.set([self.valueparse.decode(v) for v in res.result]) pipe.on_execute(cb) return f
Returns a range of items. :param name: str the name of the redis key :param start: integer representing the start index of the range :param stop: integer representing the size of the list. :return: Future()
def labels(data, label_column, color=None, font_name=FONT_NAME, font_size=14, anchor_x='left', anchor_y='top'): from geoplotlib.layers import LabelsLayer _global_config.layers.append(LabelsLayer(data, label_column, color, font_name, font_size, anchor_x, anchor_y))
Draw a text label for each sample :param data: data access object :param label_column: column in the data access object where the labels text is stored :param color: color :param font_name: font name :param font_size: font size :param anchor_x: anchor x :param anchor_y: anchor y
def _start_primary(self): self.em.start() self.em.set_secondary_state(_STATE_RUNNING) self._set_shared_instances()
Start as the primary
def populate_items(self, request): self._items = self.get_items(request) return self.items
populate and returns filtered items
def parent_tags(self): tags = set() for addr in self._addresses: if addr.attr == 'text': tags.add(addr.element.tag) tags.update(el.tag for el in addr.element.iterancestors()) tags.discard(HTMLFragment._root_tag) return frozenset(tags)
Provides tags of all parent HTML elements.
def prepare(self, data): result = {} if not self.fields: return data for fieldname, lookup in self.fields.items(): if isinstance(lookup, SubPreparer): result[fieldname] = lookup.prepare(data) else: result[fieldname] = self.lookup_data(lookup, data) return result
Handles transforming the provided data into the fielded data that should be exposed to the end user. Uses the ``lookup_data`` method to traverse dotted paths. Returns a dictionary of data as the response.
def _spacingx(node, max_dims, xoffset, xspace):
    """Determine the x spacing of the current node depending on the number
    of the leaves of the tree.

    Side effect: widens ``max_dims[0]`` in place when this node needs more
    horizontal room than previously recorded.
    """
    width = _n_terminations(node) * xspace
    max_dims[0] = max(max_dims[0], width)
    return xoffset - width / 2.
Determine the spacing of the current node depending on the number of the leaves of the tree
def get_networks_by_name(self, name: str) -> List[Network]: return self.session.query(Network).filter(Network.name.like(name)).all()
Get all networks with the given name. Useful for getting all versions of a given network.
def plot_vyx(self, colorbar=True, cb_orientation='vertical', cb_label=None, ax=None, show=True, fname=None, **kwargs): if cb_label is None: cb_label = self._vyx_label if ax is None: fig, axes = self.vyx.plot(colorbar=colorbar, cb_orientation=cb_orientation, cb_label=cb_label, show=False, **kwargs) if show: fig.show() if fname is not None: fig.savefig(fname) return fig, axes else: self.vyx.plot(colorbar=colorbar, cb_orientation=cb_orientation, cb_label=cb_label, ax=ax, **kwargs)
Plot the Vyx component of the tensor. Usage ----- x.plot_vyx([tick_interval, xlabel, ylabel, ax, colorbar, cb_orientation, cb_label, show, fname]) Parameters ---------- tick_interval : list or tuple, optional, default = [30, 30] Intervals to use when plotting the x and y ticks. If set to None, ticks will not be plotted. xlabel : str, optional, default = 'longitude' Label for the longitude axis. ylabel : str, optional, default = 'latitude' Label for the latitude axis. ax : matplotlib axes object, optional, default = None A single matplotlib axes object where the plot will appear. colorbar : bool, optional, default = True If True, plot a colorbar. cb_orientation : str, optional, default = 'vertical' Orientation of the colorbar: either 'vertical' or 'horizontal'. cb_label : str, optional, default = '$V_{yx}$' Text label for the colorbar. show : bool, optional, default = True If True, plot the image to the screen. fname : str, optional, default = None If present, and if axes is not specified, save the image to the specified file. kwargs : optional Keyword arguments that will be sent to the SHGrid.plot() and plt.imshow() methods.
def _get_variable(vid, variables): if isinstance(vid, six.string_types): vid = get_base_id(vid) else: vid = _get_string_vid(vid) for v in variables: if vid == get_base_id(v["id"]): return copy.deepcopy(v) raise ValueError("Did not find variable %s in \n%s" % (vid, pprint.pformat(variables)))
Retrieve an input variable from our existing pool of options.
def can_handle(self, text: str) -> bool:
    """Check whether this parser can parse ``text``.

    The check actually attempts a parse: every changelog section must
    split into a header/changes pair and at least one of header or
    changes must parse successfully for each section.
    """
    try:
        changelogs = self.split_changelogs(text)
        if not changelogs:
            return False
        for changelog in changelogs:
            _header, _changes = self.split_changelog(changelog)
            if not any((_header, _changes)):
                return False
            header = self.parse_header(_header)
            changes = self.parse_changes(_changes)
            if not any((header, changes)):
                return False
    except Exception:
        # Broad catch is deliberate: any failure means "cannot handle".
        return False
    else:
        return True
Check whether this parser can parse the text
def parse(self, s):
    """Parse ``s`` with ``self.date_format`` and return a ``datetime.date``."""
    parsed = datetime.datetime.strptime(s, self.date_format)
    return parsed.date()
Parses a date string formatted like ``YYYY-MM-DD``.
def configure_for_kerberos(self, datanode_transceiver_port=None,
                           datanode_web_port=None):
    """Command to configure the cluster to use Kerberos for authentication.

    @param datanode_transceiver_port: HDFS DataNode transceiver port to
        apply to all DataNode role config groups; server default is used
        when omitted.
    @param datanode_web_port: HDFS DataNode web port to apply to all
        DataNode role config groups; server default is used when omitted.
    @return: Reference to the submitted command.
    @since: API v11
    """
    # Only include ports the caller explicitly set; the server supplies
    # defaults for missing keys.
    args = dict()
    if datanode_transceiver_port:
        args['datanodeTransceiverPort'] = datanode_transceiver_port
    if datanode_web_port:
        args['datanodeWebPort'] = datanode_web_port
    return self._cmd('configureForKerberos', data=args, api_version=11)
Command to configure the cluster to use Kerberos for authentication. This command will configure all relevant services on a cluster for Kerberos usage. This command will trigger a GenerateCredentials command to create Kerberos keytabs for all roles in the cluster. @param datanode_transceiver_port: The HDFS DataNode transceiver port to use. This will be applied to all DataNode role configuration groups. If not specified, this will default to 1004. @param datanode_web_port: The HDFS DataNode web port to use. This will be applied to all DataNode role configuration groups. If not specified, this will default to 1006. @return: Reference to the submitted command. @since: API v11
def _update_entry(entry, status, directives):
    """Update an entry's attributes using the provided directives.

    :param entry: dict mapping each attribute name to a set of its values
    :param status: dict holding cross-invocation status (the
        ``delete_others`` flag and the ``mentioned_attributes`` set)
    :param directives: dict mapping directive types ('default', 'add',
        'delete', 'replace', 'delete_others') to directive-specific state
    :raises ValueError: on an unknown directive name
    """
    for directive, state in six.iteritems(directives):
        if directive == 'delete_others':
            # Boolean flag recorded for the caller; no per-attribute work.
            status['delete_others'] = state
            continue
        for attr, vals in six.iteritems(state):
            status['mentioned_attributes'].add(attr)
            vals = _toset(vals)
            if directive == 'default':
                # Only applies when the attribute is missing or empty.
                if vals and (attr not in entry or not entry[attr]):
                    entry[attr] = vals
            elif directive == 'add':
                vals.update(entry.get(attr, ()))
                if vals:
                    entry[attr] = vals
            elif directive == 'delete':
                existing_vals = entry.pop(attr, OrderedSet())
                if vals:
                    # Remove only the listed values; an empty value set
                    # means the attribute is deleted entirely.
                    existing_vals -= vals
                    if existing_vals:
                        entry[attr] = existing_vals
            elif directive == 'replace':
                entry.pop(attr, None)
                if vals:
                    entry[attr] = vals
            else:
                raise ValueError('unknown directive: ' + directive)
Update an entry's attributes using the provided directives :param entry: A dict mapping each attribute name to a set of its values :param status: A dict holding cross-invocation status (whether delete_others is True or not, and the set of mentioned attributes) :param directives: A dict mapping directive types to directive-specific state
def _get_value(self):
    """Return the delegated point position as an ``(x, y)`` pair.

    The source point's coordinates are transformed from the source
    item's coordinate system into the target item's, and the result is
    cached on ``self._px`` / ``self._py``.
    """
    x, y = self._point.x, self._point.y
    # Item-to-item transform: map (x, y) from _item_point's space into
    # _item_target's space via the canvas matrix.
    self._px, self._py = self._item_point.canvas.get_matrix_i2i(
        self._item_point, self._item_target).transform_point(x, y)
    return self._px, self._py
Return two delegating variables. Each variable should contain a value attribute with the real value.
def configure_arrays(self):
    """Load the SCI, ERR and DQ arrays from the opened HDU list.

    For four-amp ('ABCD') readouts the two chip extensions are stitched
    into single full-frame arrays (the second extension is flipped
    vertically before concatenation).  Finishes by ingesting the dark,
    post-flash and flat-field reference data.
    """
    self.science = self.hdulist['sci', 1].data
    self.err = self.hdulist['err', 1].data
    self.dq = self.hdulist['dq', 1].data
    if (self.ampstring == 'ABCD'):
        # Stitch the second extension (flipped in y) onto the first so
        # downstream code sees one contiguous array per plane.
        self.science = np.concatenate(
            (self.science, self.hdulist['sci', 2].data[::-1, :]), axis=1)
        self.err = np.concatenate(
            (self.err, self.hdulist['err', 2].data[::-1, :]), axis=1)
        self.dq = np.concatenate(
            (self.dq, self.hdulist['dq', 2].data[::-1, :]), axis=1)
    self.ingest_dark()
    self.ingest_flash()
    self.ingest_flatfield()
Get the SCI and ERR data.
def cache_key(*args, **kwargs):
    """Compute a cache key from the given positional and keyword arguments.

    Callables are rendered with ``repr`` (so distinct functions do not
    collide), everything else with ``str``.  Keyword arguments are
    included in sorted order so the key is deterministic.

    :returns: the composed key, one ``:``-prefixed segment per argument
    :rtype: str
    """
    def _render(value):
        # repr() keeps callables distinguishable by identity/name.
        return repr(value) if callable(value) else str(value)

    parts = [":%s" % _render(arg) for arg in args]
    # Bug fix: kwargs used to be silently ignored, so calls differing
    # only in keyword arguments collided on the same cache key.
    parts.extend(":%s=%s" % (name, _render(val))
                 for name, val in sorted(kwargs.items()))
    return "".join(parts)
Base method for computing the cache key with respect to the given arguments.
def badge(pipeline_id):
    """Render a status badge for an individual pipeline.

    Colors: brightgreen for a successful last execution (with a record
    count when available), red for a failed one, lightgray otherwise or
    when the pipeline is unknown.
    """
    # Pipeline ids are stored with a leading './' prefix.
    if not pipeline_id.startswith('./'):
        pipeline_id = './' + pipeline_id
    pipeline_status = status.get(pipeline_id)
    status_color = 'lightgray'
    if pipeline_status.pipeline_details:
        status_text = pipeline_status.state().lower()
        last_execution = pipeline_status.get_last_execution()
        success = last_execution.success if last_execution else None
        if success is True:
            stats = last_execution.stats if last_execution else None
            record_count = stats.get('count_of_rows')
            if record_count is not None:
                # Append the processed row count to the badge text.
                status_text += ' (%d records)' % record_count
            status_color = 'brightgreen'
        elif success is False:
            status_color = 'red'
    else:
        status_text = "not found"
    return _make_badge_response('pipeline', status_text, status_color)
An individual pipeline status
async def sendto(self, data, component):
    """Send a datagram on the specified component.

    Raises `ConnectionError` when no candidate pair has been nominated
    for the component, i.e. the connection is not established.
    """
    pair = self._nominated.get(component)
    if not pair:
        raise ConnectionError('Cannot send data, not connected')
    await pair.protocol.send_data(data, pair.remote_addr)
Send a datagram on the specified component. If the connection is not established, a `ConnectionError` is raised.
def _registerNode(self, nodeAddress, agentId, nodePort=5051):
    """Record communication from an agent.

    Remembers the agent's executor info by address and the address by
    agent ID, creating a fresh ExecutorInfo when the address is new or
    the agent ID changed (agent restarted), otherwise just refreshing
    the last-seen timestamp.

    :returns: the (new or updated) ExecutorInfo for the agent
    """
    executor = self.executors.get(nodeAddress)
    if executor is None or executor.agentId != agentId:
        # Unknown address or the agent was replaced: start fresh.
        executor = self.ExecutorInfo(nodeAddress=nodeAddress,
                                     agentId=agentId,
                                     nodeInfo=None,
                                     lastSeen=time.time())
        self.executors[nodeAddress] = executor
    else:
        executor.lastSeen = time.time()
    self.agentsByID[agentId] = nodeAddress
    return executor
Called when we get communication from an agent. Remembers the information about the agent by address, and the agent address by agent ID.
def pair_hmm_align_unaligned_seqs(seqs, moltype=DNA_cogent, params={}):
    """Pairwise-align exactly two unaligned sequences.

    :param seqs: sequence data accepted by ``LoadSeqs`` (exactly two seqs)
    :param moltype: molecule type for loading (default: DNA)
    :param params: optional overrides: ``gap_open`` (default 5),
        ``gap_extend`` (default 2) and ``score_matrix`` (default DNA
        match=1, transition=-1, transversion=-1).  Read-only; the mutable
        default is never modified.
    :returns: the local pairwise alignment
    :raises ValueError: if ``seqs`` does not contain exactly two sequences
    """
    seqs = LoadSeqs(data=seqs, moltype=moltype, aligned=False)
    try:
        s1, s2 = seqs.values()
    except ValueError:
        raise ValueError(
            "Pairwise aligning of seqs requires exactly two seqs.")
    # dict.get replaces the previous per-key try/except KeyError blocks.
    gap_open = params.get('gap_open', 5)
    gap_extend = params.get('gap_extend', 2)
    score_matrix = params.get('score_matrix')
    if score_matrix is None:
        score_matrix = make_dna_scoring_dict(
            match=1, transition=-1, transversion=-1)
    return local_pairwise(s1, s2, score_matrix, gap_open, gap_extend)
Checks parameters for pairwise alignment, returns alignment. Code from Greg Caporaso.
async def create(gc: GroupControl, name, slaves):
    """Create a new group ``name`` with the given ``slaves`` and echo
    both the request and the controller's response."""
    click.echo("Creating group %s with slaves: %s" % (name, slaves))
    click.echo(await gc.create(name, slaves))
Create new group
def _build_toc_node(docname, anchor="anchor", text="test text", bullet=False):
    """Create the node structure that Sphinx expects for TOC Tree entries.

    :param docname: document the entry links to
    :param anchor: fragment identifier within the document; rendered into
        ``anchorname`` as ``#<anchor>`` (the original code contained a
        garbled literal here and never used this parameter)
    :param text: the entry's display text
    :param bullet: wrap the entry in a ``nodes.bullet_list``, which is
        how TOC Tree entries are nested
    :returns: a ``list_item`` node, or a ``bullet_list`` wrapping it
    """
    reference = nodes.reference(
        "", "", internal=True, refuri=docname,
        # Fix: point anchorname at the requested anchor instead of the
        # previously broken string literal.
        anchorname="#" + anchor,
        *[nodes.Text(text, text)])
    para = addnodes.compact_paragraph("", "", reference)
    ret_list = nodes.list_item("", para)
    return nodes.bullet_list("", ret_list) if bullet else ret_list
Create the node structure that Sphinx expects for TOC Tree entries. The ``bullet`` argument wraps it in a ``nodes.bullet_list``, which is how you nest TOC Tree entries.
def _hdparm(args, failhard=True):
    """Execute ``hdparm`` with the given arguments.

    On a non-zero exit code: raise CommandExecutionError when
    ``failhard`` is True, otherwise only log a warning.  Returns the
    command's stdout in either case.
    """
    command = 'hdparm {0}'.format(args)
    result = __salt__['cmd.run_all'](command)
    if result['retcode'] != 0:
        message = '{0}: {1}'.format(command, result['stderr'])
        if failhard:
            raise CommandExecutionError(message)
        log.warning(message)
    return result['stdout']
Execute hdparm Fail hard when required return output when possible
def _start_loop(self):
    """Start the main event-handler loop (blocks; run in handler thread).

    Subscribes to NetworkManager property changes over the system D-Bus
    and dispatches them to the VPN signal handler.
    """
    loop = GObject.MainLoop()
    bus = SystemBus()
    manager = bus.get(".NetworkManager")
    # Route NM property-change signals to our VPN state handler.
    manager.onPropertiesChanged = self._vpn_signal_handler
    loop.run()
Starts main event handler loop, run in handler thread t.
def _upload_simple(self, upload_info, _=None):
    """Perform a simple (single-request) upload and return its quickkey.

    Intended for small files (below the simple-upload size limit).

    :param upload_info: UploadInfo object describing the file to upload
    :param _: ignored (kept for signature compatibility with other
        upload strategies)
    """
    upload_result = self._api.upload_simple(
        upload_info.fd,
        upload_info.name,
        folder_key=upload_info.folder_key,
        filedrop_key=upload_info.filedrop_key,
        path=upload_info.path,
        file_size=upload_info.size,
        file_hash=upload_info.hash_info.file,
        action_on_duplicate=upload_info.action_on_duplicate)
    logger.debug("upload_result: %s", upload_result)
    # The API only returns an upload key; poll until the upload resolves.
    upload_key = upload_result['doupload']['key']
    return self._poll_upload(upload_key, 'upload/simple')
Simple upload and return quickkey Can be used for small files smaller than UPLOAD_SIMPLE_LIMIT_BYTES upload_info -- UploadInfo object check_result -- ignored
def batch_get_assets_history(
        self,
        parent,
        content_type,
        read_time_window,
        asset_names=None,
        retry=google.api_core.gapic_v1.method.DEFAULT,
        timeout=google.api_core.gapic_v1.method.DEFAULT,
        metadata=None,
):
    """Batch-get the update history of assets overlapping a time window.

    Args:
        parent (str): Required. Relative name of the root asset
            (organization, project id or project number).
        content_type (~google.cloud.asset_v1.types.ContentType):
            Required. The content type.
        read_time_window (Union[dict, ~google.cloud.asset_v1.types.TimeWindow]):
            Optional time window for the asset history.
        asset_names (list[str]): Full asset names to query; a no-op when
            empty, max 100 per request.
        retry (Optional[google.api_core.retry.Retry]): Retry policy.
        timeout (Optional[float]): Per-attempt timeout in seconds.
        metadata (Optional[Sequence[Tuple[str, str]]]): Extra metadata.

    Returns:
        A :class:`~google.cloud.asset_v1.types.BatchGetAssetsHistoryResponse`.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: request failed.
        google.api_core.exceptions.RetryError: retries exhausted.
        ValueError: invalid parameters.
    """
    # Lazily wrap the transport method once and cache the wrapper so
    # retry/timeout defaults are applied consistently on later calls.
    if "batch_get_assets_history" not in self._inner_api_calls:
        self._inner_api_calls[
            "batch_get_assets_history"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.batch_get_assets_history,
            default_retry=self._method_configs["BatchGetAssetsHistory"].retry,
            default_timeout=self._method_configs["BatchGetAssetsHistory"].timeout,
            client_info=self._client_info,
        )
    request = asset_service_pb2.BatchGetAssetsHistoryRequest(
        parent=parent,
        content_type=content_type,
        read_time_window=read_time_window,
        asset_names=asset_names,
    )
    return self._inner_api_calls["batch_get_assets_history"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
Batch gets the update history of assets that overlap a time window. For RESOURCE content, this API outputs history with asset in both non-delete or deleted status. For IAM\_POLICY content, this API outputs history when the asset and its attached IAM POLICY both exist. This can create gaps in the output history. If a specified asset does not exist, this API returns an INVALID\_ARGUMENT error. Example: >>> from google.cloud import asset_v1 >>> from google.cloud.asset_v1 import enums >>> >>> client = asset_v1.AssetServiceClient() >>> >>> # TODO: Initialize `parent`: >>> parent = '' >>> >>> # TODO: Initialize `content_type`: >>> content_type = enums.ContentType.CONTENT_TYPE_UNSPECIFIED >>> >>> # TODO: Initialize `read_time_window`: >>> read_time_window = {} >>> >>> response = client.batch_get_assets_history(parent, content_type, read_time_window) Args: parent (str): Required. The relative name of the root asset. It can only be an organization number (such as "organizations/123"), a project ID (such as "projects/my-project-id")", or a project number (such as "projects/12345"). content_type (~google.cloud.asset_v1.types.ContentType): Required. The content type. read_time_window (Union[dict, ~google.cloud.asset_v1.types.TimeWindow]): Optional. The time window for the asset history. Both start\_time and end\_time are optional and if set, it must be after 2018-10-02 UTC. If end\_time is not set, it is default to current timestamp. If start\_time is not set, the snapshot of the assets at end\_time will be returned. The returned results contain all temporal assets whose time window overlap with read\_time\_window. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.asset_v1.types.TimeWindow` asset_names (list[str]): A list of the full names of the assets. For example: ``//compute.googleapis.com/projects/my_project_123/zones/zone1/instances/instance1``. 
See `Resource Names <https://cloud.google.com/apis/design/resource_names#full_resource_name>`__ and `Resource Name Format <https://cloud.google.com/resource-manager/docs/cloud-asset-inventory/resource-name-format>`__ for more info. The request becomes a no-op if the asset name list is empty, and the max size of the asset name list is 100 in one request. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.asset_v1.types.BatchGetAssetsHistoryResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
def _convert_template_option(template): option = {} extraction_method = template.get('extraction_method') if extraction_method == 'guess': option['guess'] = True elif extraction_method == 'lattice': option['lattice'] = True elif extraction_method == 'stream': option['stream'] = True option['pages'] = template.get('page') option['area'] = [round(template['y1'], 3), round(template['x1'], 3), round(template['y2'], 3), round(template['x2'], 3)] return option
Convert Tabula app template to tabula-py option Args: template (dict): Tabula app template Returns: `obj`:dict: tabula-py option
def autobuild_onlycopy():
    """Autobuild a project that does not require building firmware, pcb
    or documentation; it only assembles release output.

    Exits the SCons process with status 1 on any IOTileException.
    """
    try:
        family = utilities.get_family('module_settings.json')
        autobuild_release(family)
        # Expose the release output under the 'release' alias and make it
        # the default SCons target.
        Alias('release', os.path.join('build', 'output'))
        Default(['release'])
    except unit_test.IOTileException as e:
        print(e.format())
        Exit(1)
Autobuild a project that does not require building firmware, pcb or documentation
def get_staff(self, gradebook_id, simple=False):
    """Get the staff list for a gradebook.

    Args:
        gradebook_id (str): unique identifier for the gradebook,
            e.g. ``2314``; falls back to ``self.gradebook_id`` when falsy.
        simple (bool): when True, return a flat list of dicts with only
            ``accountEmail``, ``displayName`` and ``role`` per member
            (members with multiple roles appear once per role).

    Returns:
        The service's full staff payload (``data`` key), or the
        simplified list when ``simple`` is True.
    """
    staff_data = self.get(
        'staff/{gradebookId}'.format(
            gradebookId=gradebook_id or self.gradebook_id
        ),
        params=None,
    )
    if simple:
        # Comprehension over the unravelled (per-role) member list,
        # keeping only the three fields of interest.  Replaces the old
        # manual append loop over an explicit __iter__() call.
        return [
            {
                'accountEmail': member['accountEmail'],
                'displayName': member['displayName'],
                'role': member['role'],
            }
            for member in self.unravel_staff(staff_data)
        ]
    return staff_data['data']
Get staff list for gradebook. Get staff list for the gradebook specified. Optionally, return a less detailed list by specifying ``simple = True``. If simple=True, return a list of dictionaries, one dictionary for each member. The dictionary contains a member's ``email``, ``displayName``, and ``role``. Members with multiple roles will appear in the list once for each role. Args: gradebook_id (str): unique identifier for gradebook, i.e. ``2314`` simple (bool): Return a staff list with less detail. Default is ``False``. Returns: An example return value is: .. code-block:: python { u'data': { u'COURSE_ADMIN': [ { u'accountEmail': u'benfranklin@mit.edu', u'displayName': u'Benjamin Franklin', u'editable': False, u'email': u'benfranklin@mit.edu', u'givenName': u'Benjamin', u'middleName': None, u'mitId': u'921344431', u'nickName': u'Benjamin', u'personId': 10710616, u'sortableName': u'Franklin, Benjamin', u'surname': u'Franklin', u'year': None }, ], u'COURSE_PROF': [ { u'accountEmail': u'dduck@mit.edu', u'displayName': u'Donald Duck', u'editable': False, u'email': u'dduck@mit.edu', u'givenName': u'Donald', u'middleName': None, u'mitId': u'916144889', u'nickName': u'Donald', u'personId': 8117160, u'sortableName': u'Duck, Donald', u'surname': u'Duck', u'year': None }, ], u'COURSE_TA': [ { u'accountEmail': u'hduck@mit.edu', u'displayName': u'Huey Duck', u'editable': False, u'email': u'hduck@mit.edu', u'givenName': u'Huey', u'middleName': None, u'mitId': u'920445024', u'nickName': u'Huey', u'personId': 1299059, u'sortableName': u'Duck, Huey', u'surname': u'Duck', u'year': None }, ] }, }
def get_authors(self, entry):
    """Return the entry's authors rendered as HTML.

    Authors are linked to their absolute URLs when reversible;
    otherwise a plain comma-separated, escaped list of usernames is
    returned.
    """
    try:
        return format_html_join(
            ', ', '<a href="{}" target="blank">{}</a>',
            [(author.get_absolute_url(),
              getattr(author, author.USERNAME_FIELD))
             for author in entry.authors.all()])
    except NoReverseMatch:
        # No URL pattern for authors: fall back to unlinked names.
        return ', '.join(
            [conditional_escape(getattr(author, author.USERNAME_FIELD))
             for author in entry.authors.all()])
Return the authors in HTML.
def as_text(self, max_rows=0, sep=" | "):
    """Format the table as plain text.

    :param max_rows: maximum number of data rows to render; 0 (or any
        value larger than the table) renders all rows, with a trailing
        '... (N rows omitted)' line when truncated
    :param sep: column separator string
    :returns: the rendered table as a single newline-joined string
    """
    if not max_rows or max_rows > self.num_rows:
        max_rows = self.num_rows
    omitted = max(0, self.num_rows - max_rows)
    labels = self._columns.keys()
    fmts = self._get_column_formatters(max_rows, False)
    # First rendered row is the header (column labels).
    rows = [[fmt(label, label=True) for fmt, label in zip(fmts, labels)]]
    for row in itertools.islice(self.rows, max_rows):
        rows.append([f(v, label=False) for v, f in zip(row, fmts)])
    lines = [sep.join(row) for row in rows]
    if omitted:
        lines.append('... ({} rows omitted)'.format(omitted))
    # rstrip each line so padded final columns don't leave trailing spaces.
    return '\n'.join([line.rstrip() for line in lines])
Format table as text.
def _estimate_centers_widths(
        self, unique_R, inds, X, W, init_centers, init_widths,
        template_centers, template_widths, template_centers_mean_cov,
        template_widths_mean_var_reci):
    """Estimate factor centers and widths by nonlinear least squares.

    Parameters
    ----------
    unique_R : list of array
        Unique values per dimension of the coordinate matrix R.
    inds : list of array
        Indices reconstructing each original coordinate dimension from
        the unique arrays.
    X : 2D array, [n_voxel, n_tr]
        fMRI data from one subject.
    W : 2D array, [K, n_tr]
        The weight matrix.
    init_centers : 2D array, [K, n_dim]
        Initial center values.
    init_widths : 1D array
        Initial width values.
    template_centers, template_widths
        Template priors on centers and widths.
    template_centers_mean_cov : 2D array, [K, cov_size]
        Template prior on the centers' mean.
    template_widths_mean_var_reci : 1D array
        Reciprocal of the template prior variance of the widths' mean.

    Returns
    -------
    (x, cost)
        Newly estimated centers-and-widths vector and the final cost.
    """
    # Optimize centers and widths jointly as one flattened vector.
    init_estimate = np.hstack(
        (init_centers.ravel(), init_widths.ravel()))
    # Noise scale for the data term of the residual.
    data_sigma = 1.0 / math.sqrt(2.0) * np.std(X)
    final_estimate = least_squares(
        self._residual_multivariate,
        init_estimate,
        args=(
            unique_R, inds, X, W, template_centers, template_widths,
            template_centers_mean_cov, template_widths_mean_var_reci,
            data_sigma),
        method=self.nlss_method,
        loss=self.nlss_loss,
        bounds=self.bounds,
        verbose=0,
        x_scale=self.x_scale,
        tr_solver=self.tr_solver)
    return final_estimate.x, final_estimate.cost
Estimate centers and widths Parameters ---------- unique_R : a list of array, Each element contains unique value in one dimension of coordinate matrix R. inds : a list of array, Each element contains the indices to reconstruct one dimension of original cooridnate matrix from the unique array. X : 2D array, with shape [n_voxel, n_tr] fMRI data from one subject. W : 2D array, with shape [K, n_tr] The weight matrix. init_centers : 2D array, with shape [K, n_dim] The initial values of centers. init_widths : 1D array The initial values of widths. template_centers: 1D array The template prior on centers template_widths: 1D array The template prior on widths template_centers_mean_cov: 2D array, with shape [K, cov_size] The template prior on centers' mean template_widths_mean_var_reci: 1D array The reciprocal of template prior on variance of widths' mean Returns ------- final_estimate.x: 1D array The newly estimated centers and widths. final_estimate.cost: float The cost value.
def _GetCachedEntryDataTypeMap(
        self, format_type, value_data, cached_entry_offset):
    """Determine the cached entry data type map.

    Args:
        format_type (int): format type.
        value_data (bytes): value data.
        cached_entry_offset (int): offset of the first cached entry data
            relative to the start of the value data.

    Returns:
        dtfabric.DataTypeMap: data type map for the cached entry, or
            None if no map name was determined.

    Raises:
        ParseError: if the format type is unsupported.
    """
    if format_type not in self._SUPPORTED_FORMAT_TYPES:
        raise errors.ParseError('Unsupported format type: {0:d}'.format(
            format_type))
    data_type_map_name = ''
    if format_type == self._FORMAT_TYPE_XP:
        data_type_map_name = 'appcompatcache_cached_entry_xp_32bit'
    elif format_type in (self._FORMAT_TYPE_8, self._FORMAT_TYPE_10):
        data_type_map_name = 'appcompatcache_cached_entry_header_8'
    else:
        # 2003/Vista/7 formats: probe the entry to decide 32 vs 64 bit
        # from which path offset field is populated.
        cached_entry = self._ParseCommon2003CachedEntry(
            value_data, cached_entry_offset)
        if (cached_entry.path_offset_32bit == 0 and
                cached_entry.path_offset_64bit != 0):
            number_of_bits = '64'
        else:
            number_of_bits = '32'
        if format_type == self._FORMAT_TYPE_2003:
            data_type_map_name = (
                'appcompatcache_cached_entry_2003_{0:s}bit'.format(
                    number_of_bits))
        elif format_type == self._FORMAT_TYPE_VISTA:
            data_type_map_name = (
                'appcompatcache_cached_entry_vista_{0:s}bit'.format(
                    number_of_bits))
        elif format_type == self._FORMAT_TYPE_7:
            data_type_map_name = (
                'appcompatcache_cached_entry_7_{0:s}bit'.format(
                    number_of_bits))
    return self._GetDataTypeMap(data_type_map_name)
Determines the cached entry data type map. Args: format_type (int): format type. value_data (bytes): value data. cached_entry_offset (int): offset of the first cached entry data relative to the start of the value data. Returns: dtfabric.DataTypeMap: data type map which contains a data type definition, such as a structure, that can be mapped onto binary data or None if the data type map is not defined. Raises: ParseError: if the cached entry data type map cannot be determined.
def _unhandled_event_default(event):
    """Default unhandled-event handler for simple scene navigation.

    'x'/'q' (either case) stop the application; space/enter advance to
    the next scene.  Non-keyboard events are ignored.
    """
    if not isinstance(event, KeyboardEvent):
        return
    code = event.key_code
    if code in (ord("X"), ord("x"), ord("Q"), ord("q")):
        raise StopApplication("User terminated app")
    if code in (ord(" "), ord("\n"), ord("\r")):
        raise NextScene()
Default unhandled event handler for handling simple scene navigation.
def Find(cls, setting_matcher, port_path=None, serial=None, timeout_ms=None):
    """Get the first device that matches according to the keyword args.

    Preference order for the device matcher: explicit ``port_path``,
    then ``serial``; with neither, the first device matching
    ``setting_matcher`` is returned.
    """
    if port_path:
        device_matcher = cls.PortPathMatcher(port_path)
        usb_info = port_path
    elif serial:
        device_matcher = cls.SerialMatcher(serial)
        usb_info = serial
    else:
        # No selector given: accept the first matching device.
        device_matcher = None
        usb_info = 'first'
    return cls.FindFirst(setting_matcher, device_matcher,
                         usb_info=usb_info, timeout_ms=timeout_ms)
Gets the first device that matches according to the keyword args.
def enable_vmm_statistics(self, enable):
    """Enable or disable collection of VMM RAM statistics.

    in enable of type bool
        True enables statistics collection.

    Raises TypeError when *enable* is not a bool; the underlying call
    may raise VBoxErrorInvalidVmState / VBoxErrorInvalidObjectState.
    """
    if isinstance(enable, bool):
        self._call("enableVMMStatistics", in_p=[enable])
    else:
        raise TypeError("enable can only be an instance of type bool")
Enables or disables collection of VMM RAM statistics. in enable of type bool True enables statistics collection. raises :class:`VBoxErrorInvalidVmState` Machine session is not open. raises :class:`VBoxErrorInvalidObjectState` Session type is not direct.