code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _load_data(self):
    """Load raw data from ``raw_data`` or, when unset, read it from ``file_path``.

    PYTHON-format sources need no raw text, so nothing is loaded for them.

    Raises:
        ArgumentInvalid: if neither ``raw_data`` nor a readable ``file_path``
            is available.
    """
    if self.raw_data is None and self.data_format is not FormatType.PYTHON:
        if self.file_path is None:
            raise ArgumentInvalid('One of "raw_data" or "file_path" should be set!')
        if not os.path.isfile(self.file_path) or not os.access(self.file_path, os.R_OK):
            # Fixed grammar of the user-facing message ("an exist file").
            raise ArgumentInvalid('"file_path" should be a valid path to an existing file with read permission!')
        with open(self.file_path) as f:
            self.raw_data = f.read()
Load data from raw_data or file_path
def get_doc(additional_doc=False, field_prefix='$', field_suffix=':', indent=4):
    """Return a formatted string documenting the audio fields.

    Args:
        additional_doc: optional dict of extra field descriptions merged over
            the module-level ``fields``.
        field_prefix: string printed before each field name.
        field_suffix: string printed after each field name.
        indent: number of spaces before each field name.
    """
    if additional_doc:
        f = fields.copy()
        f.update(additional_doc)
    else:
        f = fields
    field_length = get_max_field_length(f)
    # Column width: field name plus prefix/suffix plus 4 spaces of padding.
    field_length = field_length + len(field_prefix) + len(field_suffix) + 4
    description_indent = ' ' * (indent + field_length)
    output = ''
    for field, description in sorted(f.items()):
        description = description['description']
        # BUGFIX: use the field_suffix parameter instead of a hardcoded ':'.
        # field_length already accounted for len(field_suffix), so the
        # layout assumed it was being used.
        field = ' ' * indent + field_prefix + field + field_suffix
        output += field.ljust(field_length) + \
            textwrap.fill(
                description,
                width=78,
                initial_indent=description_indent,
                subsequent_indent=description_indent
            )[field_length:] + '\n\n\n'
    return output
Return a formatted string containing documentation about the audio fields.
def find_module_path():
    """Return the directory containing the top-level (master) package."""
    top_level_name = __name__.split(".", 1)[0]
    top_level_module = sys.modules[top_level_name]
    source_file = inspect.getsourcefile(top_level_module)
    return os.path.dirname(source_file)
find where the master module is located
def mousePressEvent(self, event):
    """Jump to the window the user clicked on in the overview.

    Parameters
    ----------
    event : instance of QtCore.QEvent
        it contains the position that was clicked.
    """
    if self.scene is not None:
        x_in_scene = self.mapToScene(event.pos()).x()
        window_length = self.parent.value('window_length')
        # Snap the clicked x-coordinate down to the start of its window.
        window_start = int(floor(x_in_scene / window_length) * window_length)
        if self.parent.notes.annot is not None:
            # Align to the annotation's epoch grid when annotations exist.
            window_start = self.parent.notes.annot.get_epoch_start(
                window_start)
        self.update_position(window_start)
Jump to window when user clicks on overview. Parameters ---------- event : instance of QtCore.QEvent it contains the position that was clicked.
def is_bool(tg_type, inc_array=False):
    """Tell whether the given tango type is boolean.

    :param tg_type: tango type
    :param inc_array: if True, array boolean types also count
    :return: True if the type is boolean, False otherwise
    """
    global _scalar_bool_types, _array_bool_types
    if tg_type in _scalar_bool_types:
        return True
    if inc_array:
        return tg_type in _array_bool_types
    return False
Tells if the given tango type is boolean :param tg_type: tango type :type tg_type: :class:`tango.CmdArgType` :param inc_array: (optional, default is False) determines if include array in the list of checked types :type inc_array: :py:obj:`bool` :return: True if the given tango type is boolean or False otherwise :rtype: :py:obj:`bool`
def listscripts(self):
    """List available scripts on the server.

    See MANAGESIEVE specifications, section 2.7.

    :returns: a 2-uple (active script, [script1, ...]), or None if the
        server answered NO.
    """
    code, data, listing = self.__send_command(
        "LISTSCRIPTS", withcontent=True)
    if code == "NO":
        return None
    ret = []
    active_script = None
    for l in listing.splitlines():
        if self.__size_expr.match(l):
            # presumably a literal-size line ({n}) — skip; TODO confirm
            continue
        m = re.match(br'"([^"]+)"\s*(.+)', l)
        if m is None:
            # bare quoted script name with no trailing attributes
            ret += [l.strip(b'"').decode("utf-8")]
            continue
        script = m.group(1).decode("utf-8")
        if self.__active_expr.match(m.group(2)):
            # the trailing attribute marks this script as the active one
            active_script = script
            continue
        ret += [script]
    self.__dprint(ret)
    return (active_script, ret)
List available scripts. See MANAGESIEVE specifications, section 2.7 :returns: a 2-uple (active script, [script1, ...])
def list_app(self):
    """Render the app-list page for the current user."""
    kwd = {
        'pager': '',
        'title': ''
    }
    self.render('user/info_list/list_app.html',
                kwd=kwd,
                userinfo=self.userinfo)
List the apps.
def remove_network_profile(self, obj, params):
    """Remove the specified AP profile.

    ``obj['guid']`` identifies the WLAN interface; ``params.ssid`` names
    the profile to delete.
    """
    self._logger.debug("delete profile: %s", params.ssid)
    # The native WLAN API expects a wide-character buffer for the name.
    str_buf = create_unicode_buffer(params.ssid)
    ret = self._wlan_delete_profile(self._handle, obj['guid'], str_buf)
    self._logger.debug("delete result %d", ret)
Remove the specified AP profile.
def reset_modified(self):
    """Reset field modification tracking.

    Handy when loading a new Orm from query results: after setting fields
    you can call this so only ObjectField names remain marked modified.
    """
    self.modified_fields = {
        field_name
        for field_name, field in self.schema.normal_fields.items()
        if isinstance(field, ObjectField)
    }
reset field modification tracking this is handy for when you are loading a new Orm with the results from a query and you don't want set() to do anything, you can Orm(**fields) and then orm.reset_modified() to clear all the passed in fields from the modified list
def variant_overlaps_interval(
        variant_start,
        n_ref_bases,
        interval_start,
        interval_end):
    """Does a variant overlap a given interval on the same chromosome?

    All coordinates are inclusive, base-1. A variant with zero reference
    bases (an insertion) overlaps only if the interval contains the base
    immediately before the insertion point.
    """
    if n_ref_bases == 0:
        # Insertion case: the interval must contain variant_start itself.
        return interval_start <= variant_start <= interval_end
    last_ref_position = variant_start + n_ref_bases
    overlaps = (interval_start <= last_ref_position
                and variant_start <= interval_end)
    return overlaps
Does a variant overlap a given interval on the same chromosome? Parameters ---------- variant_start : int Inclusive base-1 position of variant's starting location (or location before an insertion) n_ref_bases : int Number of reference bases affected by the variant (used to compute end coordinate or determine whether variant is an insertion) interval_start : int Interval's inclusive base-1 start position interval_end : int Interval's inclusive base-1 end position
def deploy(self, pathobj, fobj, md5=None, sha1=None, parameters=None):
    """Upload a given file-like object to ``pathobj``.

    HTTP chunked encoding will be attempted.

    :param pathobj: target path object (supplies URL, auth, TLS options)
    :param fobj: file-like object to stream
    :param md5: optional checksum sent as X-Checksum-Md5
    :param sha1: optional checksum sent as X-Checksum-Sha1
    :param parameters: optional matrix parameters appended to the URL
    :raises RuntimeError: if the server does not answer 200 or 201
    """
    if isinstance(fobj, urllib3.response.HTTPResponse):
        # Wrap raw urllib3 responses so they expose a file-like interface.
        fobj = HTTPResponseWrapper(fobj)
    url = str(pathobj)
    if parameters:
        url += ";%s" % encode_matrix_parameters(parameters)
    headers = {}
    if md5:
        headers['X-Checksum-Md5'] = md5
    if sha1:
        headers['X-Checksum-Sha1'] = sha1
    text, code = self.rest_put_stream(url,
                                      fobj,
                                      headers=headers,
                                      auth=pathobj.auth,
                                      verify=pathobj.verify,
                                      cert=pathobj.cert)
    if code not in [200, 201]:
        raise RuntimeError("%s" % text)
Uploads a given file-like object HTTP chunked encoding will be attempted
def zero_missing_data(data1, data2):
    """Replace NaN values with zeros in data1 wherever data2 holds valid data.

    NOTE(review): assumes xarray-like inputs with a ``.where`` method and
    that ``xu`` provides ufunc wrappers — confirm against callers.
    """
    # True exactly where data1 is NaN but data2 is not.
    nans = xu.logical_and(xu.isnan(data1), xu.logical_not(xu.isnan(data2)))
    return data1.where(~nans, 0)
Replace NaN values with zeros in data1 if the data is valid in data2.
def get_closest(self, sma):
    """Return the isophote whose semimajor axis is nearest to ``sma``.

    Parameters
    ----------
    sma : float
        The semimajor axis length.

    Returns
    -------
    The stored isophote with the closest semimajor axis value.
    """
    distances = np.abs(self.sma - sma)
    nearest_index = distances.argmin()
    return self._list[nearest_index]
Return the `~photutils.isophote.Isophote` instance that has the closest semimajor axis length to the input semimajor axis. Parameters ---------- sma : float The semimajor axis length. Returns ------- isophote : `~photutils.isophote.Isophote` instance The isophote with the closest semimajor axis value.
def get_unique_directives(ast):
    """Return a dict of directive name to directive object for the AST node.

    Directives in ALLOWED_DUPLICATED_DIRECTIVES are skipped entirely; any
    other directive appearing more than once raises GraphQLCompilationError.

    Args:
        ast: GraphQL AST node, obtained from the graphql library

    Returns:
        dict of string to directive object
    """
    if not ast.directives:
        return dict()
    result = dict()
    for directive_obj in ast.directives:
        directive_name = directive_obj.name.value
        if directive_name in ALLOWED_DUPLICATED_DIRECTIVES:
            # Duplicates are legal for these, so they are not collected.
            pass
        elif directive_name in result:
            raise GraphQLCompilationError(u'Directive was unexpectedly applied twice in the same '
                                          u'location: {} {}'.format(directive_name, ast.directives))
        else:
            result[directive_name] = directive_obj
    return result
Return a dict of directive name to directive object for the given AST node. Any directives that are allowed to exist more than once on any AST node are ignored. For any directives that can only exist up to once, we verify that they are not duplicated raising GraphQLCompilationError in case we find them more than once on the AST node. Args: ast: GraphQL AST node, obtained from the graphql library Returns: dict of string to directive object
def _list_existing(filesystem, glob, paths):
    """Get all the paths that do in fact exist.

    Takes a luigi.target.FileSystem object, a glob string and a list of
    path strings. Returns a set of all existing paths.
    """
    # Narrow the glob using the candidate paths before hitting the FS.
    globs = _constrain_glob(glob, paths)
    time_start = time.time()
    listing = []
    for g in sorted(globs):
        logger.debug('Listing %s', g)
        if filesystem.exists(g):
            listing.extend(filesystem.listdir(g))
    logger.debug('%d %s listings took %f s to return %d items',
                 len(globs), filesystem.__class__.__name__,
                 time.time() - time_start, len(listing))
    return set(listing)
Get all the paths that do in fact exist. Returns a set of all existing paths. Takes a luigi.target.FileSystem object, a str which represents a glob and a list of strings representing paths.
def build(path: str) -> dict:
    """Build the docker image for the dockerfile at ``path``.

    Returns a dict with the build id, source path, the docker command that
    was run, and the tags that were applied.
    """
    update_base_image(path)
    # presumably the dockerfile name encodes the build id — see the
    # module-level file_pattern; TODO confirm
    match = file_pattern.search(os.path.basename(path))
    build_id = match.group('id')
    tags = [
        '{}:{}-{}'.format(HUB_PREFIX, version, build_id),
        '{}:latest-{}'.format(HUB_PREFIX, build_id),
        '{}:current-{}'.format(HUB_PREFIX, build_id)
    ]
    if build_id == 'standard':
        # The standard build doubles as the plain "latest" tag.
        tags.append('{}:latest'.format(HUB_PREFIX))
    command = 'docker build --file "{}" {} .'.format(
        path,
        ' '.join(['-t {}'.format(t) for t in tags])
    )
    print('[BUILDING]:', build_id)
    os.system(command)
    return dict(
        id=build_id,
        path=path,
        command=command,
        tags=tags
    )
Builds the container from the specified docker file path
def expect_column_values_to_be_of_type(
    self,
    column,
    type_,
    mostly=None,
    result_format=None, include_config=False, catch_exceptions=None, meta=None
):
    """Expect each column entry to be a specified data type.

    Abstract stub: concrete Dataset backends must override this
    column_map_expectation.

    Args:
        column (str): the column name.
        type_ (str): the data type each entry should have.

    Keyword Args:
        mostly (None or float in [0, 1]): minimum fraction of matching
            values for success.

    Other Parameters:
        result_format, include_config, catch_exceptions, meta: standard
            expectation arguments.

    Returns:
        A JSON-serializable expectation result object (in subclasses).
    """
    raise NotImplementedError
Expect each column entry to be a specified data type. expect_column_values_to_be_of_type is a :func:`column_map_expectation <great_expectations.data_asset.dataset.Dataset.column_map_expectation>`. Args: column (str): \ The column name. type\_ (str): \ A string representing the data type that each column should have as entries. For example, "double integer" refers to an integer with double precision. Keyword Args: mostly (None or a float between 0 and 1): \ Return `"success": True` if at least mostly percent of values match the expectation. \ For more detail, see :ref:`mostly`. Other Parameters: result_format (str or None): \ Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. For more detail, see :ref:`result_format <result_format>`. include_config (boolean): \ If True, then include the expectation config as part of the result object. \ For more detail, see :ref:`include_config`. catch_exceptions (boolean or None): \ If True, then catch exceptions and include them as part of the result object. \ For more detail, see :ref:`catch_exceptions`. meta (dict or None): \ A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \ For more detail, see :ref:`meta`. Returns: A JSON-serializable expectation result object. Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`. Warning: expect_column_values_to_be_of_type is slated for major changes in future versions of great_expectations. As of v0.3, great_expectations is exclusively based on pandas, which handles typing in its own peculiar way. Future versions of great_expectations will allow for Datasets in SQL, spark, etc. When we make that change, we expect some breaking changes in parts of the codebase that are based strongly on pandas notions of typing. See also: expect_column_values_to_be_in_type_list
def create_element(tag: str, name: str = None, base: type = None,
                   attr: dict = None) -> Node:
    """Create a new element node.

    :arg str tag: html tag of the new element.
    :arg str name: custom-element name used for the registry lookup, if any.
    :arg type base: base class of the created element (default
        ``WdomElement``).
    :arg dict attr: attributes (key-value pairs dict) of the new element.
    """
    # Local imports avoid circular dependencies between wdom modules.
    from wdom.web_node import WdomElement
    from wdom.tag import Tag
    from wdom.window import customElements
    if attr is None:
        attr = {}
    # Registered custom elements are keyed by (name, tag) when a name is
    # given, otherwise by (tag, None).
    if name:
        base_class = customElements.get((name, tag))
    else:
        base_class = customElements.get((tag, None))
    if base_class is None:
        # Not a registered custom element: mark it and fall back to base.
        attr['_registered'] = False
        base_class = base or WdomElement
    if issubclass(base_class, Tag):
        # Tag subclasses already know their own tag name.
        return base_class(**attr)
    return base_class(tag, **attr)
Create an element with the given tag, optionally as the custom element ``name``. :arg str tag: html tag. :arg str name: custom-element name used for the registry lookup. :arg type base: Base class of the created element (default: ``WdomElement``) :arg dict attr: Attributes (key-value pairs dict) of the new element.
def object_data(self, multihash, **kwargs):
    r"""Return the raw bytes in an IPFS object.

    .. code-block:: python

        >>> c.object_data('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D')
        b'\x08\x01'

    Parameters
    ----------
    multihash : str
        Key of the object to retrieve, in base58-encoded multihash format

    Returns
    -------
    str : Raw object data
    """
    args = (multihash,)
    return self._client.request('/object/data', args, **kwargs)
r"""Returns the raw bytes in an IPFS object. .. code-block:: python >>> c.object_data('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D') b'\x08\x01' Parameters ---------- multihash : str Key of the object to retrieve, in base58-encoded multihash format Returns ------- str : Raw object data
def get_remove_security_group_commands(self, sg_id, profile):
    """Return the commands that remove the security-group ACL from an interface."""
    # delete=True turns the shared interface-command builder into removal mode.
    return self._get_interface_commands(sg_id, profile, delete=True)
Commands for removing ACL from interface
def _hook(self, hook_name, doc_uri=None, **kwargs):
    """Call ``hook_name`` on all enabled plugin handlers.

    Returns a list of results from every registered (non-disabled) handler.
    """
    # Resolve the document only when a URI is given.
    doc = self.workspace.get_document(doc_uri) if doc_uri else None
    hook_handlers = self.config.plugin_manager.subset_hook_caller(hook_name, self.config.disabled_plugins)
    return hook_handlers(config=self.config, workspace=self.workspace, document=doc, **kwargs)
Calls hook_name and returns a list of results from all registered handlers
def repmi(instr, marker, value, lenout=None):
    """Replace a marker with an integer.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/repmi_c.html

    :param instr: Input string.
    :type instr: str
    :param marker: Marker to be replaced.
    :type marker: str
    :param value: Replacement value.
    :type value: int
    :param lenout: Optional available space in output string.
    :type lenout: int
    :return: Output string.
    :rtype: str
    """
    if lenout is None:
        # Default output size: the input plus room for the integer text.
        lenout = ctypes.c_int(len(instr) + len(marker) + 15)
    instr = stypes.stringToCharP(instr)
    marker = stypes.stringToCharP(marker)
    value = ctypes.c_int(value)
    # presumably stringToCharP called with a length allocates an output
    # buffer of that size — confirm against stypes
    out = stypes.stringToCharP(lenout)
    libspice.repmi_c(instr, marker, value, lenout, out)
    return stypes.toPythonString(out)
Replace a marker with an integer. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/repmi_c.html :param instr: Input string. :type instr: str :param marker: Marker to be replaced. :type marker: str :param value: Replacement value. :type value: int :param lenout: Optional available space in output string. :type lenout: int :return: Output string. :rtype: str
def reset_default(verbose=False):
    """Remove custom.css and custom fonts, restoring the stock Jupyter files."""
    paths = [jupyter_custom, jupyter_nbext]
    for fpath in paths:
        custom = '{0}{1}{2}.css'.format(fpath, os.sep, 'custom')
        try:
            os.remove(custom)
        except Exception:
            # Best-effort removal: a missing file is fine.
            pass
    try:
        delete_font_files()
    except Exception:
        # Font directories may not exist yet; create them and retry once.
        check_directories()
        delete_font_files()
    copyfile(defaultCSS, jupyter_customcss)
    copyfile(defaultJS, jupyter_customjs)
    if os.path.exists(theme_name_file):
        os.remove(theme_name_file)
    if verbose:
        print("Reset css and font defaults in:\n{} &\n{}".format(*paths))
Remove custom.css and custom fonts
def userinfo(self, access_token):
    """Return the user information for the given Auth0 access token.

    Works only if ``openid`` was granted as a scope for the token.

    Args:
        access_token (str): Auth0 access token (obtained during login).

    Returns:
        The user profile.
    """
    return self.get(
        url='https://{}/userinfo'.format(self.domain),
        headers={'Authorization': 'Bearer {}'.format(access_token)}
    )
Returns the user information based on the Auth0 access token. This endpoint will work only if openid was granted as a scope for the access_token. Args: access_token (str): Auth0 access token (obtained during login). Returns: The user profile.
def parse_xml_string(self, xml, id_generator=None):
    """Parse a string (or bytes) of XML, returning a usage id.

    Passing ``id_generator`` directly is deprecated; construct the Runtime
    with it instead.
    """
    if id_generator is not None:
        warnings.warn(
            "Passing an id_generator directly is deprecated "
            "in favor of constructing the Runtime with the id_generator",
            DeprecationWarning,
            stacklevel=2,
        )
    id_generator = id_generator or self.id_generator
    # Choose the io wrapper matching the payload type (bytes vs text).
    if isinstance(xml, six.binary_type):
        io_type = BytesIO
    else:
        io_type = StringIO
    return self.parse_xml_file(io_type(xml), id_generator)
Parse a string of XML, returning a usage id.
def train_agent_real_env(env, learner, hparams, epoch):
    """Train the PPO agent in the real environment for one epoch."""
    base_algo_str = hparams.base_algo
    train_hparams = trainer_lib.create_hparams(hparams.base_algo_params)
    # Overlay the "real_<algo>_"-prefixed hparams onto the training hparams.
    rl_utils.update_hparams_from_hparams(
        train_hparams, hparams, "real_" + base_algo_str + "_"
    )
    if hparams.wm_policy_param_sharing:
        train_hparams.optimizer_zero_grads = True
    env_fn = rl.make_real_env_fn(env)
    num_env_steps = real_env_step_increment(hparams)
    learner.train(
        env_fn,
        train_hparams,
        simulated=False,
        save_continuously=False,
        epoch=epoch,
        sampling_temp=hparams.real_sampling_temp,
        num_env_steps=num_env_steps,
    )
    # Leave the environment in a clean state for the next caller.
    env.reset()
Train the PPO agent in the real environment.
def execute(self, context):
    """Run every middleware over the context, threading the bound result.

    Each middleware sees premessage(ctx), then replaces ctx via bind(ctx),
    then postmessage(new_ctx). The final context is returned.
    """
    current = context
    for middleware in self.middleware:
        middleware.premessage(current)
        current = middleware.bind(current)
        middleware.postmessage(current)
    return current
Execute the strategies on the given context
def get_url(url, data=None, cached=True, cache_key=None, crawler='urllib'):
    """Retrieve the HTML code for a given URL as a unicode string.

    data - additional data passed on to the crawler.
    cached - if True, return the cached copy when available. Even when
        False, the fetched page is still stored in the cache.
    cache_key - if set, used instead of the URL to look up / store the
        cached version of the page.
    crawler - a string referencing one of the builtin crawlers.

    Raises HttpNotFound if the page could not be fetched, and RuntimeError
    if the page is not cached while FAIL_IF_NOT_CACHED is set.
    """
    if cache_key is None:
        cache_key = url
    cache_path = cache_path_for_url(cache_key)
    if cached and os.path.exists(cache_path):
        # The cache is stored as UTF-8; open with an explicit encoding
        # instead of decoding the result of a text-mode read (which fails
        # on Python 3).
        with open(cache_path, encoding='utf-8') as f:
            html = f.read()
    else:
        if FAIL_IF_NOT_CACHED:
            # RuntimeError instead of BaseException: BaseException escaped
            # ordinary `except Exception` handlers; catchers of
            # BaseException still catch RuntimeError.
            raise RuntimeError("URL is not in cache and FAIL_IF_NOT_CACHED is True: %s" % url)
        crawler_fn = CRAWLERS[crawler]
        status, html = crawler_fn(url, data)
        if status != 200:
            raise HttpNotFound(url)
        _ensure_directory(CACHE_DIRECTORY)
        with open(cache_path, 'w', encoding='utf-8') as f:
            f.write(html)
    return html
Retrieves the HTML code for a given URL. If a cached version is not available, uses phantom_retrieve to fetch the page. data - Additional data that gets passed onto the crawler. cached - If True, retrieves the URL from the cache if it is available. If False, will still store the page in cache. cache_key - If set, will be used instead of the URL to lookup the cached version of the page. crawler - A string referencing one of the builtin crawlers. Returns the HTML as a unicode string. Raises a HttpNotFound exception if the page could not be found.
def qteRemoveHighlighting(self, widgetObj):
    """Remove the highlighting from previously highlighted characters.

    The macro data records which positions are highlighted and their
    original character formats; those formats are restored here.

    |Args|

    * ``widgetObj`` (**QWidget**): the ``QTextEdit`` to use.

    |Returns|

    * **None**
    """
    data = self.qteMacroData(widgetObj)
    if not data:
        return
    if not data.matchingPositions:
        # Nothing is currently highlighted.
        return
    self.highlightCharacters(widgetObj, data.matchingPositions,
                             QtCore.Qt.black, 50, data.oldCharFormats)
    # Clear the record so a repeated call becomes a no-op.
    data.matchingPositions = None
    data.oldCharFormats = None
    self.qteSaveMacroData(data, widgetObj)
Remove the highlighting from previously highlighted characters. The method access instance variables to determine which characters are currently highlighted and have to be converted to non-highlighted ones. |Args| * ``widgetObj`` (**QWidget**): the ``QTextEdit`` to use. |Returns| * **None** |Raises| * **None**
def any_text_to_fernet_key(self, text):
    """Convert any text to a Fernet key for encryption.

    Fernet requires 32 *url-safe* base64-encoded bytes. The 32-character
    hex MD5 digest of the text supplies the 32 raw bytes.
    """
    # presumably of_text returns the hex digest string — confirm
    md5 = fingerprint.fingerprint.of_text(text)
    # BUGFIX: use the urlsafe alphabet; standard base64 can emit '+' or
    # '/', which Fernet rejects as an invalid key.
    fernet_key = base64.urlsafe_b64encode(md5.encode("utf-8"))
    return fernet_key
Convert any text to a fernet key for encryption.
def _get_error_values(self, startingPercentage, endPercentage, startDate, endDate): if startDate is not None: possibleDates = filter(lambda date: date >= startDate, self._errorDates) if 0 == len(possibleDates): raise ValueError("%s does not represent a valid startDate." % startDate) startIdx = self._errorDates.index(min(possibleDates)) else: startIdx = int((startingPercentage * len(self._errorValues)) / 100.0) if endDate is not None: possibleDates = filter(lambda date: date <= endDate, self._errorDates) if 0 == len(possibleDates): raise ValueError("%s does not represent a valid endDate." % endDate) endIdx = self._errorDates.index(max(possibleDates)) + 1 else: endIdx = int((endPercentage * len(self._errorValues)) / 100.0) return self._errorValues[startIdx:endIdx]
Gets the defined subset of self._errorValues. Both parameters will be correct at this time. :param float startingPercentage: Defines the start of the interval. This has to be a value in [0.0, 100.0]. It represents the value, where the error calculation should be started. 25.0 for example means that the first 25% of all calculated errors will be ignored. :param float endPercentage: Defines the end of the interval. This has to be a value in [0.0, 100.0]. It represents the value, after which all error values will be ignored. 90.0 for example means that the last 10% of all local errors will be ignored. :param float startDate: Epoch representing the start date used for error calculation. :param float endDate: Epoch representing the end date used in the error calculation. :return: Returns a list with the defined error values. :rtype: list :raise: Raises a ValueError if startDate or endDate do not represent correct boundaries for error calculation.
def get_feature_penalty(self):
    """Get the feature penalty of the Dataset, fetching it lazily.

    Returns
    -------
    feature_penalty : numpy array or None
        Feature penalty for each feature in the Dataset.
    """
    penalty = self.feature_penalty
    if penalty is None:
        # Fetch once and cache on the instance for subsequent calls.
        penalty = self.get_field('feature_penalty')
        self.feature_penalty = penalty
    return penalty
Get the feature penalty of the Dataset. Returns ------- feature_penalty : numpy array or None Feature penalty for each feature in the Dataset.
def _extract_conjuction_elements_from_expression(expression):
    """Generate the expressions connected by ``&&`` in the given expression.

    Recursively flattens nested BinaryComposition('&&') trees; any other
    expression is yielded as-is.
    """
    if isinstance(expression, BinaryComposition) and expression.operator == u'&&':
        for element in _extract_conjuction_elements_from_expression(expression.left):
            yield element
        for element in _extract_conjuction_elements_from_expression(expression.right):
            yield element
    else:
        yield expression
Return a generator for expressions that are connected by `&&`s in the given expression.
def register_optionables(self, optionables):
    """Register the given Optionable subsystem types.

    :param optionables: The Optionable types to register.
    :type optionables: :class:`collections.Iterable` containing
        :class:`pants.option.optionable.Optionable` subclasses.
    :raises TypeError: if the argument is not iterable or contains entries
        that are not Optionable subclasses.
    """
    if not isinstance(optionables, Iterable):
        raise TypeError('The optionables must be an iterable, given {}'.format(optionables))
    # Materialize so the iterable can be validated and consumed twice.
    optionables = tuple(optionables)
    if not optionables:
        return
    invalid_optionables = [s
                           for s in optionables
                           if not isinstance(s, type) or not issubclass(s, Optionable)]
    if invalid_optionables:
        raise TypeError('The following items from the given optionables are not Optionable '
                        'subclasses:\n\t{}'.format('\n\t'.join(str(i) for i in invalid_optionables)))
    self._optionables.update(optionables)
Registers the given subsystem types. :param optionables: The Optionable types to register. :type optionables: :class:`collections.Iterable` containing :class:`pants.option.optionable.Optionable` subclasses.
def ensurearray(*args):
    """Apply numpy's broadcast rules to the given arguments.

    Ensures all arguments are numpy arrays of the same shape; see
    ``numpy.broadcast_arrays``.

    Returns
    -------
    list :
        The N broadcast arrays followed by a boolean indicating whether
        any of the inputs was originally an ndarray.
    """
    input_is_array = any(isinstance(arg, numpy.ndarray) for arg in args)
    # numpy >= 2.0 returns a tuple from broadcast_arrays; coerce to a list
    # so the flag can be appended.
    args = list(numpy.broadcast_arrays(*args))
    args.append(input_is_array)
    return args
Apply numpy's broadcast rules to the given arguments. This will ensure that all of the arguments are numpy arrays and that they all have the same shape. See ``numpy.broadcast_arrays`` for more details. It also returns a boolean indicating whether any of the inputs were originally arrays. Parameters ---------- *args : The arguments to check. Returns ------- list : A list with length ``N+1`` where ``N`` is the number of given arguments. The first N values are the input arguments as ``ndarrays``s. The last value is a boolean indicating whether any of the inputs was an array.
def apply_translation(self, translation):
    """Translate the current mesh by an XYZ offset.

    Parameters
    ----------
    translation : (3,) float
        Translation in XYZ
    """
    offset = np.asanyarray(translation, dtype=np.float64)
    if offset.shape != (3,):
        raise ValueError('Translation must be (3,)!')
    # Build a homogeneous transform whose only effect is the translation.
    transform = np.eye(4)
    transform[:3, 3] = offset
    self.apply_transform(transform)
Translate the current mesh. Parameters ---------- translation : (3,) float Translation in XYZ
def remove_nio(self, port_number):
    """Remove the specified NIO as member of cloud.

    Coroutine (uses ``yield from``).

    :param port_number: allocated port number
    :returns: the NIO that was bound to the allocated port
    :raises NodeError: if the port is not allocated
    """
    if port_number not in self._nios:
        raise NodeError("Port {} is not allocated".format(port_number))
    nio = self._nios[port_number]
    if isinstance(nio, NIOUDP):
        # Give the UDP port back to the project's port manager.
        self.manager.port_manager.release_udp_port(nio.lport, self._project)
    log.info('Cloud "{name}" [{id}]: NIO {nio} removed from port {port}'.format(name=self._name,
                                                                                id=self._id,
                                                                                nio=nio,
                                                                                port=port_number))
    del self._nios[port_number]
    if self._ubridge_hypervisor and self._ubridge_hypervisor.is_running():
        # Tear down the ubridge connection, then restart to apply changes.
        yield from self._delete_ubridge_connection(port_number)
        yield from self.start()
    return nio
Removes the specified NIO as member of cloud. :param port_number: allocated port number :returns: the NIO that was bound to the allocated port
def leave_group(self, group_id, timeout=None):
    """Call leave group API.

    https://devdocs.line.me/en/#leave

    :param str group_id: Group ID
    :param timeout: (optional) How long to wait for the server to send
        data before giving up, as a float, or a
        (connect timeout, read timeout) float tuple.
        Default is self.http_client.timeout
    :type timeout: float | tuple(float, float)
    """
    self._post(
        '/v2/bot/group/{group_id}/leave'.format(group_id=group_id),
        timeout=timeout
    )
Call leave group API. https://devdocs.line.me/en/#leave Leave a group. :param str group_id: Group ID :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a (connect timeout, read timeout) float tuple. Default is self.http_client.timeout :type timeout: float | tuple(float, float)
def add_variable(self, key, value, system_wide=False):
    """Add the environment variable via the wrapped Java object.

    :param key: the name of the variable
    :type key: str
    :param value: the value
    :type value: str
    :param system_wide: whether to add the variable system wide
    :type system_wide: bool
    """
    # Both JNI signatures take (String, String) and return void.
    if system_wide:
        javabridge.call(self.jobject, "addVariableSystemWide", "(Ljava/lang/String;Ljava/lang/String;)V", key, value)
    else:
        javabridge.call(self.jobject, "addVariable", "(Ljava/lang/String;Ljava/lang/String;)V", key, value)
Adds the environment variable. :param key: the name of the variable :type key: str :param value: the value :type value: str :param system_wide: whether to add the variable system wide :type system_wide: bool
def connect(self, broker, port=1883, client_id="", clean_session=True):
    """Connect to an MQTT broker; a pre-requisite for publish/subscribe.

    `broker`        MQTT broker host
    `port`          broker port (default 1883)
    `client_id`     if not specified, a random id is generated
    `clean_session` clean session flag for the connection

    Returns the connected paho client; raises RuntimeError on an
    unexpected disconnect during the handshake.
    """
    logger.info('Connecting to %s at port %s' % (broker, port))
    self._connected = False
    self._unexpected_disconnect = False
    self._mqttc = mqtt.Client(client_id, clean_session)
    self._mqttc.on_connect = self._on_connect
    self._mqttc.on_disconnect = self._on_disconnect
    if self._username:
        self._mqttc.username_pw_set(self._username, self._password)
    self._mqttc.connect(broker, int(port))
    # Pump the network loop until connected, disconnected, or timed out.
    timer_start = time.time()
    while time.time() < timer_start + self._loop_timeout:
        if self._connected or self._unexpected_disconnect:
            break;
        self._mqttc.loop()
    if self._unexpected_disconnect:
        raise RuntimeError("The client disconnected unexpectedly")
    logger.debug('client_id: %s' % self._mqttc._client_id)
    return self._mqttc
Connect to an MQTT broker. This is a pre-requisite step for publish and subscribe keywords. `broker` MQTT broker host `port` broker port (default 1883) `client_id` if not specified, a random id is generated `clean_session` specifies the clean session flag for the connection Examples: Connect to a broker with default port and client id | Connect | 127.0.0.1 | Connect to a broker by specifying the port and client id explicitly | Connect | 127.0.0.1 | 1883 | test.client | Connect to a broker with clean session flag set to false | Connect | 127.0.0.1 | clean_session=${false} |
def remove_arg(self, arg):
    """Remove every case-insensitive match of ``arg`` from the arg list.

    The list is first normalized: entries are stripped of surrounding
    whitespace and empty entries are dropped.
    """
    self.args = [entry.strip() for entry in self.args if entry.strip()]
    target = arg.lower()
    # Iterate over a snapshot since matching entries are removed in place.
    for entry in list(self.args):
        if entry.lower() == target:
            self.args.remove(entry)
Remove an arg to the arg list
def archive(class_obj: type) -> type:
    """Decorator that registers the decorated class as the Archive known type.

    :param class_obj: the class to register
    :return: the class itself, unchanged
    :raises TypeError: if ``class_obj`` is not a class. (Previously an
        ``assert``, which is stripped under ``python -O``.)
    """
    if not isinstance(class_obj, type):
        raise TypeError("class_obj is not a Class")
    global _archive_resource_type
    _archive_resource_type = class_obj
    return class_obj
Decorator to annotate the Archive class. Registers the decorated class as the Archive known type.
def report_error_event(self, error_report):
    """Report the error via the gapic client.

    :type error_report: dict
    :param error_report: payload of the error report formatted according to
        https://cloud.google.com/error-reporting/docs/formatting-error-messages
        (typically built with
        :meth:`google.cloud.error_reporting.client._build_error_report`).
    """
    project_name = self._gapic_api.project_path(self._project)
    error_report_payload = report_errors_service_pb2.ReportedErrorEvent()
    # Convert the plain dict into the protobuf message in place.
    ParseDict(error_report, error_report_payload)
    self._gapic_api.report_error_event(project_name, error_report_payload)
Uses the gapic client to report the error. :type error_report: dict :param error_report: payload of the error report formatted according to https://cloud.google.com/error-reporting/docs/formatting-error-messages This object should be built using Use :meth:~`google.cloud.error_reporting.client._build_error_report`
def _s3_path_split(s3_path):
    """Split an S3 path into bucket and key.

    Parameters
    ----------
    s3_path : str

    Returns
    -------
    splitted : (str, str)
        (bucket, key)

    Examples
    --------
    >>> _s3_path_split('s3://my-bucket/foo/bar.jpg')
    S3Path(bucket_name='my-bucket', key='foo/bar.jpg')
    """
    if not s3_path.startswith('s3://'):
        raise ValueError('s3_path is expected to start with \'s3://\', '
                         'but was {}'.format(s3_path))
    bucket_key = s3_path[len('s3://'):]
    # partition (rather than split) tolerates bucket-only paths such as
    # 's3://my-bucket', yielding an empty key instead of an unpacking error.
    bucket_name, _, key = bucket_key.partition('/')
    return S3Path(bucket_name, key)
Split an S3 path into bucket and key. Parameters ---------- s3_path : str Returns ------- splitted : (str, str) (bucket, key) Examples -------- >>> _s3_path_split('s3://my-bucket/foo/bar.jpg') S3Path(bucket_name='my-bucket', key='foo/bar.jpg')
def anti_clobber_dir_path(dir_path, suffix='.d'):
    """Return a directory path free of filenames.

    Walks each prefix of the normalized path; if a prefix names an existing
    file, that path segment is renamed with ``suffix`` so the directory
    tree cannot clobber the file.

    Args:
        dir_path (str): A directory path.
        suffix (str): The suffix appended to the segment that is a file.

    Returns:
        str
    """
    normalized = os.path.normpath(dir_path)
    segments = normalized.split(os.sep)
    for depth in range(len(segments)):
        candidate = os.sep.join(segments[:depth + 1])
        if os.path.isfile(candidate):
            # A file shadows this segment: rename it and stop searching.
            segments[depth] += suffix
            return os.sep.join(segments)
    return normalized
Return a directory path free of filenames. Args: dir_path (str): A directory path. suffix (str): The suffix to append to the part of the path that is a file. Returns: str
def create_vnet(access_token, subscription_id, resource_group, name, location,
                address_prefix='10.0.0.0/16', subnet_prefix='10.0.0.0/16',
                nsg_id=None):
    """Create a VNet with specified name and location, plus one default subnet.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.
        name (str): Name of the new VNet.
        location (str): Azure data center location. E.g. westus.
        address_prefix (str): Optional VNet address prefix.
        subnet_prefix (str): Optional subnet address prefix.
        nsg_id (str): Optional Network Security Group resource Id.

    Returns:
        HTTP response. VNet JSON body.
    """
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/resourceGroups/', resource_group,
                        '/providers/Microsoft.Network/virtualNetworks/', name,
                        '?api-version=', NETWORK_API])
    vnet_body = {'location': location}
    properties = {'addressSpace': {'addressPrefixes': [address_prefix]}}
    # A single subnet named 'subnet' is created alongside the VNet.
    subnet = {'name': 'subnet'}
    subnet['properties'] = {'addressPrefix': subnet_prefix}
    if nsg_id is not None:
        subnet['properties']['networkSecurityGroup'] = {'id': nsg_id}
    properties['subnets'] = [subnet]
    vnet_body['properties'] = properties
    body = json.dumps(vnet_body)
    return do_put(endpoint, body, access_token)
Create a VNet with specified name and location. Optional subnet address prefix.. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. name (str): Name of the new VNet. location (str): Azure data center location. E.g. westus. address_prefix (str): Optional VNet address prefix. Default '10.0.0.0/16'. subnet_prefix (str): Optional subnet address prefix. Default '10.0.0.0/16'. nsg_id (str): Optional Netwrok Security Group resource Id. Default None. Returns: HTTP response. VNet JSON body.
def transform_around(matrix, point):
    """Apply a transformation matrix's rotation around a point in space.

    Parameters
    ----------
    matrix: (4,4) or (3,3) float, transformation matrix
    point: (3,) or (2,) float, point in space

    Returns
    ---------
    result: the composed homogeneous transformation matrix
    """
    point = np.asanyarray(point)
    matrix = np.asanyarray(matrix)
    dim = len(point)
    if matrix.shape != (dim + 1, dim + 1):
        raise ValueError('matrix must be (d+1, d+1)')
    # Conjugate: move the point to the origin, transform, move it back.
    to_origin = np.eye(dim + 1)
    to_origin[:dim, dim] = -point
    from_origin = np.eye(dim + 1)
    from_origin[:dim, dim] = point
    return np.dot(from_origin, np.dot(matrix, to_origin))
Given a transformation matrix, apply its rotation around a point in space. Parameters ---------- matrix: (4,4) or (3, 3) float, transformation matrix point: (3,) or (2,) float, point in space Returns --------- result: (4,4) transformation matrix
def _render_our_module_key_flags(self, module, output_lines, prefix=''):
    """Render a help string for the key flags of a given module.

    Args:
        module: module|str, the module to render key flags for.
        output_lines: [str], the generated help lines are appended here.
        prefix: str, prepended to each generated help line.
    """
    key_flags = self.get_key_flags_for_module(module)
    if key_flags:
        # Modules without key flags produce no output at all.
        self._render_module_flags(module, key_flags, output_lines, prefix)
Returns a help string for the key flags of a given module. Args: module: module|str, the module to render key flags for. output_lines: [str], a list of strings. The generated help message lines will be appended to this list. prefix: str, a string that is prepended to each generated help line.
def set_value(self, obj, value):
    """Store ``value`` on the model, or delete the field when value is empty."""
    if not value:
        # Falsy values clear the field instead of being stored.
        self.delete_value(obj)
    else:
        obj.set_field_value(self.name, value)
Sets value to model if not empty
def getrawpartid(self, msgid, partid, stream=sys.stdout):
    """Print the raw (decoded) payload of one part of a message.

    :param msgid: identifier of the message to fetch
    :param partid: index of the part to print (converted with int())
    :param stream: file-like object to print to (default: stdout)
    """
    parts = [part for _, part in self._get(msgid)]
    part = parts[int(partid)]
    payload = part.get_payload(decode=True)
    # Multipart containers yield None here; print only real payloads.
    # (Also fixed the `!= None` comparison to the idiomatic `is not None`.)
    if payload is not None:
        print(payload, file=stream)
Get a specific part from the message and print it raw.
def Concat(*args: Union[BitVec, List[BitVec]]) -> BitVec:
    """Create a concatenation expression.

    Accepts either BitVecs as varargs or a single list of BitVecs.

    :param args: the BitVecs to concatenate
    :return: a BitVec carrying all operand annotations (a BitVecFunc if
        any operand was one)
    """
    # Allow both Concat(a, b, c) and Concat([a, b, c]).
    if len(args) == 1 and isinstance(args[0], list):
        bvs = args[0]
    else:
        bvs = cast(List[BitVec], args)
    nraw = z3.Concat([a.raw for a in bvs])
    annotations = []
    bitvecfunc = False
    for bv in bvs:
        # Union of every operand's annotations; remember BitVecFunc inputs.
        annotations += bv.annotations
        if isinstance(bv, BitVecFunc):
            bitvecfunc = True
    if bitvecfunc:
        # NOTE(review): func_name/input_ are deliberately dropped here —
        # confirm this loss of provenance is intended.
        return BitVecFunc(
            raw=nraw, func_name=None, input_=None, annotations=annotations
        )
    return BitVec(nraw, annotations)
Create a concatenation expression. :param args: :return:
def remove(self, container, instances=None, map_name=None, **kwargs):
    """Remove instances from a container configuration.

    :param container: Container name.
    :param instances: Instance names to remove. If not specified, removes
        all instances as specified in the configuration.
    :param map_name: Container map name; defaults to the default map.
    :param kwargs: Additional kwargs, applied to the main removal action.
    :return: Return values of removed containers.
    """
    return self.run_actions('remove', container, instances=instances, map_name=map_name, **kwargs)
Remove instances from a container configuration. :param container: Container name. :type container: unicode | str :param instances: Instance names to remove. If not specified, will remove all instances as specified in the configuration (or just one default instance). :type instances: collections.Iterable[unicode | str | NoneType] :param map_name: Container map name. Optional - if not provided the default map is used. :type map_name: unicode | str :param kwargs: Additional kwargs. If multiple actions are resulting from this, they will only be applied to the main container removal. :return: Return values of removed containers. :rtype: list[dockermap.map.runner.ActionOutput]
def decode(self, value, value_type):
    """Convert a cached value blob and its one-char type code back to Python.

    'i' -> int, 'p' -> pickled object, 'z' -> zlib-compressed pickle.
    Raises ValueError for any other code.
    """
    if value_type == 'i':
        return int(value)
    if value_type == 'z':
        # Decompress, then fall through to the unpickling path.
        value, value_type = zlib.decompress(value), 'p'
    if value_type != 'p':
        raise ValueError(
            "Unknown value_type '{}' read from the cache table."
            .format(value_type),
        )
    return pickle.loads(force_bytes(value))
Take a value blob and its value_type one-char code and convert it back to a python object
def validate_required(self, value):
    """Validate the given value against this field's 'required' property.

    Raises MissingFieldError when the field is required but the value is
    None or the empty string.
    """
    value_is_empty = value is None or value == ''
    if self.required and value_is_empty:
        raise MissingFieldError(self.name)
Validates the given value agains this field's 'required' property
def new_worker(self, name: str):
    """Create a new Worker, start a new thread with it, and return it."""
    if not self.running:
        # After shutdown, hand back the immediate worker instead of
        # spawning new threads.
        return self.immediate_worker
    worker = self._new_worker(name)
    self._start_worker(worker)
    return worker
Creates a new Worker and start a new Thread with it. Returns the Worker.
def create_module(clear_target, target):
    """Interactively create a new template HFOS plugin module at ``target``.

    Repeats the questionnaire until the user confirms the answers, then
    writes the module skeleton. Exits with status 2 if ``target`` exists
    and ``clear_target`` is not set.
    """
    if os.path.exists(target):
        if clear_target:
            shutil.rmtree(target)
        else:
            log("Target exists! Use --clear to delete it first.",
                emitter='MANAGE')
            sys.exit(2)
    done = False
    info = None
    while not done:
        info = _ask_questionnaire()
        pprint(info)
        done = _ask('Is the above correct', default='y', data_type='bool')
    augmented_info = _augment_info(info)
    log("Constructing module %(plugin_name)s" % info)
    _construct_module(augmented_info, target)
Creates a new template HFOS plugin module
def _lambda_add_s3_event_source(awsclient, arn, event, bucket, prefix, suffix):
    """Register a Lambda function as an S3 bucket notification target.

    Builds a LambdaFunctionConfiguration (with key filter rules derived
    from ``prefix``/``suffix``), merges it into the bucket's existing
    notification configuration, and writes the result back.

    :param awsclient: client factory providing ``get_client('s3')``
    :param arn: ARN of the Lambda function to invoke
    :param event: S3 event name to subscribe to
    :param bucket: name of the bucket whose configuration is updated
    :param prefix: key prefix filter rule (use only prefix OR suffix)
    :param suffix: key suffix filter rule
    :return: the S3 API response rendered via ``json2table``
    """
    json_data = {
        'LambdaFunctionConfigurations': [{
            'LambdaFunctionArn': arn,
            'Id': str(uuid.uuid1()),
            'Events': [event]
        }]
    }
    filter_rules = build_filter_rules(prefix, suffix)
    json_data['LambdaFunctionConfigurations'][0].update({
        'Filter': {
            'Key': {
                'FilterRules': filter_rules
            }
        }
    })
    client_s3 = awsclient.get_client('s3')
    # Merge with the existing configuration rather than overwriting it.
    bucket_configurations = client_s3.get_bucket_notification_configuration(
        Bucket=bucket)
    # ResponseMetadata must not be echoed back in the put call.
    bucket_configurations.pop('ResponseMetadata')
    if 'LambdaFunctionConfigurations' in bucket_configurations:
        bucket_configurations['LambdaFunctionConfigurations'].append(
            json_data['LambdaFunctionConfigurations'][0]
        )
    else:
        bucket_configurations['LambdaFunctionConfigurations'] = json_data[
            'LambdaFunctionConfigurations']
    response = client_s3.put_bucket_notification_configuration(
        Bucket=bucket,
        NotificationConfiguration=bucket_configurations
    )
    return json2table(response)
Use only prefix OR suffix. :param arn: ARN of the Lambda function to invoke :param event: S3 event name to subscribe to :param bucket: name of the bucket whose notification configuration is updated :param prefix: key prefix filter rule :param suffix: key suffix filter rule :return: the S3 API response rendered via json2table
def get_init(self):
    """Return the initial generated name: base name, separator, counter."""
    return '{}{}{}'.format(self._base_name, self._separator, self._counter_init)
Return initial name.
def app_to_object(self):
    """Return the application object encoded by this tag.

    Raises ValueError when this is not an application-class tag.
    Returns None when no decoder class is registered for the tag
    number.
    """
    if self.tagClass != Tag.applicationTagClass:
        raise ValueError("application tag required")
    decoder_cls = self._app_tag_class[self.tagNumber]
    return decoder_cls(self) if decoder_cls else None
Return the application object encoded by the tag.
def markdownify(markdown_content):
    """Render markdown content to HTML using the configured martor settings.

    Basic:

    >>> from martor.utils import markdownify
    >>> markdownify("![awesome](http://i.imgur.com/hvguiSn.jpg)")
    '<p><img alt="awesome" src="http://i.imgur.com/hvguiSn.jpg" /></p>'
    """
    try:
        html = markdown.markdown(
            markdown_content,
            safe_mode=MARTOR_MARKDOWN_SAFE_MODE,
            extensions=MARTOR_MARKDOWN_EXTENSIONS,
            extension_configs=MARTOR_MARKDOWN_EXTENSION_CONFIGS,
        )
    except Exception:
        # Older/newer markdown packages may reject these arguments.
        raise VersionNotCompatible("The markdown isn't compatible, please reinstall "
                                   "your python markdown into Markdown>=3.0")
    return html
Render the markdown content to HTML. Basic: >>> from martor.utils import markdownify >>> content = "![awesome](http://i.imgur.com/hvguiSn.jpg)" >>> markdownify(content) '<p><img alt="awesome" src="http://i.imgur.com/hvguiSn.jpg" /></p>' >>>
def _set_logger(self): self.logger.propagate = False hdl = logging.StreamHandler() fmt_str = '[querier][%(levelname)s] %(message)s' hdl.setFormatter(logging.Formatter(fmt_str)) self.logger.addHandler(hdl)
change log format.
def process(self):
    """Export collected timelines to Timesketch and record the sketch URL.

    Reads (description, path) pairs from ``self.state.input``, exports
    them to the sketch identified by ``self.sketch_id``, and stores the
    sketch URL in ``self.state.output``.  Failures are reported through
    ``self.state.add_error``.
    """
    if not self.timesketch_api.session:
        message = 'Could not connect to Timesketch server'
        # NOTE(review): execution continues after this error; presumably
        # add_error(critical=True) aborts the module — confirm.
        self.state.add_error(message, critical=True)
    named_timelines = []
    for description, path in self.state.input:
        if not description:
            # Fall back to a generated description for unnamed timelines.
            description = 'untitled timeline for '+path
        named_timelines.append((description, path))
    try:
        self.timesketch_api.export_artifacts(named_timelines, self.sketch_id)
    except RuntimeError as e:
        self.state.add_error(
            'Error occurred while working with Timesketch: {0:s}'.format(str(e)),
            critical=True)
        return
    sketch_url = self.timesketch_api.get_sketch_url(self.sketch_id)
    print('Your Timesketch URL is: {0:s}'.format(sketch_url))
    self.state.output = sketch_url
Executes a Timesketch export.
def Import(context, request):
    """Stub importer that always reports XML import as unavailable.

    :return: JSON string with empty ``errors`` and one log entry
    """
    logs = ["Generic XML Import is not available"]
    return json.dumps({'errors': [], 'log': logs})
Stub for reading analysis results from an XML string; currently always reports that generic XML import is not available.
async def jsk_vc_join(self, ctx: commands.Context, *, destination: typing.Union[discord.VoiceChannel, discord.Member] = None):
    """Joins a voice channel, or moves to it if already connected.

    Passing a voice channel uses that voice channel.
    Passing a member will use that member's current voice channel.
    Passing nothing will use the author's voice channel.
    """
    destination = destination or ctx.author
    if isinstance(destination, discord.Member):
        # Resolve a member to their current voice channel, if any.
        if destination.voice and destination.voice.channel:
            destination = destination.voice.channel
        else:
            return await ctx.send("Member has no voice channel.")
    voice = ctx.guild.voice_client
    if voice:
        # Already connected in this guild: just move.
        await voice.move_to(destination)
    else:
        await destination.connect(reconnect=True)
    await ctx.send(f"Connected to {destination.name}.")
Joins a voice channel, or moves to it if already connected. Passing a voice channel uses that voice channel. Passing a member will use that member's current voice channel. Passing nothing will use the author's voice channel.
def check_layout_json(self):
    """Validate ``layout.json`` against the configured layout schema.

    Returns silently when no schema is configured, the file is absent,
    or the file is not valid JSON.  Records the result in
    ``self.validation_data`` and, on success, continues with
    ``check_layout_params``.
    """
    layout_json_file = 'layout.json'
    if self.layout_json_schema is None or not os.path.isfile(layout_json_file):
        return
    error = None
    status = True
    try:
        with open(layout_json_file) as fh:
            data = json.loads(fh.read())
        validate(data, self.layout_json_schema)
    except SchemaError as e:
        status = False
        error = e
    except ValidationError as e:
        status = False
        error = e.message
    except ValueError:
        # Not valid JSON at all; nothing to record.
        return
    self.validation_data['schema'].append({'filename': layout_json_file, 'status': status})
    if error:
        self.validation_data['errors'].append(
            'Schema validation failed for {} ({}).'.format(layout_json_file, error)
        )
    else:
        self.check_layout_params()
Check all layout.json files for valid schema.
async def throttle_update_all_heaters(self):
    """Update all devices and rooms, rate-limited to one refresh per
    MIN_TIME_BETWEEN_UPDATES interval."""
    last = self._throttle_all_time
    if last is not None:
        if dt.datetime.now() - last < MIN_TIME_BETWEEN_UPDATES:
            return
    self._throttle_all_time = dt.datetime.now()
    await self.find_all_heaters()
Throttle update all devices and rooms.
def _set_extensions(self): self._critical_extensions = set() for extension in self['single_extensions']: name = extension['extn_id'].native attribute_name = '_%s_value' % name if hasattr(self, attribute_name): setattr(self, attribute_name, extension['extn_value'].parsed) if extension['critical'].native: self._critical_extensions.add(name) self._processed_extensions = True
Sets common named extensions to private attributes and creates a list of critical extensions
def on_action_end(self, action, logs=None):
    """Dispatch the on_action_end hook to every registered callback.

    :param action: the action that just finished
    :param logs: optional dict of log data forwarded to each callback
    """
    # ``logs={}`` was a shared mutable default argument; use the
    # None-sentinel idiom instead (backward compatible for callers).
    if logs is None:
        logs = {}
    for callback in self.callbacks:
        hook = getattr(callback, 'on_action_end', None)
        if callable(hook):
            hook(action, logs=logs)
Called at the end of each action, for each callback in the callback list.
def V(x, requires_grad=False, volatile=False):
    """Wrap ``x`` (a single value or a nested list) into pytorch
    tensors via ``V_``, preserving the input structure."""
    def to_variable(item):
        return V_(item, requires_grad, volatile)
    return map_over(x, to_variable)
creates a single or a list of pytorch tensors, depending on input x.
def choose_type(cls, content_type):
    """Choose the object type for a content type: SUBDIR for known
    subdirectory content types, FILE otherwise."""
    if content_type in cls.subdir_types:
        return cls.type_cls.SUBDIR
    return cls.type_cls.FILE
Choose object type from content type.
def pkg_not_found(self, bol, pkg, message, eol):
    """Print a 'no such package' message framed by ``bol``/``eol``."""
    template = "{0}No such package {1}: {2}{3}"
    print(template.format(bol, pkg, message, eol))
Print message when package not found
def get_state_actions(self, state, **kwargs):
    """Removes containers that are stopped. Optionally skips persistent
    containers. Attached containers are skipped by default from removal
    but can optionally be included.

    :param state: Configuration state.
    :type state: dockermap.map.state.ConfigState
    :param kwargs: Additional keyword arguments; only applied to
        container removals.
    :return: Actions on the client, map, and configurations.
    :rtype: list[dockermap.map.action.ItemAction]
    """
    config_type = state.config_id.config_type
    # Extra kwargs are only forwarded for container removals.
    if config_type == ItemType.CONTAINER:
        extra_data = kwargs
    else:
        extra_data = None
    if state.base_state == State.PRESENT:
        # NOTE(review): due to and/or precedence the last clause
        # ``not state.state_flags & StateFlags.PERSISTENT`` applies to
        # any config type, not only containers — confirm intended.
        if ((config_type == ItemType.VOLUME and self.remove_attached) or
                (config_type == ItemType.CONTAINER and self.remove_persistent or
                 not state.state_flags & StateFlags.PERSISTENT)):
            return [ItemAction(state, Action.REMOVE, extra_data=extra_data)]
        elif config_type == ItemType.NETWORK:
            connected_containers = state.extra_data.get('containers')
            if connected_containers:
                # Containers must be disconnected before the network
                # can be removed.
                actions = [ItemAction(state, NetworkUtilAction.DISCONNECT_ALL,
                                      {'containers': connected_containers})]
            else:
                actions = []
            actions.append(ItemAction(state, Action.REMOVE, extra_data=kwargs))
            return actions
Removes containers that are stopped. Optionally skips persistent containers. Attached containers are skipped by default from removal but can optionally be included. :param state: Configuration state. :type state: dockermap.map.state.ConfigState :param kwargs: Additional keyword arguments. :return: Actions on the client, map, and configurations. :rtype: list[dockermap.map.action.ItemAction]
def parse_text_block(text: str) -> Tuple[str, str]:
    """Split a text block into (body, footer).

    The body is the first paragraph; the footer is the second paragraph
    when the text contains exactly two, and empty otherwise.  Newlines
    inside each part are replaced with spaces.

    :param text: The text string to be divided.
    :return: A tuple of (body, footer).
    """
    body, footer = '', ''
    if text:
        # Split once instead of three times as before.
        paragraphs = text.split('\n\n')
        body = paragraphs[0]
        if len(paragraphs) == 2:
            footer = paragraphs[1]
    return body.replace('\n', ' '), footer.replace('\n', ' ')
This will take a text block and return a tuple with body and footer, where footer is defined as the last paragraph. :param text: The text string to be divided. :return: A tuple with body and footer, where footer is defined as the last paragraph.
def bsp_new_with_size(x: int, y: int, w: int, h: int) -> tcod.bsp.BSP:
    """Create a new BSP instance with the given rectangle.

    Args:
        x (int): Rectangle left coordinate.
        y (int): Rectangle top coordinate.
        w (int): Rectangle width.
        h (int): Rectangle height.

    Returns:
        BSP: A new BSP instance.

    .. deprecated:: 2.0
        Call the :any:`BSP` class instead.
    """
    return Bsp(x, y, w, h)
Create a new BSP instance with the given rectangle. Args: x (int): Rectangle left coordinate. y (int): Rectangle top coordinate. w (int): Rectangle width. h (int): Rectangle height. Returns: BSP: A new BSP instance. .. deprecated:: 2.0 Call the :any:`BSP` class instead.
def pack(self, value=None):
    """Pack the TLV into its binary representation.

    With no ``value``, concatenates this instance's packed header and
    value.  Given another instance of the same type, packs that
    instead.  Anything else raises PackException.
    """
    if value is None:
        return self.header.pack() + self.value.pack()
    if isinstance(value, type(self)):
        return value.pack()
    msg = "{} is not an instance of {}".format(value, type(self).__name__)
    raise PackException(msg)
Pack the TLV in a binary representation. Returns: bytes: Binary representation of the struct object. Raises: :exc:`~.exceptions.ValidationError`: If validation fails.
def restore_sampler_state(self):
    """Restore the sampler's state from the database backend.

    Sampler attributes are written into ``self.__dict__``; each
    stochastic's value is restored by name.  A missing entry or a
    failed assignment emits a warning instead of aborting.
    """
    state = self.db.getstate() or {}
    # Restore the sampler's own attributes.
    self.__dict__.update(state.get('sampler', {}))
    # Restore each stochastic's value by name, best-effort.
    stoch_state = state.get('stochastics', {})
    for sm in self.stochastics:
        try:
            sm.value = stoch_state[sm.__name__]
        except Exception:  # was a bare ``except:``; don't trap SystemExit/KeyboardInterrupt
            warnings.warn(
                'Failed to restore state of stochastic %s from %s backend' %
                (sm.__name__, self.db.__name__))
Restore the state of the sampler to the state stored in the database.
def __prepare_dataset_parameter(self, dataset):
    """Processes the dataset parameter for type correctness.

    Accepts an SFrame as-is.  A dict with string keys is converted:
    list/tuple/array values become columns directly, scalar values
    become single-row columns.  Anything else raises TypeError.

    Returns it as an SFrame.
    """
    if not isinstance(dataset, _SFrame):
        def raise_dataset_type_exception():
            raise TypeError("The dataset parameter must be either an SFrame, "
                            "or a dictionary of (str : list) or (str : value).")
        if type(dataset) is dict:
            if not all(type(k) is str for k in _six.iterkeys(dataset)):
                raise_dataset_type_exception()
            if all(type(v) in (list, tuple, _array.array) for v in _six.itervalues(dataset)):
                dataset = _SFrame(dataset)
            else:
                # Scalar values: wrap each into a one-element column.
                dataset = _SFrame({k : [v] for k, v in _six.iteritems(dataset)})
        else:
            raise_dataset_type_exception()
    return dataset
Processes the dataset parameter for type correctness. Returns it as an SFrame.
def _add_members(self, catmembers): members = [x for x in catmembers if x['ns'] == 0] subcats = [x for x in catmembers if x['ns'] == 14] if 'members' in self.data: self.data['members'].extend(members) else: self.data.update({'members': members}) if subcats: if 'subcategories' in self.data: self.data['subcategories'].extend(subcats) else: self.data.update({'subcategories': subcats})
Adds category members and subcategories to data
def get_provides(self, ignored=tuple()):
    """Return the map of provided classes/members to their providers.

    Computed lazily on first access.  ``ignored`` is an optional
    sequence of glob patterns; matching keys are filtered out of the
    returned map.
    """
    if self._provides is None:
        self._collect_requires_provides()
    provides = self._provides
    if not ignored:
        return provides
    return dict((k, v) for k, v in provides.items()
                if not fnmatches(k, *ignored))
a map of provided classes and class members, and what provides them. ignored is an optional list of globbed patterns indicating packages, classes, etc that shouldn't be included in the provides map
def __execute_bsh(self, instr):
    """Execute a BSH (bidirectional shift) instruction.

    The shift amount (operand 1) is interpreted as signed: a
    non-negative amount shifts operand 0 left, a negative amount
    shifts it right by the amount's two's-complement magnitude.
    The result is written to operand 2.
    """
    op0_val = self.read_operand(instr.operands[0])
    op1_val = self.read_operand(instr.operands[1])
    op1_size = instr.operands[1].size
    if extract_sign_bit(op1_val, op1_size) == 0:
        # Non-negative amount: shift left.
        op2_val = op0_val << op1_val
    else:
        # Negative amount: shift right by |amount|.
        op2_val = op0_val >> twos_complement(op1_val, op1_size)
    self.write_operand(instr.operands[2], op2_val)
    return None
Execute BSH instruction.
def reduce(self, func, *initial):
    """Reduce the sequence with ``func``; mirrors functools.reduce.

    >>> seq([1, 2, 3]).reduce(lambda x, y: x + y)
    6

    :param func: two-parameter, associative reduce function
    :param initial: single optional argument acting as initial value
    :return: wrapped reduced value
    """
    if not initial:
        return _wrap(reduce(func, self))
    if len(initial) == 1:
        return _wrap(reduce(func, self, initial[0]))
    raise ValueError('reduce takes exactly one optional parameter for initial value')
Reduce sequence of elements using func. API mirrors functools.reduce >>> seq([1, 2, 3]).reduce(lambda x, y: x + y) 6 :param func: two parameter, associative reduce function :param initial: single optional argument acting as initial value :return: reduced value using func
def import_numpy():
    """Return the numpy module if it is available on the system,
    otherwise return None.

    Uses a direct import attempt instead of the deprecated ``imp``
    module (removed in Python 3.12).
    """
    try:
        import numpy as np
    except ImportError:
        return None
    return np
Returns the numpy module if it exists on the system, otherwise returns None.
def parse_for(self):
    """Parse a for loop into a :class:`nodes.For` node.

    Grammar: ``for <target> in <iter> [if <test>] [recursive] ...
    [else ...] endfor``.
    """
    lineno = self.stream.expect('name:for').lineno
    target = self.parse_assign_target(extra_end_rules=('name:in',))
    self.stream.expect('name:in')
    # with_condexpr=False so a trailing ``if`` is parsed as the loop
    # filter below, not as part of the iterable expression.
    iter = self.parse_tuple(with_condexpr=False,
                            extra_end_rules=('name:recursive',))
    test = None
    if self.stream.skip_if('name:if'):
        test = self.parse_expression()
    recursive = self.stream.skip_if('name:recursive')
    body = self.parse_statements(('name:endfor', 'name:else'))
    if next(self.stream).value == 'endfor':
        else_ = []
    else:
        else_ = self.parse_statements(('name:endfor',), drop_needle=True)
    return nodes.For(target, iter, body, else_, test, recursive,
                     lineno=lineno)
Parse a for loop.
def prog(self):
    """Program name, lazily taken from the underlying parser and cached."""
    cached = self._prog
    if not cached:
        cached = self._parser.prog
        self._prog = cached
    return cached
Program name.
def init_ui(self, ):
    """Create the "Preferences" menu under "Jukebox" to start the plugin.

    :returns: None
    :rtype: None
    :raises: None
    """
    self.mm = MenuManager.get()
    p = self.mm.menus['Jukebox']
    # Selecting the menu entry runs the plugin via self.run.
    self.menu = self.mm.create_menu("Preferences", p, command=self.run)
Create the menu \"Preferences\" under \"Jukebox\" to start the plugin :returns: None :rtype: None :raises: None
def team_profiles(self, team):
    """Get a team's social media profiles linked on their TBA page.

    :param team: Team to get data on.
    :return: List of Profile objects.
    """
    raw_profiles = self._get('team/%s/social_media' % self.team_key(team))
    return [Profile(entry) for entry in raw_profiles]
Get team's social media profiles linked on their TBA page. :param team: Team to get data on. :return: List of Profile objects.
def clean(self, argslist):
    """Clean a parsed argument list.

    String entries are stripped of ':' and ',' characters; empty
    results are dropped.  A nested list represents the arguments of a
    sub-call: it is collapsed into a ``(*N)`` arity marker appended to
    the previous entry, or reported as unrecognized when there is no
    previous entry and its first element contains no '/'.
    """
    result = []
    for arg in argslist:
        if isinstance(arg, list):  # was ``type(arg) == type([])``
            if len(result) > 0:
                # Record the number of cleaned sub-arguments as arity.
                result[-1] = result[-1] + "(*{})".format(len(self.clean(arg)))
            elif "/" not in arg[0]:
                msg.warn("argument to function call unrecognized. {}".format(arg))
        else:
            cleaner = re.sub("[:,]+", "", arg).strip()
            if len(cleaner) > 0:
                result.append(cleaner)
    return result
Cleans the argslist.
def update(cls, **kwargs):
    """Update the record matching ``kwargs['id']``, or create it.

    If a record with the given id already exists, its attributes are
    set from ``kwargs`` and the change is committed; otherwise a new
    record is created via ``get_or_create``.
    """
    q = cls._get_instance(**{'id': kwargs['id']})
    if q:
        for k, v in kwargs.items():
            setattr(q, k, v)
        _action_and_commit(q, session.add)
    else:
        cls.get_or_create(**kwargs)
If a record matching the instance id already exists in the database, update it. If a record matching the instance id does not already exist, create a new record.
def extract_length(self, lower_bound=None, upper_bound=None, new_path=None):
    """Copy reads whose length lies in [lower_bound, upper_bound] into
    a new file.

    :param lower_bound: minimum read length (default 0)
    :param upper_bound: maximum read length (default unbounded)
    :param new_path: destination; a fresh temp path by default, an
        existing FASTA object, or a path string
    :return: the object containing the extracted fraction
    """
    if new_path is None:
        fraction = self.__class__(new_temp_path())
    elif isinstance(new_path, FASTA):
        fraction = new_path
    else:
        fraction = self.__class__(new_path)
    if lower_bound is None:
        lower_bound = 0
    if upper_bound is None:
        # ``sys.maxint`` does not exist on Python 3; fall back to maxsize.
        upper_bound = getattr(sys, 'maxint', sys.maxsize)
    def fraction_iterator():
        # Generator so large files are streamed, not materialized.
        for read in self:
            if lower_bound <= len(read) <= upper_bound:
                yield read
    fraction.write(fraction_iterator())
    fraction.close()
    return fraction
Extract a certain length fraction and place them in a new file.
def build(self):
    """Build and return a DeployedClassifier from the trained state.

    :raises NeedToTrainExceptionBeforeDeployingException: if no
        classifier has been trained yet.
    """
    if self._clf is None:
        raise NeedToTrainExceptionBeforeDeployingException()
    tdm = self._term_doc_matrix
    return DeployedClassifier(self._category,
                              tdm._category_idx_store,
                              tdm._term_idx_store,
                              self._term_doc_matrix_factory)
Builds Depoyed Classifier
def holtWintersConfidenceArea(requestContext, seriesList, delta=3):
    """Perform a Holt-Winters forecast on the input series and plot the
    area between the upper and lower confidence bands of the predicted
    forecast deviations."""
    bands = holtWintersConfidenceBands(requestContext, seriesList, delta)
    area = areaBetween(requestContext, bands)
    for series in area:
        series.name = series.name.replace('areaBetween',
                                          'holtWintersConfidenceArea')
    return area
Performs a Holt-Winters forecast using the series as input data and plots the area between the upper and lower bands of the predicted forecast deviations.
def _migrate(data: Mapping[str, Any]) -> SettingsData:
    """Migrate a settings dict to the latest file-format version.

    Reads the ``_version`` integer from ``data`` (default 0) and
    applies every migration from that version forward.

    :return: tuple of (migrated settings dict, version migrated to)
    """
    migrated = dict(data)  # renamed from ``next``, which shadowed the builtin
    version = migrated.pop('_version', 0)
    target_version = len(_MIGRATIONS)
    migrations = _MIGRATIONS[version:]
    if migrations:
        log.info(
            "Migrating advanced settings from version {} to {}"
            .format(version, target_version))
        for migration in migrations:
            migrated = migration(migrated)
    return migrated, target_version
Check the version integer of the JSON file data and run any necessary migrations to get us to the latest file format. Returns dictionary of settings and version migrated to
def accuracy_helper(egg, match='exact', distance='euclidean', features=None):
    """Compute the mean proportion of words recalled for an egg.

    ``match`` selects the recall-matrix strategy: 'exact' (presented
    and recalled items identical), 'best' (most similar presented
    item) or 'smooth' (weighted average over presented items).

    :param distance: distance function for 'best'/'smooth' matching
    :return: mean proportion of words recalled across lists
    """
    def acc(lst):
        # Unique non-negative entries of a recall row are correct recalls.
        return len([i for i in np.unique(lst) if i >= 0]) / (egg.list_length)

    opts = dict(match=match, distance=distance, features=features)
    # ``match is 'exact'`` relied on CPython string interning; use
    # equality comparison instead.
    if match == 'exact':
        opts.update({'features': 'item'})
    recmat = recall_matrix(egg, **opts)
    if match in ['exact', 'best']:
        result = [acc(lst) for lst in recmat]
    elif match == 'smooth':
        result = np.mean(recmat, axis=1)
    else:
        raise ValueError('Match must be set to exact, best or smooth.')
    return np.nanmean(result, axis=0)
Computes proportion of words recalled Parameters ---------- egg : quail.Egg Data to analyze match : str (exact, best or smooth) Matching approach to compute recall matrix. If exact, the presented and recalled items must be identical (default). If best, the recalled item that is most similar to the presented items will be selected. If smooth, a weighted average of all presented items will be used, where the weights are derived from the similarity between the recalled item and each presented item. distance : str The distance function used to compare presented and recalled items. Applies only to 'best' and 'smooth' matching approaches. Can be any distance function supported by scipy.spatial.distance.cdist. Returns ---------- prop_recalled : numpy array proportion of words recalled
def merge(self, *args):
    """Merge configuration values into this instance, in place.

    String arguments are parsed with ``load_string`` first; values that
    parse to nothing are skipped.  Raises TypeError for values this
    instance cannot merge.

    :param \\*args: Configuration values to merge with current instance.
    :type \\*args: iterable
    """
    for data in args:
        if isinstance(data, str):
            to_merge = load_string(data, self.context)
            if not to_merge:
                continue
        else:
            to_merge = data
        if not self.can_merge(to_merge):
            raise TypeError('Cannot merge myself:%s with %s. data: %s'
                            % (type(self), type(data), data))
        self._merge(to_merge)
Merges this instance with new instances, in-place. :param \\*args: Configuration values to merge with current instance. :type \\*args: iterable
def _connect_temporarily(self, port_v, target=True):
    """Temporarily connect the current connection to the given port.

    :param rafcon.gui.mygaphas.items.ports.PortView port_v: The port to be connected
    :param bool target: Whether the connection origin or target should be connected
    """
    if target:
        handle = self._connection_v.to_handle()
    else:
        handle = self._connection_v.from_handle()
    # moving=True marks the handle as in-flight for the temporary link.
    port_v.add_connected_handle(handle, self._connection_v, moving=True)
    port_v.tmp_connect(handle, self._connection_v)
    self._connection_v.set_port_for_handle(port_v, handle)
    # Redraw so the temporary connection becomes visible immediately.
    self._redraw_port(port_v)
Set a connection between the current connection and the given port :param rafcon.gui.mygaphas.items.ports.PortView port_v: The port to be connected :param bool target: Whether the connection origin or target should be connected
def read(self, file):
    """Read, validate and parse a captions file; return self for chaining."""
    raw = self._read_content(file)
    self._validate(raw)
    self._parse(raw)
    return self
Reads the captions file.
def set_trace(self, frame=None):
    """Remember the starting frame, then delegate to the base set_trace.

    This is used with pytest, which does not use pdb.set_trace().
    """
    if hasattr(local, '_pdbpp_completing'):
        # A completion is in progress; don't re-enter the debugger.
        return
    if frame is None:
        # Default to the caller's frame.
        frame = sys._getframe().f_back
    self._via_set_trace_frame = frame
    return super(Pdb, self).set_trace(frame)
Remember starting frame. This is used with pytest, which does not use pdb.set_trace().
def get_labels(input_dir):
    """Load the label list from the latest preprocessed output dir.

    :param input_dir: root directory containing preprocessed data dirs
    :return: list of label strings, one per line of the ``labels`` file
    """
    labels_path = os.path.join(_get_latest_data_dir(input_dir), 'labels')
    with file_io.FileIO(labels_path, 'r') as f:
        return f.read().rstrip().split('\n')
Get a list of labels from preprocessed output dir.
def set_high_water_mark(socket, config):
    """Apply the configured high water mark to a zmq socket.

    Works with both zeromq2 (single HWM option) and zeromq3 (separate
    send/receive options).  No-op when no mark is configured.
    """
    hwm = config['high_water_mark']
    if not hwm:
        return
    if hasattr(zmq, 'HWM'):
        socket.setsockopt(zmq.HWM, hwm)
    else:
        socket.setsockopt(zmq.SNDHWM, hwm)
        socket.setsockopt(zmq.RCVHWM, hwm)
Set a high water mark on the zmq socket. Do so in a way that is cross-compatible with zeromq2 and zeromq3.
def coords_intersect(self):
    """Coordinates of the arrays in this list that are used in all arrays."""
    coord_sets = (set(getattr(arr, 'coords_intersect', arr.coords))
                  for arr in self)
    return set.intersection(*coord_sets)
Coordinates of the arrays in this list that are used in all arrays