code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _score(cluster): x, y = zip(*cluster)[:2] return min(len(set(x)), len(set(y)))
score of the cluster, in this case, is the number of non-repetitive matches
def sadd(self, name, values, *args):
    """Add the specified members to the Set.

    :param name: str the name of the redis key
    :param values: a list of values or a simple value.
    :return: Future()
    """
    with self.pipe as pipe:
        encoded = [
            self.valueparse.encode(value)
            for value in self._parse_values(values, args)
        ]
        return pipe.sadd(self.redis_key(name), *encoded)
Add the specified members to the Set. :param name: str the name of the redis key :param values: a list of values or a simple value. :return: Future()
def encode_signature(sig_r, sig_s):
    """Encode an ECDSA (r, s) signature pair as base64, normalising to low-s.

    :param sig_r: int, the r component of the signature
    :param sig_s: int, the s component; flipped to the low-s form if high
    :return: base64-encoded 64-byte signature
    """
    if sig_s * 2 >= SECP256k1_order:
        log.debug("High-S to low-S")
        sig_s = SECP256k1_order - sig_s
    # str.decode('hex') is Python-2-only; bytes.fromhex is the portable form.
    sig_bin = bytes.fromhex('{:064x}{:064x}'.format(sig_r, sig_s))
    assert len(sig_bin) == 64
    sig_b64 = base64.b64encode(sig_bin)
    return sig_b64
Encode an ECDSA signature, with low-s
def _ScanEncryptedVolumeNode(self, scan_context, scan_node):
    """Scans an encrypted volume node for supported formats.

    Args:
      scan_context (SourceScannerContext): source scanner context.
      scan_node (SourceScanNode): source scan node.

    Raises:
      BackEndError: if the scan node cannot be unlocked.
    """
    if scan_node.type_indicator == definitions.TYPE_INDICATOR_APFS_CONTAINER:
        container_file_entry = resolver.Resolver.OpenFileEntry(
            scan_node.path_spec, resolver_context=self._resolver_context)
        fsapfs_volume = container_file_entry.GetAPFSVolume()
        try:
            is_locked = not apfs_helper.APFSUnlockVolume(
                fsapfs_volume, scan_node.path_spec, resolver.Resolver.key_chain)
        except IOError as exception:
            raise errors.BackEndError(
                'Unable to unlock APFS volume with error: {0!s}'.format(exception))
    else:
        file_object = resolver.Resolver.OpenFileObject(
            scan_node.path_spec, resolver_context=self._resolver_context)
        is_locked = not file_object or file_object.is_locked
        # Bug fix: the original unconditionally called file_object.close(),
        # which raises AttributeError when OpenFileObject() returned None.
        if file_object:
            file_object.close()
    if is_locked:
        scan_context.LockScanNode(scan_node.path_spec)
        # For BDE, re-scan so an unlocked file system can still be attached.
        if scan_node.type_indicator == definitions.TYPE_INDICATOR_BDE:
            path_spec = self.ScanForFileSystem(scan_node.path_spec.parent)
            if path_spec:
                scan_context.AddScanNode(path_spec, scan_node.parent_node)
Scans an encrypted volume node for supported formats. Args: scan_context (SourceScannerContext): source scanner context. scan_node (SourceScanNode): source scan node. Raises: BackEndError: if the scan node cannot be unlocked. ValueError: if the scan context or scan node is invalid.
def assert_not_in(first, second, msg_fmt="{msg}"):
    """Fail if *first* is contained in the collection *second*.

    Supported msg_fmt arguments: msg (default message), first, second.
    """
    if first not in second:
        return
    default_msg = "{!r} is in {!r}".format(first, second)
    fail(msg_fmt.format(msg=default_msg, first=first, second=second))
Fail if first is in a collection second. >>> assert_not_in("bar", [4, "foo", {}]) >>> assert_not_in("foo", [4, "foo", {}]) Traceback (most recent call last): ... AssertionError: 'foo' is in [4, 'foo', {}] The following msg_fmt arguments are supported: * msg - the default error message * first - the element looked for * second - the container looked in
def find_newline(source):
    """Return the dominant newline type used in *source*.

    Input is a list of lines; defaults to LF when no known line ending
    is found.
    """
    # Guard against being passed a single text string instead of a list
    # of lines. The original Python-2-only check used the removed
    # `unicode` type; `str` is the Python 3 equivalent of that intent.
    assert not isinstance(source, str)
    counter = collections.defaultdict(int)
    for line in source:
        if line.endswith(CRLF):
            counter[CRLF] += 1
        elif line.endswith(CR):
            counter[CR] += 1
        elif line.endswith(LF):
            counter[LF] += 1
    return (sorted(counter, key=counter.get, reverse=True) or [LF])[0]
Return type of newline used in source. Input is a list of lines.
def _apply_sort(cursor, sort_by, sort_direction):
    """Apply a sort to a pymongo cursor.

    :param cursor: The cursor to apply sort on.
    :param sort_by: The field name to sort by.
    :param sort_direction: "asc" or "desc" (defaults to ascending).
    :return: the sorted cursor
    """
    descending = sort_direction is not None and sort_direction.lower() == "desc"
    direction = pymongo.DESCENDING if descending else pymongo.ASCENDING
    return cursor.sort(sort_by, direction)
Apply sort to a cursor. :param cursor: The cursor to apply sort on. :param sort_by: The field name to sort by. :param sort_direction: The direction to sort, "asc" or "desc". :return:
def _encode_payload(data, headers=None):
    "Wrap data in an SCGI request."
    # CONTENT_LENGTH must come first per the SCGI spec.
    prolog = "CONTENT_LENGTH\0%d\0SCGI\x001\0" % len(data)
    if headers:
        prolog = prolog + _encode_headers(headers)
    return _encode_netstring(prolog) + data
Wrap data in an SCGI request.
def moist_static_energy(heights, temperature, specific_humidity):
    r"""Calculate the moist static energy of parcels.

    Follows equation 3.72 in [Hobbs2006]_:

    .. math:: \text{moist static energy} = c_{pd} T + g z + L_v q

    Parameters
    ----------
    heights : array-like
        Atmospheric height
    temperature : array-like
        Atmospheric temperature
    specific_humidity : array-like
        Atmospheric specific humidity

    Returns
    -------
    `pint.Quantity`
        The moist static energy
    """
    # Fix: the collapsed source left a stray `r` where the raw docstring
    # prefix used to be; the docstring is restored above.
    # Dry static energy (c_pd T + g z) plus the latent-heat term L_v q.
    return (dry_static_energy(heights, temperature)
            + mpconsts.Lv * specific_humidity.to('dimensionless')).to('kJ/kg')
r"""Calculate the moist static energy of parcels. This function will calculate the moist static energy following equation 3.72 in [Hobbs2006]_. Notes ----- .. math::\text{moist static energy} = c_{pd} * T + gz + L_v q * :math:`T` is temperature * :math:`z` is height * :math:`q` is specific humidity Parameters ---------- heights : array-like Atmospheric height temperature : array-like Atmospheric temperature specific_humidity : array-like Atmospheric specific humidity Returns ------- `pint.Quantity` The moist static energy
def add(self, command):
    """Add a command object to the application and return self (fluent)."""
    self.add_command(command.config)
    command.set_application(self)
    return self
Adds a command object.
def satellite(isochrone, kernel, stellar_mass, distance_modulus, **kwargs):
    """Wrap the isochrone and kernel simulate functions.

    Simulates photometry from the isochrone and sky positions from the
    kernel, one position pair per simulated star.
    """
    mag_1, mag_2 = isochrone.simulate(stellar_mass, distance_modulus)
    lon, lat = kernel.simulate(len(mag_1))
    return mag_1, mag_2, lon, lat
Wrapping the isochrone and kernel simulate functions.
def get_translated_items(fapi, file_uri, use_cache, cache_dir=None):
    """Return translated items for *file_uri*, using a local cache when enabled."""
    cache_file = os.path.join(cache_dir, sha1(file_uri)) if use_cache else None
    items = None
    if use_cache and os.path.exists(cache_file):
        print("Using cache file %s for translated items for: %s" % (cache_file, file_uri))
        items = json.loads(read_from_file(cache_file))
    if not items:
        print("Downloading %s from smartling" % file_uri)
        (response, code) = fapi.last_modified(file_uri)
        items = response.data.items
        if cache_file:
            print("Caching %s to %s" % (file_uri, cache_file))
            write_to_file(cache_file, json.dumps(items))
    return items
Returns the translated items for the given file URI from Smartling, using a local cache file when enabled
def reset_coords(self, names=None, drop=False, inplace=None):
    """Given names of coordinates, reset them to become variables.

    Parameters
    ----------
    names : str or list of str, optional
        Name(s) of non-index coordinates to reset into variables.
        By default, all non-index coordinates are reset.
    drop : bool, optional
        If True, remove coordinates instead of converting them into
        variables.
    inplace : bool, optional
        If True, modify this object in place (only valid with drop=True).

    Returns
    -------
    Dataset, or DataArray if ``drop == True``
    """
    inplace = _check_inplace(inplace)
    if inplace and not drop:
        raise ValueError('cannot reset coordinates in-place on a '
                         'DataArray without ``drop == True``')
    if names is None:
        # Default to every non-index coordinate.
        names = set(self.coords) - set(self.dims)
    dataset = self.coords.to_dataset().reset_coords(names, drop)
    if drop:
        if inplace:
            self._coords = dataset._variables
        else:
            return self._replace(coords=dataset._variables)
    else:
        if self.name is None:
            # Typo fix: the message previously read "DataArrray".
            raise ValueError('cannot reset_coords with drop=False '
                             'on an unnamed DataArray')
        dataset[self.name] = self.variable
        return dataset
Given names of coordinates, reset them to become variables. Parameters ---------- names : str or list of str, optional Name(s) of non-index coordinates in this dataset to reset into variables. By default, all non-index coordinates are reset. drop : bool, optional If True, remove coordinates instead of converting them into variables. inplace : bool, optional If True, modify this dataset inplace. Otherwise, create a new object. Returns ------- Dataset, or DataArray if ``drop == True``
def getMaxWidth(self, rows):
    'Return the maximum length of any cell in column or its header.'
    width = 0
    if len(rows) > 0:
        longest_cell = max(len(self.getDisplayValue(row)) for row in rows)
        # +2 leaves padding around the widest value.
        width = max(longest_cell, len(self.name)) + 2
    return max(width, len(self.name))
Return the maximum length of any cell in column or its header.
def _load_prefix_binding(self):
    """(Re)register the key binding that enters prefix mode."""
    pymux = self.pymux

    # Remove any previously registered prefix binding first.
    if self._prefix_binding:
        self.custom_key_bindings.remove_binding(self._prefix_binding)

    @self.custom_key_bindings.add(
        *self._prefix,
        filter=~(HasPrefix(pymux) | has_focus(COMMAND) | has_focus(PROMPT) |
                 WaitsForConfirmation(pymux)))
    def enter_prefix_handler(event):
        " Enter prefix mode. "
        pymux.get_client_state().has_prefix = True

    self._prefix_binding = enter_prefix_handler
Load the prefix key binding.
def session(self, auth=None):
    """Get a dict of the current authenticated user's session information.

    :param auth: Tuple of username and password.
    :type auth: Optional[Tuple[str,str]]
    :rtype: User
    """
    url = '{server}{auth_url}'.format(**self._options)
    if isinstance(self._session.auth, tuple) or auth:
        credentials = auth if auth else self._session.auth
        username, password = credentials
        payload = json.dumps({'username': username, 'password': password})
        r = self._session.post(url, data=payload)
    else:
        r = self._session.get(url)
    return User(self._options, self._session, json_loads(r))
Get a dict of the current authenticated user's session information. :param auth: Tuple of username and password. :type auth: Optional[Tuple[str,str]] :rtype: User
def find_models(self, constructor, constraints=None, *, columns=None,
                order_by=None, limiting=None, table_name=None):
    """Specialization of DataAccess.find_all that yields models, not cursor rows."""
    table = table_name or constructor.table_name
    return self._find_models(
        constructor, table, constraints,
        columns=columns, order_by=order_by, limiting=limiting)
Specialization of DataAccess.find_all that returns models instead of cursor objects.
def bandpass_filter_matrix(matrix, tr=1, lowf=0.01, highf=0.1, order=3):
    """Bandpass filter each column of the input time-series matrix.

    Arguments
    ---------
    matrix : 2D array, one time series per column
    tr : sampling time interval (inverse of sampling rate)
    lowf : low frequency cutoff
    highf : high frequency cutoff
    order : order of the Butterworth filter run using `filtfilt`

    Returns
    -------
    filtered matrix
    """
    from scipy.signal import butter, filtfilt

    def _design_bandpass(lowcut, highcut, fs, order):
        # Normalise cutoffs by the Nyquist frequency.
        nyquist = 0.5 * fs
        return butter(order, [lowcut / nyquist, highcut / nyquist], btype='band')

    fs = 1 / tr
    # Identical coefficients for every column, so design the filter once.
    b, a = _design_bandpass(lowf, highf, fs, order)
    filtered = matrix.copy()
    for column in range(matrix.shape[1]):
        filtered[:, column] = filtfilt(b, a, matrix[:, column])
    return filtered
Bandpass filter the input time series image ANTsR function: `frequencyFilterfMRI` Arguments --------- image: input time series image tr: sampling time interval (inverse of sampling rate) lowf: low frequency cutoff highf: high frequency cutoff order: order of the butterworth filter run using `filtfilt` Returns ------- filtered matrix Example ------- >>> import numpy as np >>> import ants >>> import matplotlib.pyplot as plt >>> brainSignal = np.random.randn( 400, 1000 ) >>> tr = 1 >>> filtered = ants.bandpass_filter_matrix( brainSignal, tr = tr ) >>> nsamples = brainSignal.shape[0] >>> t = np.linspace(0, tr*nsamples, nsamples, endpoint=False) >>> k = 20 >>> plt.plot(t, brainSignal[:,k], label='Noisy signal') >>> plt.plot(t, filtered[:,k], label='Filtered signal') >>> plt.xlabel('time (seconds)') >>> plt.grid(True) >>> plt.axis('tight') >>> plt.legend(loc='upper left') >>> plt.show()
def build_swagger12_handler(schema):
    """Build a swagger12 handler, or return None when no schema is present.

    :type schema: :class:`pyramid_swagger.model.SwaggerSchema`
    :rtype: :class:`SwaggerHandler` or None
    """
    if not schema:
        return None
    return SwaggerHandler(
        op_for_request=schema.validators_for_request,
        handle_request=handle_request,
        handle_response=validate_response,
    )
Builds a swagger12 handler or returns None if no schema is present. :type schema: :class:`pyramid_swagger.model.SwaggerSchema` :rtype: :class:`SwaggerHandler` or None
def param_help_download(self):
    """Download parameter-definition XML and default-parameter files in a child process."""
    files = []
    vehicles = ['APMrover2', 'ArduCopter', 'ArduPlane', 'ArduSub', 'AntennaTracker']
    for vehicle in vehicles:
        url = 'http://autotest.ardupilot.org/Parameters/%s/apm.pdef.xml' % vehicle
        files.append((url, mp_util.dot_mavproxy("%s.xml" % vehicle)))
        url = 'http://autotest.ardupilot.org/%s-defaults.parm' % vehicle
        # No defaults file is published for the AntennaTracker.
        if vehicle != 'AntennaTracker':
            files.append((url, mp_util.dot_mavproxy("%s-defaults.parm" % vehicle)))
    try:
        child = multiproc.Process(target=mp_util.download_files, args=(files,))
        child.start()
    except Exception as e:
        print(e)
download XML files for parameters
def total_misses(self, filename=None):
    """Return the number of uncovered statements for `filename`,
    or for all files when `filename` is None."""
    if filename is not None:
        return len(self.missed_statements(filename))
    return sum(len(self.missed_statements(f)) for f in self.files())
Return the total number of uncovered statements for the file `filename`. If `filename` is not given, return the total number of uncovered statements for all files.
def _filter(self, text): self.markdown.reset() return self.markdown.convert(text)
Filter markdown.
def load_global_catalog():
    """Return a catalog for the environment-specific Intake directory.

    Falls back to an empty Catalog when the directory does not exist.
    """
    cat_dir = global_data_dir()
    if os.path.isdir(cat_dir):
        return YAMLFilesCatalog(cat_dir)
    return Catalog()
Return a catalog for the environment-specific Intake directory
def format_kwargs(attrs, params):
    """Modify attributes in place with argument values.

    Parameters
    ----------
    attrs : dict
        Attributes to be assigned to function output; values in braces are
        replaced by the corresponding params values.
    params : dict
        A BoundArguments.arguments dictionary storing a function's arguments.
    """
    attrs_mapping = {'cell_methods': {'YS': 'years', 'MS': 'months'},
                     'long_name': {'YS': 'Annual', 'MS': 'Monthly'}}
    for attr_name, template in attrs.items():
        mapping = attrs_mapping.get(attr_name, {})
        substitutions = {}
        for param_name, param_value in params.items():
            if isinstance(param_value, six.string_types) and param_value in mapping.keys():
                # Defer the substitution to the second .format() pass.
                substitutions[param_name] = '{' + param_value + '}'
            else:
                substitutions[param_name] = param_value
        attrs[attr_name] = template.format(**substitutions).format(**mapping)
Modify attribute with argument values. Parameters ---------- attrs : dict Attributes to be assigned to function output. The values of the attributes in braces will be replaced by the corresponding args values. params : dict A BoundArguments.arguments dictionary storing a function's arguments.
def main():
    """
    NAME
        vector_mean.py

    DESCRIPTION
        calculates vector mean of vector data

    INPUT FORMAT
        takes dec, inc, int from an input file

    SYNTAX
        vector_mean.py [command line options] [< filename]

    OPTIONS
        -h prints help message and quits
        -f FILE, specify input file
        -F FILE, specify output file
        < filename for reading from standard input

    OUTPUT
        mean dec, mean inc, R, N
    """
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-f' in sys.argv:
        ind = sys.argv.index('-f')
        file = sys.argv[ind + 1]
    else:
        file = sys.stdin
    ofile = ""
    if '-F' in sys.argv:
        ind = sys.argv.index('-F')
        ofile = sys.argv[ind + 1]
        # Bug fix: 'w + a' is not a valid mode string and raises ValueError
        # in Python 3; 'w' preserves the effective truncate-write behaviour.
        out = open(ofile, 'w')
    # numpy.float was removed in NumPy 1.24; the builtin float is equivalent.
    DIIs = numpy.loadtxt(file, dtype=float)
    vpars, R = pmag.vector_mean(DIIs)
    outstring = '%7.1f %7.1f %10.3e %i' % (vpars[0], vpars[1], R, len(DIIs))
    if ofile == "":
        print(outstring)
    else:
        out.write(outstring + "\n")
NAME vector_mean.py DESCRIPTION calculates vector mean of vector data INPUT FORMAT takes dec, inc, int from an input file SYNTAX vector_mean.py [command line options] [< filename] OPTIONS -h prints help message and quits -f FILE, specify input file -F FILE, specify output file < filename for reading from standard input OUTPUT mean dec, mean inc, R, N
def request_middleware(api=None):
    """Register a middleware function that is called on every request."""
    def decorator(middleware_method):
        target_api = hug.API(api) if api else hug.api.from_object(middleware_method)

        class MiddlewareRouter(object):
            __slots__ = ()

            def process_request(self, request, response):
                return middleware_method(request, response)

        target_api.http.add_middleware(MiddlewareRouter())
        return middleware_method

    return decorator
Registers a middleware function that will be called on every request
def consolidate_args(args):
    """Consolidate plugin-related argparse fields into ``args.plugins``.

    Each plugin's disable flag and related arguments are removed from
    *args*; enabled plugins and their (possibly defaulted) related args
    are collected into ``args.plugins`` and ``args.is_using_default_value``.

    :param args: output of `argparse.ArgumentParser.parse_args`
    """
    # Sentinel attribute: absent when plugin flags were never registered.
    if not hasattr(args, 'hex_limit'):
        return
    active_plugins = {}
    is_using_default_value = {}
    for plugin in PluginOptions.all_plugins:
        disable_arg = PluginOptions._convert_flag_text_to_argument_name(
            plugin.disable_flag_text,
        )
        is_disabled = getattr(args, disable_arg, False)
        delattr(args, disable_arg)
        if is_disabled:
            continue
        related_args = {}
        for related_arg_tuple in plugin.related_args:
            try:
                flag_name, default_value = related_arg_tuple
            except ValueError:
                # A bare flag name with no declared default.
                flag_name = related_arg_tuple
                default_value = None
            arg_name = PluginOptions._convert_flag_text_to_argument_name(
                flag_name,
            )
            related_args[arg_name] = getattr(args, arg_name)
            delattr(args, arg_name)
            if default_value and related_args[arg_name] is None:
                related_args[arg_name] = default_value
                is_using_default_value[arg_name] = True
        active_plugins[plugin.classname] = related_args
    args.plugins = active_plugins
    args.is_using_default_value = is_using_default_value
There are many argument fields related to configuring plugins. This function consolidates all of them, and saves the consolidated information in args.plugins. Note that we're deferring initialization of those plugins, because plugins may have various initialization values, referenced in different places. :param args: output of `argparse.ArgumentParser.parse_args`
def get_sqlite_core(connection_string, *, cursor_factory=None, edit_connection=None):
    """Create a simple SQLite3 data-access core."""
    import sqlite3 as sqlite

    def opener():
        connection = sqlite.connect(connection_string)
        if cursor_factory:
            connection.row_factory = cursor_factory
        if edit_connection:
            # Caller hook for per-connection tweaks (pragmas, adapters, ...).
            edit_connection(connection)
        return connection

    return InjectedDataAccessCore(
        opener,
        default_connection_closer,
        (":{0}", "?", SQL_CAST),
        empty_params=[],
        supports_timezones=True,
        supports_returning_syntax=False,
        get_autocommit=get_sqlite_autocommit,
        set_autocommit=set_sqlite_autocommit)
Creates a simple SQLite3 core.
def generate_data_for_create_page(self):
    """Dictionary representation of the table's fields for the create page.

    Uses the create form when one exists, otherwise the default
    representation.

    :return: dict
    """
    if not self.can_create:
        return {}
    form = self.create_form
    if form:
        return form.to_dict()
    return self.generate_simple_data_page()
Generate a dictionary representation of the table's fields: use the create form if one exists, otherwise fall back to the default representation. :return: dict
def read_data(filename, data_format=None):
    """Read image data from *filename*.

    The format can be passed in ``data_format``; otherwise it is guessed
    from the file extension.

    :param filename: filename to read data from
    :param data_format: MimeType of the file, or None to auto-detect
    :return: data read from filename
    :raises ValueError: if the file does not exist or the format is unsupported
    """
    if not os.path.exists(filename):
        raise ValueError('Filename {} does not exist'.format(filename))
    fmt = data_format if isinstance(data_format, MimeType) else get_data_format(filename)
    if fmt.is_tiff_format():
        return read_tiff_image(filename)
    if fmt is MimeType.JP2:
        return read_jp2_image(filename)
    if fmt.is_image_format():
        return read_image(filename)
    readers = {
        MimeType.TXT: read_text,
        MimeType.CSV: read_csv,
        MimeType.JSON: read_json,
        MimeType.XML: read_xml,
        MimeType.GML: read_xml,
        MimeType.SAFE: read_xml,
    }
    try:
        return readers[fmt](filename)
    except KeyError:
        raise ValueError('Reading data format .{} is not supported'.format(fmt.value))
Read image data from file This function reads input data from file. The format of the file can be specified in ``data_format``. If not specified, the format is guessed from the extension of the filename. :param filename: filename to read data from :type filename: str :param data_format: format of filename. Default is ``None`` :type data_format: MimeType :return: data read from filename :raises: exception if filename does not exist
def absolutify(url):
    """Take a URL and prepend the SITE_URL (built from settings if unset)."""
    site_url = getattr(settings, 'SITE_URL', False)
    if not site_url:
        protocol = settings.PROTOCOL
        hostname = settings.DOMAIN
        port = settings.PORT
        # Omit the port when it is the scheme's default.
        if (protocol, port) in (('https://', 443), ('http://', 80)):
            site_url = str(protocol) + str(hostname)
        else:
            site_url = str(protocol) + str(hostname) + ':' + str(port)
    return site_url + url
Takes a URL and prepends the SITE_URL
def detectBlackBerry(self):
    """Detect any BlackBerry browser (includes the PlayBook)."""
    return (UAgentInfo.deviceBB in self.__userAgent
            or UAgentInfo.vndRIM in self.__httpAccept)
Return detection of Blackberry Detects if the current browser is any BlackBerry. Includes the PlayBook.
def get_config(self):
    """Return the current configuration as a dict."""
    return {
        'location': self.location,
        'language': self.language,
        'topic': self.topic,
    }
function to get current configuration
def get_raw_token(self, header):
    """Extract the unvalidated JWT from the given "Authorization" header value.

    Returns None when the header is empty or uses an unknown auth type;
    raises AuthenticationFailed for a malformed header.
    """
    parts = header.split()
    if not parts:
        return None
    if parts[0] not in AUTH_HEADER_TYPE_BYTES:
        # Some other kind of Authorization header; not ours to handle.
        return None
    if len(parts) == 2:
        return parts[1]
    raise AuthenticationFailed(
        _('Authorization header must contain two space-delimited values'),
        code='bad_authorization_header',
    )
Extracts an unvalidated JSON web token from the given "Authorization" header value.
def tensor(self, field_name, tensor_ind):
    """Return the tensor for a given field and tensor index.

    One tensor file per field is cached; a different index triggers a
    reload into the preallocated buffer.
    """
    cached_index = self._tensor_cache_file_num[field_name]
    if tensor_ind != cached_index:
        filename = self.generate_tensor_filename(field_name, tensor_ind, compressed=True)
        Tensor.load(filename, compressed=True, prealloc=self._tensors[field_name])
        self._tensor_cache_file_num[field_name] = tensor_ind
    return self._tensors[field_name]
Returns the tensor for a given field and tensor index. Parameters ---------- field_name : str the name of the field to load tensor_index : int the index of the tensor Returns ------- :obj:`Tensor` the desired tensor
def _detect_gamepads(self):
    """Find connected XInput gamepads (slots 0-3)."""
    state = XinputState()
    for device_number in range(4):
        res = self.xinput.XInputGetState(device_number, ctypes.byref(state))
        if res == XINPUT_ERROR_SUCCESS:
            device_path = (
                "/dev/input/by_id/"
                + "usb-Microsoft_Corporation_Controller_%s-event-joystick"
                % device_number)
            self.gamepads.append(GamePad(self, device_path))
            continue
        if res != XINPUT_ERROR_DEVICE_NOT_CONNECTED:
            raise RuntimeError(
                "Unknown error %d attempting to get state of device %d"
                % (res, device_number))
Find gamepads.
def render_check_and_set_platforms(self):
    """Configure the check_and_set_platforms plugin when it is present."""
    phase, plugin = 'prebuild_plugins', 'check_and_set_platforms'
    if not self.pt.has_plugin_conf(phase, plugin):
        return
    koji_target = self.user_params.koji_target.value
    if koji_target:
        self.pt.set_plugin_arg(phase, plugin, "koji_target", koji_target)
If the check_and_set_platforms plugin is present, configure it
def manage_mep(self, mep_json):
    """Import a MEP as a representative from the parltrack JSON dict."""
    responses = representative_pre_import.send(
        sender=self, representative_data=mep_json)
    # Any receiver may veto the import by returning False.
    for receiver, response in responses:
        if response is False:
            logger.debug('Skipping MEP %s', mep_json['Name']['full'])
            return
    changed = False
    slug = slugify('%s-%s' % (
        mep_json["Name"]["full"] if 'full' in mep_json["Name"]
        else mep_json["Name"]["sur"] + " " + mep_json["Name"]["family"],
        _parse_date(mep_json["Birth"]["date"])
    ))
    try:
        representative = Representative.objects.get(slug=slug)
    except Representative.DoesNotExist:
        representative = Representative(slug=slug)
        changed = True
    self.import_representative_details(representative, mep_json, changed)
    self.add_mandates(representative, mep_json)
    self.add_contacts(representative, mep_json)
    # Bug fix: `unicode()` is Python-2-only; logging's lazy %s formatting
    # converts the argument itself.
    logger.debug('Imported MEP %s', representative)
    return representative
Import a mep as a representative from the json dict fetched from parltrack
def long_form_multiple_formats(jupytext_formats, metadata=None):
    """Expand a concise jupytext.formats encoding into a list of format dicts."""
    if not jupytext_formats:
        return []
    if isinstance(jupytext_formats, list):
        formats = jupytext_formats
    else:
        # Comma-separated string form; drop empty entries.
        formats = [fmt for fmt in jupytext_formats.split(',') if fmt]
    return [long_form_one_format(fmt, metadata) for fmt in formats]
Convert a concise encoding of jupytext.formats to a list of formats, encoded as dictionaries
def _generate_initial_model(self):
    """Create and score the initial model for the optimisation.

    Raises
    ------
    TypeError
        Raised if the model failed to build, e.g. when the parameters do
        not match the specification's expected arguments.
    """
    initial_parameters = [p.current_value for p in self.current_parameters]
    try:
        initial_model = self.specification(*initial_parameters)
    except TypeError:
        # Typo fix: the message previously read "arguements".
        raise TypeError(
            'Failed to build initial model. Make sure that the input '
            'parameters match the number and order of arguments '
            'expected by the input specification.')
    initial_model.pack_new_sequences(self.sequences)
    self.current_energy = self.eval_function(initial_model)
    # Seed the "best so far" state from the initial evaluation.
    self.best_energy = copy.deepcopy(self.current_energy)
    self.best_parameters = copy.deepcopy(self.current_parameters)
    self.best_model = initial_model
Creates the initial model for the optimistation. Raises ------ TypeError Raised if the model failed to build. This could be due to parameters being passed to the specification in the wrong format.
def GetNetworks(alias=None, location=None):
    """List the Networks mapped to the account in the specified datacenter.

    https://t3n.zendesk.com/entries/21024721-Get-Networks

    :param alias: short code for a particular account; defaults to the
        account's default alias
    :param location: datacenter; defaults to the account's primary datacenter
    """
    if alias is None:
        alias = clc.v1.Account.GetAlias()
    if location is None:
        location = clc.v1.Account.GetLocation()
    payload = {'AccountAlias': alias, 'Location': location}
    r = clc.v1.API.Call('post', 'Network/GetAccountNetworks', payload)
    # Returns None implicitly on a non-zero status code.
    if int(r['StatusCode']) == 0:
        return r['Networks']
Gets the list of Networks mapped to the account in the specified datacenter. https://t3n.zendesk.com/entries/21024721-Get-Networks :param alias: short code for a particular account. If none will use account's default alias :param location: datacenter where group resides. If none will use account's primary datacenter
def construct_s3_location_object(location_uri, logical_id, property_name):
    """Construct a Lambda `Code` or `Content` dict from a CodeUri/ContentUri.

    :param dict or string location_uri: s3 location dict or string
    :param string logical_id: logical_id of the resource calling this function
    :param string property_name: name of the input property
    :returns: a Code dict with the S3 Bucket, Key and optional Version
    :rtype: dict
    """
    if isinstance(location_uri, dict):
        if not location_uri.get("Bucket") or not location_uri.get("Key"):
            raise InvalidResourceException(
                logical_id,
                "'{}' requires Bucket and Key properties to be specified".format(property_name))
        s3_pointer = location_uri
    else:
        s3_pointer = parse_s3_uri(location_uri)
        if s3_pointer is None:
            raise InvalidResourceException(
                logical_id,
                '\'{}\' is not a valid S3 Uri of the form '
                '"s3://bucket/key" with optional versionId query '
                'parameter.'.format(property_name))
    code = {'S3Bucket': s3_pointer['Bucket'], 'S3Key': s3_pointer['Key']}
    if 'Version' in s3_pointer:
        code['S3ObjectVersion'] = s3_pointer['Version']
    return code
Constructs a Lambda `Code` or `Content` property, from the SAM `CodeUri` or `ContentUri` property. This follows the current scheme for Lambda Functions and LayerVersions. :param dict or string location_uri: s3 location dict or string :param string logical_id: logical_id of the resource calling this function :param string property_name: name of the property which is used as an input to this function. :returns: a Code dict, containing the S3 Bucket, Key, and Version of the Lambda layer code :rtype: dict
def _pys_assert_version(self, line): if float(line.strip()) > 1.0: msg = _("File version {version} unsupported (>1.0).").format( version=line.strip()) raise ValueError(msg)
Asserts pys file version
def split_result_of_axis_func_pandas(axis, num_splits, result, length_list=None):
    """Split a Pandas result evenly into `num_splits` frames along `axis`.

    Args:
        axis: The axis to split across.
        num_splits: The number of even splits to create.
        result: A Pandas DataFrame.
        length_list: Optional explicit split lengths (restores the original
            partitioning schema).

    Returns:
        A list of Pandas DataFrames (or `result` itself when num_splits == 1).
    """
    if num_splits == 1:
        return result
    if length_list is not None:
        # Prefix with 0 so cumulative sums become slice boundaries.
        length_list.insert(0, 0)
        bounds = np.cumsum(length_list)
        if axis == 0:
            return [result.iloc[bounds[i]:bounds[i + 1]]
                    for i in range(len(bounds) - 1)]
        return [result.iloc[:, bounds[i]:bounds[i + 1]]
                for i in range(len(bounds) - 1)]
    chunksize = compute_chunksize(result, num_splits, axis=axis)
    if axis == 0:
        return [result.iloc[chunksize * i:chunksize * (i + 1)]
                for i in range(num_splits)]
    return [result.iloc[:, chunksize * i:chunksize * (i + 1)]
            for i in range(num_splits)]
Split the Pandas result evenly based on the provided number of splits. Args: axis: The axis to split across. num_splits: The number of even splits to create. result: The result of the computation. This should be a Pandas DataFrame. length_list: The list of lengths to split this DataFrame into. This is used to return the DataFrame to its original partitioning schema. Returns: A list of Pandas DataFrames.
def load_from_file(self, path):
    """Load cookies from *path* (a JSON-serialized list of dicts)."""
    with open(path) as inf:
        raw = inf.read()
    items = json.loads(raw) if raw else {}
    for item in items:
        # Everything beyond name/value/domain is passed through as extras.
        extra = {k: v for k, v in item.items()
                 if k not in ['name', 'value', 'domain']}
        self.set(item['name'], item['value'], item['domain'], **extra)
Load cookies from the file. Content of file should be a JSON-serialized list of dicts.
def pdf(self, resource_id):
    """Update the request URI to get the pdf for this resource.

    Args:
        resource_id (integer): The group id.
    """
    self.resource_id(str(resource_id))
    base_uri = self._request_uri
    self._request_uri = '{}/pdf'.format(base_uri)
Update the request URI to get the pdf for this resource. Args: resource_id (integer): The group id.
def removeAllChildrenAtIndex(self, parentIndex):
    """Remove all children of the item at *parentIndex*.

    The children's finalize method is called before removal (inside
    removeAllChildren) so they can release their resources.
    """
    if not parentIndex.isValid():
        logger.debug("No valid item selected for deletion (ignored).")
        return
    parentItem = self.getItem(parentIndex, None)
    logger.debug("Removing children of {!r}".format(parentItem))
    assert parentItem, "parentItem not found"
    # Signal the view before/after mutating the model rows.
    self.beginRemoveRows(parentIndex, 0, parentItem.nChildren() - 1)
    try:
        parentItem.removeAllChildren()
    finally:
        self.endRemoveRows()
    logger.debug("removeAllChildrenAtIndex completed")
Removes all children of the item at the parentIndex. The children's finalize method is called before removing them to give them a chance to close their resources
def _init_ws(n_items, comparisons, prior_inv, tau, nu):
    """Initialize parameters in the weight space.

    Returns (mean, cov, xs, prec) of the Gaussian posterior approximation.
    """
    prec = np.zeros((n_items, n_items))
    xs = np.zeros(n_items)
    for idx, (a, b) in enumerate(comparisons):
        # Accumulate the 2x2 block for the (a, b) pair in one fancy-index write.
        prec[(a, a, b, b), (a, b, a, b)] += tau[idx] * MAT_ONE_FLAT
        xs[a] += nu[idx]
        xs[b] -= nu[idx]
    cov = inv_posdef(prior_inv + prec)
    mean = cov.dot(xs)
    return mean, cov, xs, prec
Initialize parameters in the weight space.
def sign(self, h):
    """Return a der-encoded signature for a hash h.

    Raises RuntimeError if this key is not a private key.
    """
    if not self.is_private():
        raise RuntimeError("Key must be private to be able to sign")
    value = from_bytes_32(h)
    r, s = self._generator.sign(self.secret_exponent(), value)
    return sigencode_der(r, s)
Return a der-encoded signature for a hash h. Will throw a RuntimeError if this key is not a private key
def store_checksums(dataset_name, sizes_checksums):
    """Merge the given url sizes/checksums into the dataset's checksum file.

    Existing content is kept and updated, never discarded; the file is
    rewritten only when something actually changed.

    Args:
        dataset_name: string.
        sizes_checksums: dict, {url: (size_in_bytes, checksum)}.
    """
    path = _get_path(dataset_name)
    recorded = _get_sizes_checksums(path)
    merged = recorded.copy()
    merged.update(sizes_checksums)
    if merged == recorded:
        return
    with tf.io.gfile.GFile(path, 'w') as f:
        for url, (size, checksum) in sorted(merged.items()):
            f.write('%s %s %s\n' % (url, size, checksum))
Store given checksums and sizes for specific dataset. Content of file is never discarded, only updated. This is to ensure that if process is killed right after first download finishes, checksums registered during previous runs aren't lost. It is the responsibility of the caller not to call function multiple times in parallel for a given dataset. Only original file content is updated. This means the entire set of new sizes and checksums must be given at every call. Args: dataset_name: string. sizes_checksums: dict, {url: (size_in_bytes, checksum)}.
def prepare_release(ver=None):
    """Prepare release artifacts: changelog commit, tag, package."""
    write_changelog(True)
    if ver is None:
        ver = next_release()
    print('saving updates to ChangeLog')
    run('git commit ChangeLog -m "[RELEASE] Update to version v{}"'.format(ver), hide=True)
    sha = run('git log -1 --pretty=format:"%h"', hide=True).stdout
    # Tag the changelog commit so packaging sees the release version.
    run('git tag -a "{ver}" -m "version {ver}" {sha}'.format(ver=ver, sha=sha), hide=True)
    package()
    write_changelog()
    # Drop the temporary tag and fold the regenerated changelog into the commit.
    run('git tag -d {}'.format(ver), hide=True)
    run('git commit --all --amend --no-edit', hide=True)
Prepare release artifacts
def pop_key(self, arg, key, *args, **kwargs):
    """Delete a previously defined key for the `add_argument` call of *arg*."""
    argument_spec = self.unfinished_arguments[arg]
    return argument_spec.pop(key, *args, **kwargs)
Delete a previously defined key for the `add_argument`
def dump(self, output, close_after_write=True):
    """Write data to the output with tabular format.

    Args:
        output (file descriptor or str):
            file descriptor or path to the output file.
        close_after_write (bool, optional):
            Close the output after write. Defaults to True.
    """
    try:
        # Anything with a write attribute is used as a stream directly.
        output.write
        self.stream = output
    except AttributeError:
        self.stream = io.open(output, "w", encoding="utf-8")
    try:
        self.write_table()
    finally:
        if close_after_write:
            self.stream.close()
            self.stream = sys.stdout
Write data to the output with tabular format. Args: output (file descriptor or str): file descriptor or path to the output file. close_after_write (bool, optional): Close the output after write. Defaults to |True|.
def _create_batches(self, instances: Iterable[Instance], shuffle: bool) -> Iterable[Batch]:
    """Return one epoch worth of batches. Subclasses must implement this."""
    raise NotImplementedError
This method should return one epoch worth of batches.
def reload(self, index):
    """Reload the file at *index* from disk, preserving the cursor position."""
    finfo = self.data[index]
    text, finfo.encoding = encoding.read(finfo.filename)
    finfo.lastmodified = QFileInfo(finfo.filename).lastModified()
    editor = finfo.editor
    cursor_position = editor.get_position('cursor')
    editor.set_text(text)
    # Freshly loaded content is neither modified nor pending autosave.
    editor.document().setModified(False)
    editor.document().changed_since_autosave = False
    editor.set_cursor_position(cursor_position)
    editor.rehighlight()
    self._refresh_outlineexplorer(index)
Reload file from disk
def is_client_ip_address_blacklisted(request: AxesHttpRequest) -> bool:
    """Check if the given request refers to a blacklisted IP.

    An IP outside an enforced whitelist also counts as blacklisted.
    """
    ip = request.axes_ip_address
    if is_ip_address_in_blacklist(ip):
        return True
    if settings.AXES_ONLY_WHITELIST and not is_ip_address_in_whitelist(ip):
        return True
    return False
Check if the given request refers to a blacklisted IP.
def bind(self, **config):
    """Bind all unbound typedefs to the engine.

    Each typedef's bind() produces a (load, dump) pair stored in
    ``self.bound_types[typedef]``. On failure the typedef is put back
    into the unbound set and the exception is re-raised.
    """
    while self.unbound_types:
        typedef = self.unbound_types.pop()
        try:
            load, dump = typedef.bind(self, **config)
            self.bound_types[typedef] = {"load": load, "dump": dump}
        except Exception:
            self.unbound_types.add(typedef)
            raise
Bind all unbound types to the engine. Bind each unbound typedef to the engine, passing in the engine and :attr:`config`. The resulting ``load`` and ``dump`` functions can be found under ``self.bound_types[typedef]["load"]`` and ``self.bound_types[typedef]["dump"], respectively. Parameters ---------- config : dict, optional Engine-binding configuration to pass to each typedef that will be bound. Examples include floating-point precision values, maximum lengths for strings, or any other translation constraints/settings that a typedef needs to construct a load/dump function pair.
def ping(self):
    """Ping the server.

    Returns the time, in seconds, the server took to respond to PING.
    """
    if not self.conn:
        self.connect()
    self.conn.send('PING', time.time())
    cmd, payload = self.conn.recv()
    received_at = time.time()
    if cmd != 'PONG':
        raise Exception("Invalid response from server")
    # payload[0] is the timestamp we sent with the PING.
    return received_at - payload[0]
Ping the server. Returns the time interval, in seconds, required for the server to respond to the PING message.
def load_from_json(db_file, language=DEFAULT_LANG):
    """Parse the vulnerability JSON DB file and return its data.

    :param db_file: File and path pointing to the JSON file to parse
    :param language: The user's language (en, es, etc.)
    :raises: All kinds of exceptions if the file doesn't exist or JSON is invalid.
    :return: dict with the parsed vulnerability data
    """
    # Bug fix: the Python-2-only `file()` builtin was used and the handle
    # was never closed; open() in a context manager fixes both.
    with open(db_file) as fh:
        raw = json.loads(fh.read())
    data = {
        '_id': raw['id'],
        'title': raw['title'],
        'description': DBVuln.handle_ref(raw['description'], language=language),
        'severity': raw['severity'],
        'wasc': raw.get('wasc', []),
        'tags': raw.get('tags', []),
        'cwe': raw.get('cwe', []),
        'owasp_top_10': raw.get('owasp_top_10', {}),
        'fix_effort': raw['fix']['effort'],
        'fix_guidance': DBVuln.handle_ref(raw['fix']['guidance'], language=language),
        'references': DBVuln.handle_references(raw.get('references', [])),
        'db_file': db_file,
    }
    return data
Parses the JSON data and returns it :param db_file: File and path pointing to the JSON file to parse :param language: The user's language (en, es, etc.) :raises: All kinds of exceptions if the file doesn't exist or JSON is invalid. :return: A dict with the parsed vulnerability data.
def out(self):
    """Return the generated pattern-matching code for this matcher.

    Emits optional sentinel setup, then one nested ``if`` per non-empty
    check list in ``self.checkdefs`` (tracking how many indent levels
    must be closed), followed by the success assignment, the output of
    any chained matchers, and an optional guard check that can flip the
    check variable back to False.
    """
    out = ""
    if self.use_sentinel:
        # Create the unique sentinel object used by the generated code.
        out += sentinel_var + " = _coconut.object()\n"
    closes = 0  # number of openindent levels that must be closed later
    for checks, defs in self.checkdefs:
        if checks:
            # Each batch of checks becomes one guarded "if" block.
            out += "if " + paren_join(checks, "and") + ":\n" + openindent
            closes += 1
        if defs:
            out += "\n".join(defs) + "\n"
    return out + (
        # Mark the match as successful, then close all opened indents.
        self.check_var + " = True\n" + closeindent * closes
        # Append the code of any other matchers chained to this one.
        + "".join(other.out() for other in self.others)
        + (
            # A failing guard resets the check variable to False.
            "if " + self.check_var + " and not ("
            + paren_join(self.guards, "and") + "):\n"
            + openindent + self.check_var + " = False\n" + closeindent
            if self.guards else ""
        )
    )
Return pattern-matching code.
def _configure_logger_handler(cls, log_dest, log_filename): if log_dest is None: return None msg_format = '%(asctime)s-%(name)s-%(message)s' if log_dest == 'stderr': handler = logging.StreamHandler() handler.setFormatter(logging.Formatter(msg_format)) elif log_dest == 'file': if not log_filename: raise ValueError("Log filename is required if log destination " "is 'file'") handler = logging.FileHandler(log_filename, encoding="UTF-8") handler.setFormatter(logging.Formatter(msg_format)) else: raise ValueError( _format("Invalid log destination: {0!A}; Must be one of: " "{1!A}", log_dest, LOG_DESTINATIONS)) return handler
Return a logging handler for the specified `log_dest`, or `None` if `log_dest` is `None`.
def log(self, level, msg, *args, **kwargs):
    """Log `msg` at `level`, substituting the supplied arguments.

    For FATAL (and higher) records, a marker is placed in the record's
    ``extra`` dict so downstream handlers can recognize the fatal log.

    Args:
      level: int, the standard logging level at which to log the message.
      msg: str, the text of the message to log.
      *args: The arguments to substitute in the message.
      **kwargs: The keyword arguments forwarded to the base logger.
    """
    if level >= logging.FATAL:
        # Flag the record as fatal for handlers that special-case it.
        kwargs.setdefault('extra', {})[_ABSL_LOG_FATAL] = True
    super(ABSLLogger, self).log(level, msg, *args, **kwargs)
Logs a message at a certain level substituting in the supplied arguments.

This method behaves differently in python and c++ modes.

Args:
  level: int, the standard logging level at which to log the message.
  msg: str, the text of the message to log.
  *args: The arguments to substitute in the message.
  **kwargs: The keyword arguments to substitute in the message.
def find_vm_by_name(self, si, path, name):
    """Find a VM in the vCenter by name, or return None.

    :param si: pyvmomi 'ServiceInstance'
    :param path: the path to find the object
        ('dc' or 'dc/folder' or 'dc/folder/folder/etc...')
    :param name: the vm name to return
    """
    vm_type = self.VM
    return self.find_obj_by_path(si, path, name, vm_type)
Finds vm in the vCenter or returns "None" :param si: pyvmomi 'ServiceInstance' :param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...') :param name: the vm name to return
def add_net(self, net):
    """Add a net to the logic of the block.

    The passed net (a LogicNet) is validated via ``sanity_check_net``
    before being added.  No wires are added here; add them separately
    with ``add_wirevector``.
    """
    # Validation raises before the net can reach the logic set.
    self.sanity_check_net(net)
    self.logic.add(net)
Add a net to the logic of the block. The passed net, which must be of type LogicNet, is checked and then added to the block. No wires are added by this member, they must be added seperately with add_wirevector.
def _media(self):
    """Build the editor's form media.

    Combines the base markdown-editor CSS/JS, the optional icon-library
    CSS configured via ``MARKYMARK_FONTAWESOME_CSS``, and the media of
    every registered renderer extension.
    """
    stylesheets = ['markymark/css/markdown-editor.css']
    iconlibrary_css = getattr(
        settings,
        'MARKYMARK_FONTAWESOME_CSS',
        'markymark/fontawesome/fontawesome.min.css'
    )
    if iconlibrary_css:
        stylesheets.append(iconlibrary_css)

    media = forms.Media(
        css={'all': stylesheets},
        js=('markymark/js/markdown-editor.js',)
    )

    # Merge in media contributed by registered renderer extensions.
    for extension in initialize_renderer().registeredExtensions:
        if hasattr(extension, 'media'):
            media += extension.media
    return media
Returns a forms.Media instance with the basic editor media and media from all registered extensions.
def initrepo(repopath, bare, shared):
    """Initialize an activegit repository.

    By default this creates a base shared repo that should be cloned
    for users.

    :param repopath: Filesystem path of the repository to create.
    :param bare: Whether to create a bare repository.
    :param shared: Whether the repository is shared.
    """
    # Constructing ActiveGit performs the initialization as a side
    # effect; the unused local binding was dropped.
    activegit.ActiveGit(repopath, bare=bare, shared=shared)
Initialize an activegit repo. Default makes base shared repo that should be cloned for users
def sysmeta_add_preferred(sysmeta_pyxb, node_urn):
    """Add `node_urn` as a preferred replication target.

    Ensures the System Metadata has a Replication Policy (creating a
    default one when missing), adds the node to the preferred list and
    removes it from the blocked list if present.

    Args:
      sysmeta_pyxb: SystemMetadata PyXB object to modify.
      node_urn: str, Node URN such as ``urn:node:MyMemberNode``.
    """
    # A default policy (replication enabled) is created when none exists.
    if not has_replication_policy(sysmeta_pyxb):
        sysmeta_set_default_rp(sysmeta_pyxb)
    policy_pyxb = sysmeta_pyxb.replicationPolicy
    # Keep the preferred and blocked lists mutually exclusive.
    _add_node(policy_pyxb, 'pref', node_urn)
    _remove_node(policy_pyxb, 'block', node_urn)
Add a remote Member Node to the list of preferred replication targets to this System Metadata object. Also remove the target MN from the list of blocked Member Nodes if present. If the target MN is already in the preferred list and not in the blocked list, this function is a no-op. Args: sysmeta_pyxb : SystemMetadata PyXB object. System Metadata in which to add the preferred replication target. If the System Metadata does not already have a Replication Policy, a default replication policy which enables replication is added and populated with the preferred replication target. node_urn : str Node URN of the remote MN that will be added. On the form ``urn:node:MyMemberNode``.
def output_json(gandi, format, value):
    """Print `value` as JSON.

    :param gandi: CLI object providing an ``echo`` method.
    :param format: 'json' for compact output, 'pretty-json' for an
        indented rendering; any other value prints nothing.
    :param value: The object to serialize.
    """
    if format not in ('json', 'pretty-json'):
        return
    # Shared serialization options; only the pretty variant adds
    # indentation and custom separators (was duplicated per branch).
    dump_kwargs = {'default': date_handler, 'sort_keys': True}
    if format == 'pretty-json':
        dump_kwargs.update(indent=2, separators=(',', ': '))
    gandi.echo(json.dumps(value, **dump_kwargs))
Helper to show json output
def convert_field_to_html(cr, table, field_name, html_field_name):
    """Convert the plaintext values of a field to HTML values.

    Reads every non-NULL `field_name` value from `table`, converts it
    with ``plaintext2html`` and writes the result into
    `html_field_name` on the same row.

    .. versionadded:: 7.0

    :param cr: Database cursor.
    :param table: Name of the table holding both columns.
    :param field_name: Source (plaintext) column name.
    :param html_field_name: Destination (HTML) column name.
    """
    if version_info[0] < 7:
        logger.error("You cannot use this method in an OpenUpgrade version "
                     "prior to 7.0.")
        return
    # NOTE: table/column names are interpolated directly into the SQL
    # (identifiers cannot be bound parameters); they must come from
    # trusted migration code, never user input.  The converted value
    # itself IS passed as a bound parameter (%%s) below.
    cr.execute(
        "SELECT id, %(field)s FROM %(table)s WHERE %(field)s IS NOT NULL" % {
            'field': field_name,
            'table': table,
        }
    )
    for row in cr.fetchall():
        logged_query(
            cr, "UPDATE %(table)s SET %(field)s = %%s WHERE id = %%s" % {
                'field': html_field_name,
                'table': table,
            }, (plaintext2html(row[1]), row[0])
        )
Convert field value to HTML value. .. versionadded:: 7.0
def check_tweet(tweet, validation_checking=False):
    """Validate a tweet payload and report its format.

    Args:
        tweet (dict/Tweet): the tweet payload
        validation_checking (bool): check for valid key structure in
            the tweet.

    Returns:
        bool: True for original-format tweets, False for
        activity-streams format.

    Raises:
        NotATweetError: if the payload has no 'id' key.
    """
    if "id" not in tweet:
        raise NotATweetError("This text has no 'id' key")
    original_format = is_original_format(tweet)
    # Dispatch to the checker matching the detected payload format.
    checker = (_check_original_format_tweet if original_format
               else _check_activity_streams_tweet)
    checker(tweet, validation_checking=validation_checking)
    return original_format
Ensures a tweet is valid and determines the type of format for the tweet. Args: tweet (dict/Tweet): the tweet payload validation_checking (bool): check for valid key structure in a tweet.
def adopt(self):
    """Make terms aware of their children.

    For every relation that has a known complementary relation, the
    complementary link is added on the parent term.  This is done
    automatically when using the `~Ontology.merge` and
    `~Ontology.include` methods as well as the `~Ontology.__init__`
    method, but it should be called in case of manual editing of the
    parents or children of a `Term`.
    """
    # Only relations whose complement is itself a registered
    # Relationship can be propagated.
    valid_relationships = set(Relationship._instances.keys())

    # (parent, complemented relation, child id) triples for every
    # relation of every term that has a valid complement.
    relationships = [
        (parent, relation.complement(), term.id)
        for term in six.itervalues(self.terms)
        for relation in term.relations
        for parent in term.relations[relation]
        if relation.complementary
        and relation.complementary in valid_relationships
    ]

    # Sort by child id so the processing order is deterministic.
    relationships.sort(key=operator.itemgetter(2))

    for parent, rel, child in relationships:
        # complement() may yield None when no usable complement exists.
        if rel is None:
            break
        # `parent` may be a Term object or already a plain id string.
        try:
            parent = parent.id
        except AttributeError:
            pass
        if parent in self.terms:
            try:
                if child not in self.terms[parent].relations[rel]:
                    self.terms[parent].relations[rel].append(child)
            except KeyError:
                # The parent had no list for this relation yet.
                self[parent].relations[rel] = [child]

    del relationships
Make terms aware of their children. This is done automatically when using the `~Ontology.merge` and `~Ontology.include` methods as well as the `~Ontology.__init__` method, but it should be called in case of manual editing of the parents or children of a `Term`.
def ring_coding(array):
    """Produce matplotlib Path codes for a polygon ring.

    The first vertex gets MOVETO, the last CLOSEPOLY, and every vertex
    in between LINETO, as required for exterior and interior rings of a
    polygon geometry.
    """
    vertex_count = len(array)
    codes = np.full(vertex_count, Path.LINETO, dtype=Path.code_type)
    codes[0] = Path.MOVETO
    codes[-1] = Path.CLOSEPOLY
    return codes
Produces matplotlib Path codes for exterior and interior rings of a polygon geometry.
def next(self):
    """Return the next log event from the collection.

    On first use, opens a cursor over the collection sorted by the
    ``ts`` field ascending.  Each returned document is tagged with this
    source's name in its ``thread`` field before being wrapped in a
    LogEvent.
    """
    if not self.cursor:
        # Lazily create the cursor, sorted by timestamp ascending.
        self.cursor = self.coll_handle.find().sort([("ts", ASCENDING)])
    doc = self.cursor.next()
    doc['thread'] = self.name
    return LogEvent(doc)
Return the next log event from the collection.

On first use, a cursor over the collection sorted by timestamp
("ts", ascending) is created; each document is tagged with this
source's name before being wrapped in a LogEvent.
def _ConvertBool(value, require_str): if require_str: if value == 'true': return True elif value == 'false': return False else: raise ParseError('Expected "true" or "false", not {0}.'.format(value)) if not isinstance(value, bool): raise ParseError('Expected true or false without quotes.') return value
Convert a boolean value. Args: value: A scalar value to convert. require_str: If True, value must be a str. Returns: The bool parsed. Raises: ParseError: If a boolean value couldn't be consumed.
def stop_app(self, callback_function_param=False):
    """Stop the app currently running on the Chromecast.

    :param callback_function_param: Optional callback forwarded to
        ``send_message``.
    :return: The result of ``send_message``.
    """
    self.logger.info("Receiver:Stopping current app '%s'", self.app_id)
    stop_message = {MESSAGE_TYPE: 'STOP'}
    return self.send_message(
        stop_message,
        inc_session_id=True,
        callback_function=callback_function_param)
Stops the current running app on the Chromecast.
def getBehavior(name, id=None):
    """Return a matching behavior if it exists, or None.

    If id is None, return the default (first registered) behavior for
    `name`.

    :param name: Behavior name, matched case-insensitively.
    :param id: Optional id selecting a specific registration.
    """
    name = name.upper()
    if name in __behaviorRegistry:
        if id:
            for n, behavior in __behaviorRegistry[name]:
                if n == id:
                    return behavior
        # NOTE(review): when an id is given but no registration matches,
        # execution falls through and the default is returned instead of
        # None — confirm this is intended.
        return __behaviorRegistry[name][0][1]
    return None
Return a matching behavior if it exists, or None. If id is None, return the default for name.
def check_predefined_conditions():
    """Check k8s predefined conditions for the nodes.

    Lists all nodes via the CoreV1 API and returns False if any node
    condition has a falsy status, or if the API call fails; True
    otherwise.
    """
    try:
        node_info = current_k8s_corev1_api_client.list_node()
        for node in node_info.items:
            for condition in node.status.conditions:
                # NOTE(review): the kubernetes client reports
                # condition.status as the string 'True'/'False'/'Unknown',
                # so a non-empty string is always truthy and this branch
                # would never trigger — confirm the intended check.
                if not condition.status:
                    return False
    except ApiException as e:
        log.error('Something went wrong while getting node information.')
        log.error(e)
        return False
    return True
Check k8s predefined conditions for the nodes.
def check():
    """Check the built distribution files with twine.

    Requires the 'build' command to have produced files under
    ``DIST_PATH``; prints a hint and returns early when none are found.
    """
    dist_dir = Path(DIST_PATH)
    has_artifacts = dist_dir.exists() and bool(list(dist_dir.glob('*')))
    if not has_artifacts:
        print("No distribution files found. Please run 'build' command first")
        return
    subprocess.check_call(['twine', 'check', 'dist/*'])
Checks the long description.
def get_previous_character(self):
    """Return the character immediately before the cursor.

    :return: Previous cursor character.
    :rtype: QString
    """
    text_cursor = self.textCursor()
    # Extend the selection one character to the left to capture it.
    text_cursor.movePosition(QTextCursor.PreviousCharacter,
                             QTextCursor.KeepAnchor)
    return text_cursor.selectedText()
Returns the character before the cursor. :return: Previous cursor character. :rtype: QString
def dispatch(argdict):
    """Invoke the command-specific handler for ``argdict['command']``.

    The handler is looked up on this module as ``do_<command>`` and
    called with the full argument dict.
    """
    handler = getattr(THIS_MODULE, 'do_' + argdict['command'])
    handler(argdict)
Call the command-specific function, depending on the command.
def _reset_errors(self, msg=None): if msg is not None and msg in self._errors: del self._errors[msg] else: self._errors = {}
Resets the logging throttle cache, so the next error is emitted regardless of the value in `self.server_error_interval` :param msg: if present, only this key is reset. Otherwise, the whole cache is cleaned.
def _cli_main(args=None):
    """Parse CLI arguments and open the tunnel.

    Maps the -v count onto a log level (ERROR down to TRACE, capped at
    -vvvv) and keeps the process alive reading stdin while the tunnel
    is up.
    """
    arguments = _parse_arguments(args)
    _remove_none_values(arguments)
    level_by_verbosity = [logging.ERROR, logging.WARNING, logging.INFO,
                          logging.DEBUG, TRACE_LEVEL]
    # Cap verbosity at the most detailed level available.
    verbosity = min(arguments.pop('verbose'), 4)
    arguments.setdefault('debug_level', level_by_verbosity[verbosity])
    with open_tunnel(**arguments) as tunnel:
        if tunnel.is_alive:
            # Block on stdin so the tunnel stays open until EOF/Enter.
            input_()
Pass input arguments to open_tunnel Mandatory: ssh_address, -R (remote bind address list) Optional: -U (username) we may gather it from SSH_CONFIG_FILE or current username -p (server_port), defaults to 22 -P (password) -L (local_bind_address), default to 0.0.0.0:22 -k (ssh_host_key) -K (private_key_file), may be gathered from SSH_CONFIG_FILE -S (private_key_password) -t (threaded), allow concurrent connections over tunnels -v (verbose), up to 3 (-vvv) to raise loglevel from ERROR to DEBUG -V (version) -x (proxy), ProxyCommand's IP:PORT, may be gathered from config file -c (ssh_config), ssh configuration file (defaults to SSH_CONFIG_FILE) -z (compress) -n (noagent), disable looking for keys from an Agent -d (host_pkey_directories), look for keys on these folders
def straight_line_show(title, length=100, linestyle="=", pad=0):
    """Print a formatted straight line with an embedded title.

    :param title: Text placed on the line.
    :param length: Total line length.
    :param linestyle: Character(s) used to draw the line.
    :param pad: Padding around the title.
    """
    line = StrTemplate.straight_line(
        title=title, length=length, linestyle=linestyle, pad=pad)
    print(line)
Print a formatted straight line.
def __set_rate_type(self, value):
    """Set the rate type.

    @param value: str, one of RATE_TYPE_FIXED or RATE_TYPE_PERCENTAGE
    @raise ValueError: for any other value
    """
    allowed = (RATE_TYPE_FIXED, RATE_TYPE_PERCENTAGE)
    if value not in allowed:
        raise ValueError("Invalid rate type.")
    self.__rate_type = value
Sets the rate type. @param value:str
async def run_action(self, action_name, **params):
    """Run an action on this unit.

    Note that this only enqueues the action.  You will need to call
    ``action.wait()`` on the resulting `Action` instance if you wish to
    block until the action is complete.

    :param str action_name: Name of action to run
    :param **params: Action parameters
    :returns: A :class:`juju.action.Action` instance.
    :raises ValueError: if the action is not defined on this unit.
    :raises Exception: for any other enqueue error.
    """
    action_facade = client.ActionFacade.from_connection(self.connection)
    log.debug('Starting action `%s` on %s', action_name, self.name)
    res = await action_facade.Enqueue([client.Action(
        name=action_name,
        parameters=params,
        receiver=self.tag,
    )])
    # Enqueue was called with a single action, so inspect the first
    # (only) result.
    action = res.results[0].action
    error = res.results[0].error
    if error and error.code == 'not found':
        raise ValueError('Action `%s` not found on %s' % (action_name,
                                                          self.name))
    elif error:
        raise Exception('Unknown action error: %s' % error.serialize())
    # Strip the 'action-' prefix from the tag to get the bare id.
    action_id = action.tag[len('action-'):]
    log.debug('Action started as %s', action_id)
    # Wait for the new action entry to appear in the model before
    # returning it.
    return await self.model._wait_for_new('action', action_id)
Run an action on this unit. :param str action_name: Name of action to run :param **params: Action parameters :returns: A :class:`juju.action.Action` instance. Note that this only enqueues the action. You will need to call ``action.wait()`` on the resulting `Action` instance if you wish to block until the action is complete.
def update(self):
    """Fetch the updated ring from Redis and recompute the hash ranges.

    For each ring position owned by one of this instance's replicas,
    records the range from that position up to the next position on the
    ring.  A wrapping range is split into two pieces, and start == end
    means the replica covers the entire ring.
    """
    ring = self._fetch()
    n_replicas = len(ring)
    # Hashes of the replicas owned by this instance.
    replica_set = set([r[1] for r in self.replicas])
    self.ranges = []
    for n, (start, replica) in enumerate(ring):
        if replica in replica_set:
            # The range ends where the next entry on the ring starts
            # (wrapping modulo the ring size).
            end = ring[(n+1) % n_replicas][0] % RING_SIZE
            if start < end:
                self.ranges.append((start, end))
            elif end < start:
                # Wrap-around: split into [start, RING_SIZE) and [0, end).
                self.ranges.append((start, RING_SIZE))
                self.ranges.append((0, end))
            else:
                # start == end: this replica owns the whole ring.
                self.ranges.append((0, RING_SIZE))
Fetches the updated ring from Redis and updates the current ranges.
def validate(self, graph):
    """Validate that the graph is a directed acyclic graph.

    Args:
        graph (DiGraph): Reference to a DiGraph object from NetworkX.

    Raises:
        DirectedAcyclicGraphInvalid: If the graph is not a valid dag.
    """
    if nx.is_directed_acyclic_graph(graph):
        return
    raise DirectedAcyclicGraphInvalid(graph_name=self._name)
Validate the graph by checking whether it is a directed acyclic graph. Args: graph (DiGraph): Reference to a DiGraph object from NetworkX. Raises: DirectedAcyclicGraphInvalid: If the graph is not a valid dag.
def validate_request(self, data: Any, *additional: AnyMapping,
                     merged_class: Type[dict] = dict) -> Any:
    r"""Validate request data against request schema from module.

    :param data: Request data.
    :param \*additional: Additional data dicts to be merged with base
        request data.
    :param merged_class: When additional data dicts supplied, method by
        default will return merged **dict** with all data, but you can
        customize things to use read-only dict or any other additional
        class or callable.
    """
    request_schema = getattr(self.module, 'request', None)
    if request_schema is None:
        logger.error(
            'Request schema should be defined',
            extra={'schema_module': self.module,
                   'schema_module_attrs': dir(self.module)})
        raise self.make_error('Request schema should be defined')

    # Merge the base data with any additional dicts before validating.
    if isinstance(data, dict) and additional:
        data = merged_class(self._merge_data(data, *additional))

    # On failure the flag is left False (the finally clause runs before
    # the exception propagates); on success it is set True afterwards.
    try:
        self._validate(data, request_schema)
    finally:
        self._valid_request = False
    self._valid_request = True

    # Optional post-processing hook defined by the schema module.
    processor = getattr(self.module, 'request_processor', None)
    return processor(data) if processor else data
r"""Validate request data against request schema from module. :param data: Request data. :param \*additional: Additional data dicts to be merged with base request data. :param merged_class: When additional data dicts supplied method by default will return merged **dict** with all data, but you can customize things to use read-only dict or any other additional class or callable.
def glyph_metrics_stats(ttFont):
    """Compute basic glyph-width statistics for a font.

    Returns a dict containing whether the font ``seems_monospaced``,
    what's the maximum glyph width (``width_max``) and what's the
    ``most_common_width``.  For a font to be considered monospaced, at
    least 80% of the ascii glyphs must have the same width.

    :param ttFont: A fontTools TTFont exposing 'hmtx' and a best cmap.
    """
    glyph_metrics = ttFont['hmtx'].metrics
    # Hoisted: getBestCmap() was previously called twice per codepoint.
    cmap = ttFont.getBestCmap()
    # Glyph names mapped from printable ASCII; a set for O(1) membership.
    ascii_glyph_names = {cmap[c] for c in range(32, 128) if c in cmap}
    ascii_widths = [adv for name, (adv, lsb) in glyph_metrics.items()
                    if name in ascii_glyph_names]
    # most_common(1)[0] is a (width, count) pair; we need the COUNT of
    # the dominant width (the old local name suggested it was a width).
    dominant_width_count = Counter(ascii_widths).most_common(1)[0][1]
    seems_monospaced = dominant_width_count >= len(ascii_widths) * 0.8

    width_max = max(adv for adv, lsb in glyph_metrics.values())
    # metrics values are (advance, lsb) pairs; unpack the advance of the
    # most common pair.
    most_common_width = Counter(glyph_metrics.values()).most_common(1)[0][0][0]
    return {
        "seems_monospaced": seems_monospaced,
        "width_max": width_max,
        "most_common_width": most_common_width,
    }
Returns a dict containing whether the font seems_monospaced, what's the maximum glyph width and what's the most common width. For a font to be considered monospaced, at least 80% of the ascii glyphs must have the same width.
def print_version(self):
    """Print the program version (and title, when one is set).

    Nothing is printed when no version is configured.

    :return: self, to allow call chaining.
    """
    if not self._version:
        return self
    if self._title:
        print(' %s (%s %s)' % (self._title, self._name, self._version))
    else:
        print(' %s %s' % (self._name, self._version))
    return self
Print the program version.
def find_tag_by_name(repo, tag_name, safe=True):
    """Find tag by name in a github Repository.

    Parameters
    ----------
    repo: :class:`github.Repository` instance
    tag_name: str
        Short name of tag (not a fully qualified ref).
    safe: bool, optional
        Defaults to `True`.  When `True`, `None` is returned on
        failure.  When `False`, an exception will be raised upon
        failure.

    Returns
    -------
    gh : :class:`github.GitRef` instance or `None`

    Raises
    ------
    github.UnknownObjectException
        If git tag name does not exist in repo and `safe` is `False`.
    """
    qualified_ref = 'tags/{ref}'.format(ref=tag_name)
    try:
        ref = repo.get_git_ref(qualified_ref)
        if ref and ref.ref:
            return ref
    except github.UnknownObjectException:
        if not safe:
            raise
    return None
Find tag by name in a github Repository Parameters ---------- repo: :class:`github.Repository` instance tag_name: str Short name of tag (not a fully qualified ref). safe: bool, optional Defaults to `True`. When `True`, `None` is returned on failure. When `False`, an exception will be raised upon failure. Returns ------- gh : :class:`github.GitRef` instance or `None` Raises ------ github.UnknownObjectException If git tag name does not exist in repo.
def init_attachment_cache(self):
    """Initialize the attachment cache for the current view.

    GET requests clear any cached attachments.  For other methods,
    previously cached attachments are restored into ``request._files``
    (new uploads take precedence), and any new uploads are cached for
    subsequent requests.
    """
    cache_key = self.get_attachments_cache_key(self.request)
    if self.request.method == 'GET':
        attachments_cache.delete(cache_key)
        return

    restored = attachments_cache.get(cache_key)
    if restored:
        # Newly uploaded files override restored ones with the same key.
        restored.update(self.request.FILES)
        self.request._files = restored

    if self.request.FILES:
        attachments_cache.set(cache_key, self.request.FILES)
Initializes the attachment cache for the current view.
def recycle():
    """Recycle data from the version=2 cache into the main cache.

    Every 'th_*' key is copied from version 2 into the default cache
    version, then removed from version 2.
    """
    for cache_key in cache.iter_keys('th_*'):
        try:
            cache.set(cache_key, cache.get(cache_key, version=2))
            cache.delete_pattern(cache_key, version=2)
        except ValueError:
            pass
    logger.info('recycle of cache done!')
the purpose of this tasks is to recycle the data from the cache with version=2 in the main cache
def cmd(self, argv):
    """Run one command and exit.

    "argv" is the arglist for the command to run.  argv[0] is the
    command to run.  If argv is an empty list then the 'emptyline'
    handler is run.

    Returns the return value from the command handler, or 1 when the
    handler raised and ``cmdexc`` handled the exception.
    """
    # NOTE(review): `assert` disappears under `python -O`; raising
    # TypeError would survive optimization — confirm before relying on
    # this check for input validation.
    assert isinstance(argv, (list, tuple)), \
        "'argv' is not a sequence: %r" % argv
    retval = None
    try:
        argv = self.precmd(argv)
        retval = self.onecmd(argv)
        self.postcmd(argv)
    # NOTE(review): the bare `except` also routes KeyboardInterrupt and
    # SystemExit through `cmdexc` — confirm this is intended.
    except:
        if not self.cmdexc(argv):
            raise
        retval = 1
    return retval
Run one command and exit. "argv" is the arglist for the command to run. argv[0] is the command to run. If argv is an empty list then the 'emptyline' handler is run. Returns the return value from the command handler.
def add_nodes(self, nodes, attr_dict=None, **attr):
    """Add multiple nodes to the graph, along with related attributes.

    :param nodes: iterable container of either node references OR
        tuples of (node reference, attribute dictionary); an attribute
        dictionary provided in a tuple overrides both attr_dict's and
        attr's values for that node.
    :param attr_dict: dictionary of attributes shared by all the nodes.
    :param attr: keyword arguments of attributes of the node; attr's
        values override attr_dict's values if both are provided.

    See also: add_node

    Examples:
    ::

        >>> H = DirectedHypergraph()
        >>> attributes = {label: "positive"}
        >>> node_list = ["A", ("B", {label="negative"}), ("C", {root=True})]
        >>> H.add_nodes(node_list, attributes)
    """
    attr_dict = self._combine_attribute_arguments(attr_dict, attr)
    for node in nodes:
        # Each node gets its own copy so per-node updates can't leak
        # into the shared dict.
        node_attrs = attr_dict.copy()
        # isinstance instead of `type(...) is tuple` (idiomatic, and
        # accepts tuple subclasses such as namedtuples).
        if isinstance(node, tuple):
            node, extra_attrs = node
            node_attrs.update(extra_attrs)
        self.add_node(node, node_attrs)
Adds multiple nodes to the graph, along with any related attributes of the nodes. :param nodes: iterable container to either references of the nodes OR tuples of (node reference, attribute dictionary); if an attribute dictionary is provided in the tuple, its values will override both attr_dict's and attr's values. :param attr_dict: dictionary of attributes shared by all the nodes. :param attr: keyword arguments of attributes of the node; attr's values will override attr_dict's values if both are provided. See also: add_node Examples: :: >>> H = DirectedHypergraph() >>> attributes = {label: "positive"} >>> node_list = ["A", ("B", {label="negative"}), ("C", {root=True})] >>> H.add_nodes(node_list, attributes)
def _printable_id_code(self):
    """Return the code in a printable form, separating it into groups
    of three characters using a point between them.

    :return: the ID code in a printable form
    """
    code = super(ISWCCode, self)._printable_id_code()
    groups = (code[:3], code[3:6], code[-3:])
    return '%s.%s.%s' % groups
Returns the code in a printable form, separating it into groups of three characters using a point between them. :return: the ID code in a printable form
def can_add_new_content(self, block, file_info):
    """Decide whether content from file_info can be added into block.

    Content can be added iff the per-container file-count limit has not
    been reached AND either the info fits completely, or the block is
    still below the content-size cap and the info may be split (small
    files are only split when configured to).
    """
    # File-count limit (0 means unlimited).
    if (self._max_files_per_container != 0
            and len(block.content_file_infos) >= self._max_files_per_container):
        return False
    if self.does_content_fit(file_info, block):
        return True
    if block.content_size >= self._max_container_content_size_in_bytes:
        return False
    return self._should_split_small_files or not self._is_small_file(file_info)
new content from file_info can be added into block iff - file count limit hasn't been reached for the block - there is enough space to completely fit the info into the block - OR the info can be split and some info can fit into the block
def as_dict(self, use_preliminary=False):
    """Create a copy of the config in form of a dict.

    :param bool use_preliminary: Whether values present in the
        preliminary config take precedence over the stored config.
    :return: A dict with the copy of the config
    :rtype: dict
    """
    def _resolve(key):
        # Preliminary values win only when explicitly requested.
        if use_preliminary and key in self.preliminary_config:
            return self.preliminary_config[key]
        return self.config.get_config_value(key)

    return {key: _resolve(key) for key in self.config.keys}
Create a copy of the config in form of a dict :param bool use_preliminary: Whether to include the preliminary config :return: A dict with the copy of the config :rtype: dict
def info(verbose):
    """Show the version of the datamodel.

    With `verbose`, also print the full update history (most recent
    first).
    """
    # Hoisted: _get_mongopatcher() was previously called once per access.
    patcher = _get_mongopatcher()
    if not patcher.manifest.is_initialized():
        print('Datamodel is not initialized')
        return
    print('Datamodel version: %s' % patcher.manifest.version)
    if verbose:
        print('\nUpdate history:')
        for update in reversed(patcher.manifest.history):
            reason = update.get('reason')
            reason = '(%s)' % reason if reason else ''
            print(' - %s: %s %s' % (update['timestamp'],
                                    update['version'], reason))
Show version of the datamodel
def get_es(self, default_builder=get_es):
    """Return the elasticsearch Elasticsearch object to use.

    This uses the django get_es builder by default which takes into
    account settings in ``settings.py``.

    :param default_builder: Builder used when no explicit one is
        configured; defaults to the module-level ``get_es`` function
        (bound once at class-definition time).
    """
    return super(S, self).get_es(default_builder=default_builder)
Returns the elasticsearch Elasticsearch object to use. This uses the django get_es builder by default which takes into account settings in ``settings.py``.