code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def supports_gzip(self, context):
    """Return True when the requesting client accepts gzip-encoded content.

    Looks at the RequestContext object; if the request's Accept-Encoding
    header advertises gzip and remote serving is enabled, the caller will
    send the client to the gzipped version of compressible files.  Clients
    without gzip support get the original media.
    """
    # NOTE(review): `client.supports_gzip()` and `msettings` come from module
    # scope — presumably a global capability switch and settings dict; confirm.
    if 'request' in context and client.supports_gzip():
        enc = context['request'].META.get('HTTP_ACCEPT_ENCODING', '')
        return 'gzip' in enc and msettings['SERVE_REMOTE']
    return False
Looks at the RequestContext object and determines if the client supports gzip encoded content. If the client does, we will send them to the gzipped version of files that are allowed to be compressed. Clients without gzip support will be served the original media.
def is_user_attempt_whitelisted(request: AxesHttpRequest, credentials: dict = None) -> bool:
    """Check if the given request or credentials refer to a whitelisted username.

    A whitelisted user has the magic ``nolockout`` property set to a truthy
    value.  If the property is unknown, falsy, or the user can not be found,
    this implementation fails gracefully and returns False.
    """
    username_field = getattr(get_user_model(), 'USERNAME_FIELD', 'username')
    username_value = get_client_username(request, credentials)
    kwargs = {
        username_field: username_value
    }
    user_model = get_user_model()
    try:
        user = user_model.objects.get(**kwargs)
        return user.nolockout
    except (user_model.DoesNotExist, AttributeError):
        # missing user, or user without the nolockout attribute: not whitelisted
        pass
    return False
Check if the given request or credentials refer to a whitelisted username. A whitelisted user has the magic ``nolockout`` property set. If the property is unknown or False or the user can not be found, this implementation fails gracefully and returns False.
def get_handler(self, grant_type):
    """Return the bound handler capable of serving ``grant_type``.

    Returns ``None`` when the grant type is not supported, which results
    in an error response to the client.
    """
    supported = ('authorization_code', 'refresh_token', 'password')
    if grant_type in supported:
        # handler attribute names mirror the grant type identifiers
        return getattr(self, grant_type)
    return None
Return a function or method that is capable handling the ``grant_type`` requested by the client or return ``None`` to indicate that this type of grant type is not supported, resulting in an error response.
def unzip_file(self, zip_path, output_path):
    """Extract every member of the archive at *zip_path* into *output_path*."""
    archive = zipfile.ZipFile(zip_path, 'r')
    try:
        archive.extractall(output_path)
    finally:
        archive.close()
Unzip a local file into a specified directory.
def get_parent_of_type(typ, obj): if type(typ) is not text: typ = typ.__name__ while hasattr(obj, 'parent'): obj = obj.parent if obj.__class__.__name__ == typ: return obj
Finds first object up the parent chain of the given type. If no parent of the given type exists None is returned. Args: typ(str or python class): The type of the model object we are looking for. obj (model object): Python model object which is the start of the search process.
def dropEvent(self, event): if (event.mimeData().hasFormat("text/plain")): text = to_text_string(event.mimeData().text()) if self.new_input_line: self.on_new_line() self.insert_text(text, at_end=True) self.setFocus() event.setDropAction(Qt.MoveAction) event.accept() else: event.ignore()
Drag and Drop - Drop event
def filter_paths(d, paths, list_of_dicts=False, deepcopy=True):
    """Filter a dict, keeping only branches whose flattened path contains
    one of the given key sets.

    Parameters
    ----------
    d : dict
        The dict to filter.
    paths : list[str] or list[tuple]
        Keys (or tuples of keys) a path must contain to be kept.
    list_of_dicts : bool
        Treat lists of dicts as additional branches.
    deepcopy : bool
        Deepcopy values in the returned dict.

    Returns
    -------
    dict
        The filtered, unflattened dict.
    """
    list_of_dicts = '__list__' if list_of_dicts else None
    all_keys = [x for y in paths if isinstance(y, tuple) for x in y]
    all_keys += [x for x in paths if not isinstance(x, tuple)]
    # pre-filter by individual keys before flattening (cheaper than
    # flattening everything first)
    new_d = filter_keys(d, all_keys, list_of_dicts=list_of_dicts)
    # BUG FIX: flatten the already-filtered dict, not the original `d`;
    # previously the filter_keys() result was silently discarded
    new_d = flatten(new_d, list_of_dicts=list_of_dicts)
    for key in list(new_d.keys()):
        if not any([
                set(key).issuperset(path if isinstance(path, tuple) else [path])
                for path in paths]):
            new_d.pop(key)
    return unflatten(new_d, list_of_dicts=list_of_dicts, deepcopy=deepcopy)
filter dict by certain paths containing key sets Parameters ---------- d : dict paths : list[str] or list[tuple] list_of_dicts: bool treat list of dicts as additional branches deepcopy: bool deepcopy values Examples -------- >>> from pprint import pprint >>> d = {'a':{'b':1,'c':{'d':2}},'e':{'c':3}} >>> filter_paths(d,[('c','d')]) {'a': {'c': {'d': 2}}} >>> d2 = {'a':[{'b':1,'c':3},{'b':1,'c':2}]} >>> pprint(filter_paths(d2,["b"],list_of_dicts=False)) {} >>> pprint(filter_paths(d2,["c"],list_of_dicts=True)) {'a': [{'c': 3}, {'c': 2}]}
def iterate(self):
    """Advance the loop-tracking state by one step, as if the for loop
    had iterated once more."""
    self.counter += 1      # 1-based forward counter
    self.counter0 += 1     # 0-based forward counter
    self.revcounter -= 1   # counts down toward the end of the loop
    self.revcounter0 -= 1
    self.first = False     # after any iteration we are past the first item
    # NOTE(review): `last` is derived from revcounter0 vs len_values - 1;
    # confirm this matches the initial values set by the loop constructor.
    self.last = (self.revcounter0 == self.len_values - 1)
Updates the counters and first/last flags as if we had iterated once more over the for loop.
def queue_size(self):
    """Determine the Cuckoo sandbox queue length.

    :rtype: int
    :return: Number of submissions with 'pending' status in the sandbox queue.
    """
    raw = self._request("tasks/list").content.decode('utf-8')
    pending = [task for task in json.loads(raw)["tasks"]
               if task['status'] == 'pending']
    return len(pending)
Determine Cuckoo sandbox queue length There isn't a built in way to do this like with Joe :rtype: int :return: Number of submissions in sandbox queue.
def find_peaks(dt, r_max=4, footprint=None):
    r"""Return all local maxima in the distance transform.

    Parameters
    ----------
    dt : ND-array
        The distance transform of the pore space.
    r_max : scalar
        Radius of the structuring element used in the maximum filter;
        controls the localness of any maxima.  The default is 4 voxels.
    footprint : ND-array, optional
        Shape of the structuring element defining the peak neighborhood.
        Defaults to a sphere (disk in 2D).

    Returns
    -------
    ND-array
        Boolean array with ``True`` at the location of any local maxima.

    Raises
    ------
    Exception
        If the image is neither 2-d nor 3-d and no footprint is given.
    """
    im = dt > 0
    if im.ndim != im.squeeze().ndim:
        # BUG FIX: corrected typo in user-facing warning ("conains")
        warnings.warn('Input image contains a singleton axis:' + str(im.shape) +
                      ' Reduce dimensionality with np.squeeze(im) to avoid' +
                      ' unexpected behavior.')
    if footprint is None:
        if im.ndim == 2:
            footprint = disk
        elif im.ndim == 3:
            footprint = ball
        else:
            raise Exception("only 2-d and 3-d images are supported")
    # raise values outside the pore space before filtering
    # (NOTE(review): presumably to keep solid-phase voxels from dominating
    # the maximum filter at the boundary — confirm)
    mx = spim.maximum_filter(dt + 2*(~im), footprint=footprint(r_max))
    peaks = (dt == mx)*im
    return peaks
r""" Returns all local maxima in the distance transform Parameters ---------- dt : ND-array The distance transform of the pore space. This may be calculated and filtered using any means desired. r_max : scalar The size of the structuring element used in the maximum filter. This controls the localness of any maxima. The default is 4 voxels. footprint : ND-array Specifies the shape of the structuring element used to define the neighborhood when looking for peaks. If none is specified then a spherical shape is used (or circular in 2D). Returns ------- image : ND-array An array of booleans with ``True`` values at the location of any local maxima. Notes ----- It is also possible to use the ``peak_local_max`` function from the ``skimage.feature`` module as follows: ``peaks = peak_local_max(image=dt, min_distance=r, exclude_border=0, indices=False)`` This automatically uses a square structuring element which is significantly faster than using a circular or spherical element.
def ordered_expected_layers(self): registry = QgsProject.instance() layers = [] count = self.list_layers_in_map_report.count() for i in range(count): layer = self.list_layers_in_map_report.item(i) origin = layer.data(LAYER_ORIGIN_ROLE) if origin == FROM_ANALYSIS['key']: key = layer.data(LAYER_PURPOSE_KEY_OR_ID_ROLE) parent = layer.data(LAYER_PARENT_ANALYSIS_ROLE) layers.append(( FROM_ANALYSIS['key'], key, parent, None )) else: layer_id = layer.data(LAYER_PURPOSE_KEY_OR_ID_ROLE) layer = registry.mapLayer(layer_id) style_document = QDomDocument() layer.exportNamedStyle(style_document) layers.append(( FROM_CANVAS['key'], layer.name(), full_layer_uri(layer), style_document.toString() )) return layers
Get an ordered list of layers according to users input. From top to bottom in the legend: [ ('FromCanvas', layer name, full layer URI, QML), ('FromAnalysis', layer purpose, layer group, None), ... ] The full layer URI is coming from our helper. :return: An ordered list of layers following a structure. :rtype: list
def deactivate_user(self, user):
    """Deactivate the specified user.

    :param user: The user to deactivate
    :return: ``True`` if a change was made, ``False`` when the user was
        already inactive.
    """
    if not user.active:
        return False
    user.active = False
    return True
Deactivates a specified user. Returns `True` if a change was made. :param user: The user to deactivate
def get_list(value): if value is None: return [] elif value is NotSet: return NotSet elif isinstance(value, (list, tuple)): return list(value) elif isinstance(value, six.string_types + (lazy_type, )) or uses_type_registry(value): return [value] raise ValueError("Invalid type; expected a list, tuple, or string type, found {0}.".format( type(value).__name__))
Wraps the given value in a list. ``None`` returns an empty list. Lists and tuples are returned as lists. Single strings and registered types are wrapped in a list. :param value: Value to return as a list. :return: List with the provided value(s). :rtype: list
def childDataReceived(self, childFD, data):
    """Relay data received on any file descriptor to the process protocol.

    When no protocol is attached yet, buffer the (fd, data) pair instead.
    """
    protocol = getattr(self, 'protocol', None)
    if not protocol:
        self.data.append((childFD, data))
    else:
        protocol.dataReceived(data)
Relay data received on any file descriptor to the process
async def endorsements(self, root): text = root.find('ENDORSEMENTS').text return [Nation(name) for name in text.split(',')] if text else []
Regional neighbours endorsing the nation. Returns ------- an :class:`ApiQuery` of a list of :class:`Nation`
def read_geo(fid, key):
    """Read geolocation and related datasets.

    Reads dataset *key* from the "/L1C" group of *fid* (an HDF5-like file
    object).  Datasets whose name contains "time" are assembled from
    separate day/millisecond arrays into datetimes and tagged with a
    'sensing_time_epoch' attribute; other datasets are read directly.

    Returns a dask-backed xarray.DataArray with dims ('y', 'x').
    """
    dsid = GEO_NAMES[key.name]
    add_epoch = False
    if "time" in key.name:
        days = fid["/L1C/" + dsid["day"]].value
        msecs = fid["/L1C/" + dsid["msec"]].value
        data = _form_datetimes(days, msecs)
        add_epoch = True
        dtype = np.float64
    else:
        data = fid["/L1C/" + dsid].value
        dtype = np.float32
    data = xr.DataArray(da.from_array(data, chunks=CHUNK_SIZE),
                        name=key.name, dims=['y', 'x']).astype(dtype)
    if add_epoch:
        # times are stored relative to EPOCH; record it for consumers
        data.attrs['sensing_time_epoch'] = EPOCH
    return data
Read geolocation and related datasets.
def fetch_page(self, title, method='GET'):
    """Query the wiki API for a page by *title* and return its page record.

    :raises ArticleNotFound: when the API reports no matching article.
    """
    params = {
        'action': 'query',
        'format': 'json',
        'prop': 'revisions',
        'rvprop': 'content',
        'explaintext': '',
        'titles': title,
    }
    response = self.request(method, self.base_url, params=params)
    response.raise_for_status()
    pages = response.json()["query"]["pages"]
    first_id = next(iter(pages.keys()))
    # the API signals "no such article" with a sentinel page id of -1
    if first_id == '-1':
        raise ArticleNotFound('no matching articles returned')
    return pages[first_id]
Query for page by title
def dist(self, x1, x2): if self.exponent == 2.0: return float(np.sqrt(self.const) * _norm_default(x1 - x2)) elif self.exponent == float('inf'): return float(self.const * _pnorm_default(x1 - x2, self.exponent)) else: return float((self.const ** (1 / self.exponent) * _pnorm_default(x1 - x2, self.exponent)))
Return the weighted distance between ``x1`` and ``x2``. Parameters ---------- x1, x2 : `NumpyTensor` Tensors whose mutual distance is calculated. Returns ------- dist : float The distance between the tensors.
def hash(filename, algorithm='sha256'): if incompatible: raise Incompatible if algorithm not in ['sha256', 'sha384', 'sha512']: raise InvalidArguments('Algorithm {} not supported'.format(algorithm)) result = call('hash', '--algorithm', algorithm, filename) return result.strip().split(':')[-1]
Hash the given filename. Unavailable in `pip<8.0.0`
def report_issue(self, body=None, title=None, open_webpage=False): if body is None: from spyder.widgets.reporterror import SpyderErrorDialog report_dlg = SpyderErrorDialog(self, is_report=True) report_dlg.show() else: if open_webpage: if PY3: from urllib.parse import quote else: from urllib import quote from qtpy.QtCore import QUrlQuery url = QUrl(__project_url__ + '/issues/new') query = QUrlQuery() query.addQueryItem("body", quote(body)) if title: query.addQueryItem("title", quote(title)) url.setQuery(query) QDesktopServices.openUrl(url)
Report a Spyder issue to github, generating body text if needed.
def get_match_history_by_sequence_num(start_at_match_seq_num, matches_requested=None, **kwargs): params = { "start_at_match_seq_num": start_at_match_seq_num, "matches_requested": matches_requested } return make_request("GetMatchHistoryBySequenceNum", params, **kwargs)
Most recent matches ordered by sequence number
def __get_job_status(self):
    """Return the Kubernetes job status: "SUCCEEDED", "FAILED" or "RUNNING".

    On success the job is scaled down to zero replicas, pod logs are
    optionally printed, and the job is optionally deleted.  On too many
    failures the job is scaled down and reported as failed; otherwise the
    job is still considered running.
    """
    job = self.__get_job()
    if "succeeded" in job.obj["status"] and job.obj["status"]["succeeded"] > 0:
        # stop the job so no further pods are scheduled
        job.scale(replicas=0)
        if self.print_pod_logs_on_exit:
            self.__print_pod_logs()
        if self.delete_on_success:
            self.__delete_job_cascade(job)
        return "SUCCEEDED"
    if "failed" in job.obj["status"]:
        failed_cnt = job.obj["status"]["failed"]
        self.__logger.debug("Kubernetes job " + self.uu_name +
                            " status.failed: " + str(failed_cnt))
        if self.print_pod_logs_on_exit:
            self.__print_pod_logs()
        # NOTE(review): strict '>' means max_retrials + 1 attempts in total
        # before giving up — confirm against the retry policy docs
        if failed_cnt > self.max_retrials:
            job.scale(replicas=0)
            return "FAILED"
    return "RUNNING"
Return the Kubernetes job status
def get_info(self, key=None, Id=None) -> dict:
    """Return the information dict associated with *Id*, or with the item
    at list index *key* when *key* is given.  Unknown ids yield ``{}``."""
    lookup_id = self[key].Id if key is not None else Id
    return self.infos.get(lookup_id, {})
Returns information associated with Id or list index
def flatten(list_of_lists): flat_list = [] for sublist in list_of_lists: if isinstance(sublist, string_types) or isinstance(sublist, int): flat_list.append(sublist) elif sublist is None: continue elif not isinstance(sublist, string_types) and len(sublist) == 1: flat_list.append(sublist[0]) else: flat_list.append(tuple(sublist)) return flat_list
Flatten a list of lists but maintain strings and ints as entries.
def root(self):
    """Return the root of this node by walking up the package chain.

    Returns:
        Package: this node's root package (itself when it has no parent).
    """
    current = self
    parent = current.package
    while parent is not None:
        current, parent = parent, parent.package
    return current
Property to return the root of this node. Returns: Package: this node's root package.
def serialize(self, content): worker = JSONSerializer( scheme=self.resource, options=self.resource._meta.emit_options, format=self.resource._meta.emit_format, **self.resource._meta.emit_models ) return worker.serialize(content)
Serialize to JSON. :return string: serializaed JSON
def _get_logical(source_lines, result, logical_start, logical_end): row = result['line'] - 1 col = result['column'] - 1 ls = None le = None for i in range(0, len(logical_start), 1): assert logical_end x = logical_end[i] if x[0] > row or (x[0] == row and x[1] > col): le = x ls = logical_start[i] break if ls is None: return None original = source_lines[ls[0]:le[0] + 1] return ls, le, original
Return the logical line corresponding to the result. Assumes input is already E702-clean.
def download(self, request, **kwargs): self.method_check(request, allowed=['get']) basic_bundle = self.build_bundle(request=request) tileset = self.cached_obj_get( bundle=basic_bundle, **self.remove_api_resource_names(kwargs)) filename = helpers.get_tileset_filename(tileset) filename = os.path.abspath(filename) if os.path.isfile(filename): response = serve(request, os.path.basename(filename), os.path.dirname(filename)) response['Content-Disposition'] = 'attachment; filename="{}"'.format(os.path.basename(filename)) else: response = self.create_response(request, {'status': 'not generated'}) return response
proxy for the helpers.tileset_download method
def _find(self, root, tagname, id=None): if id is None: result = root.find('.//%s' % tagname) if result is None: raise LookupError('Cannot find any %s elements' % tagname) else: return result else: result = [ elem for elem in root.findall('.//%s' % tagname) if elem.attrib.get('id', '') == id ] if len(result) == 0: raise LookupError('Cannot find a %s element with id %s' % (tagname, id)) elif len(result) > 1: raise LookupError('Found multiple %s elements with id %s' % (tagname, id)) else: return result[0]
Returns the first element with the specified tagname and id
def execute(self):
    """Execute the stemming process; retrieve the outcome via ``result``.

    Falls back to the original word when the stemmed candidate is not
    present in the dictionary.
    """
    self.start_stemming_process()
    candidate = self.current_word
    self.result = candidate if self.dictionary.contains(candidate) else self.original_word
Execute stemming process; the result can be retrieved with result
def _pip_cmd(self, name=None, prefix=None): if (name and prefix) or not (name or prefix): raise TypeError("conda pip: exactly one of 'name' ""or 'prefix' " "required.") if name and self.environment_exists(name=name): prefix = self.get_prefix_envname(name) if sys.platform == 'win32': python = join(prefix, 'python.exe') pip = join(prefix, 'pip.exe') else: python = join(prefix, 'bin/python') pip = join(prefix, 'bin/pip') cmd_list = [python, pip] return cmd_list
Get pip location based on environment `name` or `prefix`.
def calinski_harabaz_score(X, labels):
    """Compute the Calinski and Harabaz score.

    The score is the ratio of between-cluster dispersion to within-cluster
    dispersion, scaled by (n_samples - n_labels) / (n_labels - 1).

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        List of n_features-dimensional data points.
    labels : array-like, shape (n_samples,)
        Predicted labels for each sample.

    Returns
    -------
    score : float
        The resulting Calinski-Harabaz score.
    """
    X, labels = check_X_y(X, labels)
    le = LabelEncoder()
    labels = le.fit_transform(labels)
    n_samples, _ = X.shape
    n_labels = len(le.classes_)
    check_number_of_labels(n_labels, n_samples)
    extra_disp, intra_disp = 0., 0.
    mean = np.mean(X, axis=0)
    for k in range(n_labels):
        cluster_k = X[labels == k]
        mean_k = np.mean(cluster_k, axis=0)
        # between-cluster dispersion, weighted by cluster size
        extra_disp += len(cluster_k) * np.sum((mean_k - mean) ** 2)
        # within-cluster dispersion
        intra_disp += np.sum((cluster_k - mean_k) ** 2)
    # returns 1.0 when the within-cluster dispersion is exactly zero
    return (1. if intra_disp == 0. else
            extra_disp * (n_samples - n_labels) /
            (intra_disp * (n_labels - 1.)))
Compute the Calinski and Harabaz score. The score is defined as ratio between the within-cluster dispersion and the between-cluster dispersion. Read more in the :ref:`User Guide <calinski_harabaz_index>`. Parameters ---------- X : array-like, shape (``n_samples``, ``n_features``) List of ``n_features``-dimensional data points. Each row corresponds to a single data point. labels : array-like, shape (``n_samples``,) Predicted labels for each sample. Returns ------- score : float The resulting Calinski-Harabaz score. References ---------- .. [1] `T. Calinski and J. Harabasz, 1974. "A dendrite method for cluster analysis". Communications in Statistics <http://www.tandfonline.com/doi/abs/10.1080/03610927408827101>`_
def split(self, point=None):
    """Split this sequence into two halves and return them.

    The original sequence remains unmodified.

    :param point: defines the split point; if None, the centre is used
    :return: two Sequence objects -- one for each side
    """
    if point is None:
        # BUG FIX: use integer division; `len(self) / 2` is a float under
        # Python 3 and floats are not valid slice indices
        point = len(self) // 2
    r1 = Sequence(self.name + ".1", self.sequenceData[:point])
    r2 = Sequence(self.name + ".2", self.sequenceData[point:])
    return r1, r2
Split this sequence into two halves and return them. The original sequence remains unmodified. :param point: defines the split point, if None then the centre is used :return: two Sequence objects -- one for each side
def read_cz_lsm_info(fd, byte_order, dtype, count): result = numpy.rec.fromfile(fd, CZ_LSM_INFO, 1, byteorder=byte_order)[0] {50350412: '1.3', 67127628: '2.0'}[result.magic_number] return result
Read CZ_LSM_INFO tag from file and return as numpy.rec.array.
def _redis_notifier(state): tstamp = time.time() state._tstamp = tstamp conf = state.app.config r = redis.client.StrictRedis() r.publish(conf.get('WAFFLE_REDIS_CHANNEL', 'waffleconf'), tstamp)
Notify of configuration update through redis. Arguments: state (_WaffleState): Object that contains reference to app and its configstore.
def write(self, x: int, y: int, text: str, transposed_text: 'Optional[str]' = None): entry = self.entries.get((x, y), _DiagramText('', '')) self.entries[(x, y)] = _DiagramText( entry.text + text, entry.transposed_text + (transposed_text if transposed_text else text))
Adds text to the given location. Args: x: The column in which to write the text. y: The row in which to write the text. text: The text to write at location (x, y). transposed_text: Optional text to write instead, if the text diagram is transposed.
def version(self, value): self.bytearray[self._get_slicers(1)] = bytearray(c_uint8(value or 0))
Version setter.
def _full_diff(merge_result, key, context_lines=3): header_printed = False for group in _split_diff(merge_result, context_lines=context_lines): if not header_printed: header_printed = True yield color.Header('diff a/%s b/%s' % (key, key)) yield color.DeletedHeader('--- %s' % key) yield color.AddedHeader('+++ %s' % key) for l in _diff_group(group): yield l
Generate a full diff based on a Weave merge result
def _shorten_line_at_tokens_new(tokens, source, indentation, max_line_length): yield indentation + source parsed_tokens = _parse_tokens(tokens) if parsed_tokens: fixed = _reflow_lines(parsed_tokens, indentation, max_line_length, start_on_prefix_line=True) if fixed and check_syntax(normalize_multiline(fixed.lstrip())): yield fixed fixed = _reflow_lines(parsed_tokens, indentation, max_line_length, start_on_prefix_line=False) if fixed and check_syntax(normalize_multiline(fixed.lstrip())): yield fixed
Shorten the line taking its length into account. The input is expected to be free of newlines except for inside multiline strings and at the end.
def export_aliases(export_path=None, exclusions=None): if not export_path: export_path = os.path.abspath(ALIAS_FILE_NAME) alias_table = get_alias_table() for exclusion in exclusions or []: if exclusion not in alias_table.sections(): raise CLIError(ALIAS_NOT_FOUND_ERROR.format(exclusion)) alias_table.remove_section(exclusion) _commit_change(alias_table, export_path=export_path, post_commit=False) logger.warning(POST_EXPORT_ALIAS_MSG, export_path)
Export all registered aliases to a given path, as an INI configuration file. Args: export_path: The path of the alias configuration file to export to. exclusions: Space-separated aliases excluded from export.
def delete(self, custom_field, params=None, **options):
    """Delete a specific, existing custom field via a DELETE request on its URL.

    Returns an empty data record.

    Parameters
    ----------
    custom_field : {Id}
        Globally unique identifier for the custom field.
    params : dict, optional
        Extra request parameters; a fresh empty dict is used when omitted
        (BUG FIX: avoids the shared mutable-default-argument pitfall).
    """
    path = "/custom_fields/%s" % (custom_field)
    return self.client.delete(path, params if params is not None else {}, **options)
A specific, existing custom field can be deleted by making a DELETE request on the URL for that custom field. Returns an empty data record. Parameters ---------- custom_field : {Id} Globally unique identifier for the custom field.
def append(self, obj): if isinstance(obj, str): obj = KQMLToken(obj) self.data.append(obj)
Append an element to the end of the list. Parameters ---------- obj : KQMLObject or str If a string is passed, it is instantiated as a KQMLToken before being added to the list.
def save(self): if self.id: method = 'put' resource = self.RESOURCE.format( account_id=self.account.id, tailored_audience_id=self.tailored_audience_id, id=self.id) else: method = 'post' resource = self.RESOURCE_COLLECTION.format( account_id=self.account.id, tailored_audience_id=self.tailored_audience_id) response = Request( self.account.client, method, resource, params=self.to_params()).perform() return self.from_response(response.body['data'])
Saves or updates the current tailored audience permission.
def skew_x(self, x):
    """Skew the element along the x-axis by the given angle.

    Parameters
    ----------
    x : float
        x-axis skew angle in degrees

    Returns
    -------
    self, to allow call chaining.
    """
    existing = self.root.get("transform") or ''
    self.root.set("transform", "%s skewX(%f)" % (existing, x))
    return self
Skew element along the x-axis by the given angle. Parameters ---------- x : float x-axis skew angle in degrees
def disassemble_instruction(self, instruction): if not util.is_integer(instruction): raise TypeError('Expected instruction to be an integer.') buf_size = self.MAX_BUF_SIZE buf = (ctypes.c_char * buf_size)() res = self._dll.JLINKARM_DisassembleInst(ctypes.byref(buf), buf_size, instruction) if res < 0: raise errors.JLinkException('Failed to disassemble instruction.') return ctypes.string_at(buf).decode()
Disassembles and returns the assembly instruction string. Args: self (JLink): the ``JLink`` instance. instruction (int): the instruction address. Returns: A string corresponding to the assembly instruction string at the given instruction address. Raises: JLinkException: on error. TypeError: if ``instruction`` is not a number.
def split_demultiplexed_sampledata(data, demultiplexed): datadicts = [] samplename = dd.get_sample_name(data) for fastq in demultiplexed: barcode = os.path.basename(fastq).split(".")[0] datadict = copy.deepcopy(data) datadict = dd.set_sample_name(datadict, samplename + "-" + barcode) datadict = dd.set_description(datadict, samplename + "-" + barcode) datadict["rgnames"]["rg"] = samplename + "-" + barcode datadict["name"]= ["", samplename + "-" + barcode] datadict["files"] = [fastq] datadicts.append(datadict) return datadicts
splits demultiplexed samples into separate entries in the global sample datadict
def printWelcomeMessage(msg, place=10): logging.debug('*' * 30) welcome = ' ' * place welcome+= msg logging.debug(welcome) logging.debug('*' * 30 + '\n')
Print any welcome message
def validate_path(path): if not isinstance(path, six.string_types) or not re.match('^/(?:[._a-zA-Z0-9-]/?)+[^/]$', path): raise InvalidUsage( "Path validation failed - Expected: '/<component>[/component], got: %s" % path ) return True
Validates the provided path :param path: path to validate (string) :raise: :InvalidUsage: If validation fails.
def get_first_mapping(cls): from .models import Indexable if issubclass(cls, Indexable) and hasattr(cls, "Mapping"): return cls.Mapping for base in cls.__bases__: mapping = get_first_mapping(base) if mapping: return mapping return None
This allows for Django-like inheritance of mapping configurations
def unfold_file(self, path):
    """Parse the YAML file at *path* and add its contents to the graph."""
    parsed = self.file_index.unfold_yaml(path)
    self.unfold_config(path, parsed)
Parse given file and add it to graph
def getTableCount(self, verbose=None):
    """Return the number of global tables.

    :param verbose: print more
    :returns: 200: successful operation
    """
    # BUG FIX: the function referenced self.url but its signature was
    # missing the `self` parameter, so every call failed
    response = api(url=self.url + 'tables/count', method="GET",
                   verbose=verbose, parse_params=False)
    return response
Returns the number of global tables. :param verbose: print more :returns: 200: successful operation
def get_string_width(self, s):
    "Get width of a string in the current font"
    s = self.normalize_text(s)
    char_widths = self.current_font['cw']
    total = 0
    if self.unifontsubset:
        for ch in s:
            code = ord(ch)
            if code < len(char_widths):
                total += char_widths[code]
            elif self.current_font['desc']['MissingWidth']:
                total += self.current_font['desc']['MissingWidth']
            else:
                # fallback width for glyphs with no metrics at all
                total += 500
    else:
        for ch in s:
            total += char_widths.get(ch, 0)
    # widths are stored in 1/1000ths of the font size
    return total * self.font_size / 1000.0
Get width of a string in the current font
def to_xml(node, pretty=False): fout = Sio() etree = et.ElementTree(node) etree.write(fout) xml = fout.getvalue() if pretty: xml = pretty_xml(xml, True) return xml
convert an etree node to xml
def parse_header_part(self, data):
    """Convert the RFX common header (the 4 bytes preceding the vendor
    specific data) into a plain dictionary.

    The header bytes are, in order: packet length, packet type, packet
    sub-type and sequence number.  Type/sub-type names are resolved from
    the PACKET_TYPES / PACKET_SUBTYPES tables (None when unknown).

    :param data: bytearray of received data
    :type data: bytearray
    """
    length, ptype, subtype, seq = data[0], data[1], data[2], data[3]
    return {
        'packet_length': length,
        'packet_type': ptype,
        'packet_type_name': self.PACKET_TYPES.get(ptype),
        'packet_subtype': subtype,
        'packet_subtype_name': self.PACKET_SUBTYPES.get(subtype),
        'sequence_number': seq,
    }
Extracts and converts the RFX common header part of all valid packets to a plain dictionary. RFX header part is the 4 bytes prior the sensor vendor specific data part. The RFX common header part contains respectively: - packet length - packet type - packet sub-type - sequence number :param data: bytearray of received data :type data: bytearray
def parse_signature(cls, function): annotations = function.__annotations__.copy() del annotations['return'] result = [] for param_name, (param_type, param_obj) in annotations.items(): sig_param = function.signature.parameters[param_name] param_description = { 'paramType': param_type, 'name': param_name, 'required': sig_param.default is inspect.Parameter.empty} param_description.update(param_obj.describe()) result.append(param_description) return result
Parses the signature of a method and its annotations to swagger. Return a dictionary {arg_name: info}.
def run_command(self, cmd, input_data=None): kwargs = { 'stdout': subprocess.PIPE, 'stderr': subprocess.PIPE, } if input_data is not None: kwargs['stdin'] = subprocess.PIPE stdout = [] stderr = [] p = subprocess.Popen(cmd, **kwargs) t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout)) t1.start() t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr)) t2.start() if input_data is not None: p.stdin.write(input_data) p.stdin.close() p.wait() t1.join() t2.join() return p.returncode, stdout, stderr
Run a command in a child process , passing it any input data specified. :param cmd: The command to run. :param input_data: If specified, this must be a byte string containing data to be sent to the child process. :return: A tuple consisting of the subprocess' exit code, a list of lines read from the subprocess' ``stdout``, and a list of lines read from the subprocess' ``stderr``.
def to_record(self): tf_list = [getattr(self, k, None) for k in [_.value for _ in TLSFileType]] tf_list = filter(lambda x: x, tf_list) files = {tf.file_type.value: tf.file_path for tf in tf_list} self.record['files'] = files return self.record
Create a CertStore record from this TLSFileBundle
def overwrite_stage_variables(self, ret, stage_variables): res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not res.get('overwrite'): ret['result'] = False ret['abort'] = True ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'overwrite_stage_variables', res.get('stage')) return ret
overwrite the given stage_name's stage variables with the given stage_variables
def only_on_master(function):
    """Decorator that raises SoCoSlaveException when the wrapped method is
    called on a speaker that is not the group coordinator."""
    @wraps(function)
    def wrapper(self, *args, **kwargs):
        if self.is_coordinator:
            return function(self, *args, **kwargs)
        message = 'The method or property "{0}" can only be called/used '\
                  'on the coordinator in a group'.format(function.__name__)
        raise SoCoSlaveException(message)
    return wrapper
Decorator that raises SoCoSlaveException on master call on slave.
def extract_metrics(self, metrics_files): extension_maps = dict( align_metrics=(self._parse_align_metrics, "AL"), dup_metrics=(self._parse_dup_metrics, "DUP"), hs_metrics=(self._parse_hybrid_metrics, "HS"), insert_metrics=(self._parse_insert_metrics, "INS"), rnaseq_metrics=(self._parse_rnaseq_metrics, "RNA")) all_metrics = dict() for fname in metrics_files: ext = os.path.splitext(fname)[-1][1:] try: parse_fn, prefix = extension_maps[ext] except KeyError: parse_fn = None if parse_fn: with open(fname) as in_handle: for key, val in parse_fn(in_handle).items(): if not key.startswith(prefix): key = "%s_%s" % (prefix, key) all_metrics[key] = val return all_metrics
Return summary information for a lane of metrics files.
def closer_than(self, mesh, radius): dists = geodetic.distance(self.longitude, self.latitude, self.depth, mesh.lons, mesh.lats, 0 if mesh.depths is None else mesh.depths) return dists <= radius
Check for proximity of points in the ``mesh``. :param mesh: :class:`openquake.hazardlib.geo.mesh.Mesh` instance. :param radius: Proximity measure in km. :returns: Numpy array of boolean values in the same shape as the mesh coordinate arrays with ``True`` on indexes of points that are not further than ``radius`` km from this point. Function :func:`~openquake.hazardlib.geo.geodetic.distance` is used to calculate distances to points of the mesh. Points of the mesh that lie exactly ``radius`` km away from this point also have ``True`` in their indices.
def addCallSetFromName(self, sampleName): callSet = CallSet(self, sampleName) self.addCallSet(callSet)
Adds a CallSet for the specified sample name.
def write_file(path, content, mode=None, encoding='utf-8'):
    """Write *content* to a file; gzip-compress when the path ends with .gz.

    The mode defaults to 'wb' for bytes content and 'wt' otherwise.  Text
    content is converted/encoded as needed to match the chosen mode.

    :param path: destination file path
    :param content: str or bytes payload to write
    :param mode: explicit file mode; inferred from the content type if falsy
    :param encoding: encoding used when bytes output is required
    :raises ValueError: when *path* is empty
    """
    if not mode:
        if isinstance(content, bytes):
            mode = 'wb'
        else:
            mode = 'wt'
    if not path:
        raise ValueError("Output path is invalid")
    else:
        getLogger().debug("Writing content to {}".format(path))
    # coerce content to match the selected mode
    if mode in ('w', 'wt') and not isinstance(content, str):
        content = to_string(content)
    elif mode == 'wb':
        if not isinstance(content, str):
            content = to_string(content).encode(encoding)
        else:
            content = content.encode(encoding)
    if str(path).endswith('.gz'):
        with gzip.open(path, mode) as outfile:
            outfile.write(content)
    else:
        with open(path, mode=mode) as outfile:
            outfile.write(content)
Write content to a file. If the path ends with .gz, gzip will be used.
def protect(self, password=None, read_protect=False, protect_from=0): return super(NTAG203, self).protect( password, read_protect, protect_from)
Set lock bits to disable future memory modifications. If *password* is None, all memory pages except the 16-bit counter in page 41 are protected by setting the relevant lock bits (note that lock bits can not be reset). If valid NDEF management data is found in page 4, protect() also sets the NDEF write flag to read-only. The NTAG203 can not be password protected. If a *password* argument is provided, the protect() method always returns False.
def setup(provider=None): site = init(provider) if not site: site = yaml.safe_load(_read_file(DEPLOY_YAML)) provider_class = PROVIDERS[site['provider']] provider_class.init(site)
Creates the provider config files needed to deploy your project
def is_valid_address(s):
    """Return True if *s* is a valid Bluetooth address.

    Valid addresses are always strings of the form XX:XX:XX:XX:XX:XX
    where X is a hexadecimal character.  For example, 01:23:45:67:89:AB is
    a valid address, but IN:VA:LI:DA:DD:RE is not.
    """
    try:
        pairs = s.split(":")
        if len(pairs) != 6:
            return False
        # every field must parse as hex and fit in a single octet
        if not all(0 <= int(b, 16) <= 255 for b in pairs):
            return False
    except (ValueError, AttributeError):
        # BUG FIX: narrowed the former bare `except:` to the errors actually
        # expected (malformed hex -> ValueError, non-string -> AttributeError)
        # so unrelated programming errors are no longer swallowed
        return False
    return True
returns True if address is a valid Bluetooth address valid address are always strings of the form XX:XX:XX:XX:XX:XX where X is a hexadecimal character. For example, 01:23:45:67:89:AB is a valid address, but IN:VA:LI:DA:DD:RE is not
def image_gen(normalizer, denorm, sz, tfms=None, max_zoom=None, pad=0, crop_type=None, tfm_y=None, sz_y=None, pad_mode=cv2.BORDER_REFLECT, scale=None): if tfm_y is None: tfm_y=TfmType.NO if tfms is None: tfms=[] elif not isinstance(tfms, collections.Iterable): tfms=[tfms] if sz_y is None: sz_y = sz if scale is None: scale = [RandomScale(sz, max_zoom, tfm_y=tfm_y, sz_y=sz_y) if max_zoom is not None else Scale(sz, tfm_y, sz_y=sz_y)] elif not is_listy(scale): scale = [scale] if pad: scale.append(AddPadding(pad, mode=pad_mode)) if crop_type!=CropType.GOOGLENET: tfms=scale+tfms return Transforms(sz, tfms, normalizer, denorm, crop_type, tfm_y=tfm_y, sz_y=sz_y)
Generate a standard set of transformations Arguments --------- normalizer : image normalizing function denorm : image denormalizing function sz : size, sz_y = sz if not specified. tfms : iterable collection of transformation functions max_zoom : float, maximum zoom pad : int, padding on top, left, right and bottom crop_type : crop type tfm_y : y axis specific transformations sz_y : y size, height pad_mode : cv2 padding style: repeat, reflect, etc. Returns ------- type : ``Transforms`` transformer for specified image operations. See Also -------- Transforms: the transformer object returned by this function
def wrplt(self, fout_dir, plt_ext="png"):
    """Write an image file containing the GoSubDag plot; return its path."""
    base = self.grprobj.get_fout_base(self.ntplt.hdrgo)
    filename = self.get_pltpat(plt_ext).format(BASE=base)
    fout_plt = os.path.join(fout_dir, filename)
    self.gosubdagplot.plt_dag(fout_plt)
    return fout_plt
Write png containing plot of GoSubDag.
def access_token(self):
    """Return the access token.

    When token caching is enabled and no token is stored yet, resolve it
    from the credential store; otherwise return the stored value as-is.
    """
    token = self.access_token_
    if self.cache_token and not token:
        return self._resolve_credential('access_token')
    return token
Get access_token.
def write_crc32(fo, bytes):
    """Write a 4-byte, big-endian CRC32 checksum of *bytes* to *fo*."""
    # Mask to 32 bits so the value packs as an unsigned big-endian int.
    fo.write(pack('>I', crc32(bytes) & 0xFFFFFFFF))
A 4-byte, big-endian CRC32 checksum
def _extract_response_xml(self, domain, response):
    """Extract Alexa ranking data from an HTTP response's XML body.

    Args:
        domain: the domain name the lookup was made for; always echoed
            back under the ``domain`` key.
        response: HTTP response object whose body holds Alexa XML.
            NOTE(review): reads the private ``_content`` attribute,
            presumably a ``requests.Response`` — confirm.

    Returns:
        dict of the form ``{'attributes': {...}}`` mapping lowercased
        Alexa keys (popularity/reach/rank) to their values, plus
        ``'domain'``.
    """
    attributes = {}
    # Maps Alexa XML tag -> name of the XML attribute holding its value.
    alexa_keys = {'POPULARITY': 'TEXT', 'REACH': 'RANK', 'RANK': 'DELTA'}
    try:
        xml_root = ET.fromstring(response._content)
        # 'SD//' walks the children under the SD element; this exact path
        # expression is relied upon — do not "normalize" it casually.
        for xml_child in xml_root.findall('SD//'):
            if xml_child.tag in alexa_keys and \
                    alexa_keys[xml_child.tag] in xml_child.attrib:
                attributes[xml_child.tag.lower(
                )] = xml_child.attrib[alexa_keys[xml_child.tag]]
    except ParseError:
        # Malformed XML: fall through and return only the domain.
        pass
    attributes['domain'] = domain
    return {'attributes': attributes}
Extract XML content of an HTTP response into dictionary format. Args: response: HTML Response objects Returns: A dictionary: {alexa-ranking key : alexa-ranking value}.
def make_regex(separator):
    """Build a compiled regexp that splits on *separator* while treating
    backslash-escaped separators as part of the field."""
    sep = re.escape(separator)
    # Optional leading separator, then one-or-more chars that are neither
    # the separator nor a backslash, or any backslash-escaped char.
    return re.compile(r'(?:{0})?((?:[^{0}\\]|\\.)+)'.format(sep))
Utility function to create regexp for matching escaped separators in strings.
def declalltypes(self):
    """Yield every typedef declaration found in the body."""
    yield from (decl for decl in self.body
                if hasattr(decl, '_ctype')
                and decl._ctype._storage == Storages.TYPEDEF)
generator on all declaration of type
def pre_save(self, model_instance, add):
    """Return the field value, converted per ``populate_from`` and
    ``time_override``, and store it back on the model instance."""
    raw = super(LinkedTZDateTimeField, self).pre_save(
        model_instance=model_instance, add=add)
    converted = self._convert_value(
        value=raw, model_instance=model_instance, add=add)
    setattr(model_instance, self.attname, converted)
    return converted
Converts the value being saved based on `populate_from` and `time_override`
def url_name_for_action(self, action):
    """Return the reverse URL name for *action* on this module/model."""
    module = self.module_name.lower()
    model = self.model_name.lower()
    return "{0}.{1}_{2}".format(module, model, action)
Returns the reverse name for this action
def add_internal_subnet(self, context_id, subnet_id):
    """Add an internal subnet to a tunnel context.

    :param int context_id: id of the context instance.
    :param int subnet_id: id of the internal subnet.
    :return bool: True if the subnet was added successfully.
    """
    return self.context.addPrivateSubnetToNetworkTunnel(
        subnet_id, id=context_id)
Add an internal subnet to a tunnel context. :param int context_id: The id-value representing the context instance. :param int subnet_id: The id-value representing the internal subnet. :return bool: True if internal subnet addition was successful.
def absnormpath(self, path):
    """Absolutize and minimalize the given path.

    Forces relative paths to be absolute (against the fake cwd) and
    normalizes away dot and empty components.

    Args:
        path: path to normalize (str or bytes).

    Returns:
        The normalized path relative to the current working directory,
        or the root directory if *path* is empty.
    """
    path = self.normcase(path)
    # _matching_string coerces literals to the str/bytes type of *path*.
    cwd = self._matching_string(path, self.cwd)
    if not path:
        path = self.path_separator
    elif not self._starts_with_root_path(path):
        root_name = self._matching_string(path, self.root.name)
        empty = self._matching_string(path, '')
        # Prefix with cwd unless cwd *is* the root (avoids '//path').
        path = self._path_separator(path).join(
            (cwd != root_name and cwd or empty, path))
    if path == self._matching_string(path, '.'):
        path = cwd
    return self.normpath(path)
Absolutize and minimalize the given path. Forces all relative paths to be absolute, and normalizes the path to eliminate dot and empty components. Args: path: Path to normalize. Returns: The normalized path relative to the current working directory, or the root directory if path is empty.
def update_bios_data_by_patch(self, data):
    """PATCH the given BIOS attribute dict to this resource.

    :param data: BIOS config data to apply.
    """
    payload = {'Attributes': data}
    self._conn.patch(self.path, data=payload)
Update bios data by patch :param data: default bios config data
def split_semicolon(line, maxsplit=None):
    r"""Split a line on semicolon characters, but not on escaped (``\;``) ones.

    :param line: line to split
    :type line: str
    :param maxsplit: maximal number of splits (None or negative: no limit)
    :type maxsplit: None | int
    :return: split line
    :rtype: list

    >>> split_semicolon('a,b;c;;g')
    ['a,b', 'c', '', 'g']
    >>> split_semicolon('a,b;c;;g', 2)
    ['a,b', 'c', ';g']
    >>> split_semicolon(r'a,b;c\;;g', 2)
    ['a,b', 'c;', 'g']
    """
    tokens = line.split(';')
    if maxsplit is None or maxsplit < 0:
        maxsplit = len(tokens)
    out = [tokens[0]]
    for tok in tokens[1:]:
        prev = out[-1]
        if prev.endswith('\\'):
            # Escaped separator: drop the backslash, glue the pieces back.
            out[-1] = prev[:-1] + ';' + tok
        elif len(out) - 1 >= maxsplit:
            # Split budget exhausted: keep the separator literally.
            out[-1] = prev + ';' + tok
        else:
            out.append(tok)
    return out
r"""Split a line on semicolons characters but not on the escaped semicolons :param line: line to split :type line: str :param maxsplit: maximal number of split (if None, no limit) :type maxsplit: None | int :return: split line :rtype: list >>> split_semicolon('a,b;c;;g') ['a,b', 'c', '', 'g'] >>> split_semicolon('a,b;c;;g', 2) ['a,b', 'c', ';g'] >>> split_semicolon(r'a,b;c\;;g', 2) ['a,b', 'c;', 'g']
def get_parts(self):
    """Partition the file into upload parts and return them as a list of
    ``{'part', 'offset', 'limit'}`` dicts."""
    parts = []
    offset = 0
    for part_no in range(1, self.total + 1):
        limit = offset + self.part_size
        # Clamp the final part to the end of the file.
        if limit >= self.file_size - 1:
            limit = self.file_size
        parts.append({'part': part_no, 'offset': offset, 'limit': limit})
        offset = limit
    return parts
Partitions the file and saves the parts to be uploaded in memory.
# Created once at import time; the original rebuilt the namedtuple class
# on every call, which is wasteful and makes isinstance checks impossible.
PkgInfo = collections.namedtuple(
    'PkgInfo',
    ('name', 'version', 'arch', 'repoid', 'install_date',
     'install_date_time_t')
)


def pkginfo(name, version, arch, repoid, install_date=None, install_date_time_t=None):
    """Build and return a ``PkgInfo`` namedtuple describing a package.

    :param name: package name
    :param version: package version string
    :param arch: package architecture
    :param repoid: repository id the package came from
    :param install_date: optional install date string
    :param install_date_time_t: optional install date as epoch seconds
    :return: PkgInfo namedtuple
    """
    return PkgInfo(name, version, arch, repoid, install_date,
                   install_date_time_t)
Build and return a pkginfo namedtuple
def get_prefix(self):
    """Return the pages-config key under which the current node's type
    (or its ``model`` attribute) is registered; None if not found."""
    node_type = type(self.node)
    for key, models in self.pages_config.items():
        # A single class is allowed as shorthand for a one-element tuple.
        if not hasattr(models, '__iter__'):
            models = (models,)
        for candidate in models:
            if node_type == candidate \
                    or node_type == getattr(candidate, 'model', None):
                return key
Each resource is defined in the pages config as a dict entry. This method returns the key from the config under which the current resource's type is registered.
def get_assessments_taken_by_ids(self, assessment_taken_ids):
    """Get an ``AssessmentTakenList`` corresponding to the given ``IdList``.

    Results are returned in the order of the supplied ids, including
    duplicates if the backend returns them under the same id.

    arg:    assessment_taken_ids (osid.id.IdList): the list of ``Ids``
            to retrieve
    return: (osid.assessment.AssessmentTakenList) - the returned
            ``AssessmentTaken`` list
    raise:  NullArgument - ``assessment_taken_ids`` is ``null``
    raise:  OperationFailed - unable to complete request
    """
    collection = JSONClientValidated('assessment',
                                     collection='AssessmentTaken',
                                     runtime=self._runtime)
    # Convert each osid Id into the Mongo ObjectId it maps to.
    object_id_list = []
    for i in assessment_taken_ids:
        object_id_list.append(
            ObjectId(self._get_id(i, 'assessment').get_identifier()))
    result = collection.find(
        dict({'_id': {'$in': object_id_list}}, **self._view_filter()))
    result = list(result)
    # Mongo's $in does not preserve request order; re-sort to match the
    # order of the requested ids.
    sorted_result = []
    for object_id in object_id_list:
        for object_map in result:
            if object_map['_id'] == object_id:
                sorted_result.append(object_map)
                break
    return objects.AssessmentTakenList(sorted_result,
                                       runtime=self._runtime,
                                       proxy=self._proxy)
Gets an ``AssessmentTakenList`` corresponding to the given ``IdList``. In plenary mode, the returned list contains all of the assessments specified in the ``Id`` list, in the order of the list, including duplicates, or an error results if an ``Id`` in the supplied list is not found or inaccessible. Otherwise, inaccessible ``AssessmentTaken`` objects may be omitted from the list and may present the elements in any order including returning a unique set. arg: assessment_taken_ids (osid.id.IdList): the list of ``Ids`` to retrieve return: (osid.assessment.AssessmentTakenList) - the returned ``AssessmentTaken list`` raise: NotFound - an ``Id was`` not found raise: NullArgument - ``assessment_taken_ids`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - assessment failure *compliance: mandatory -- This method must be implemented.*
def toggle_use_font_background_sensitivity(self, chk):
    """Enable or disable the palette color widgets to follow the
    'use font/background' checkbox state."""
    for widget_name in ('palette_16', 'palette_17'):
        self.get_widget(widget_name).set_sensitive(chk.get_active())
If the user chooses to use the GNOME default font configuration, they will not be able to use the font selector.
async def _async_stop(self):
    """Stop the agent and kill all its behaviours.

    Marks presence unavailable, kills every behaviour, shuts down the
    web runner, disconnects the XMPP client and clears the alive flag.
    """
    if self.presence:
        self.presence.set_unavailable()
    for behav in self.behaviours:
        behav.kill()
    if self.web.is_started():
        await self.web.runner.cleanup()
    if self.is_alive():
        self.client.stop()
        # Manually exit the connection's async context manager to finish
        # the disconnection handshake.
        aexit = self.conn_coro.__aexit__(*sys.exc_info())
        await aexit
    logger.info("Client disconnected.")
    self._alive.clear()
Stops an agent and kills all its behaviours.
def search(self, category=None, cuisine=None, location=(None, None), radius=None,
           tl_coord=(None, None), br_coord=(None, None), name=None, country=None,
           locality=None, region=None, postal_code=None, street_address=None,
           website_url=None, has_menu=None, open_at=None):
    """Locu Venue Search API call wrapper.

    All arguments are optional filters: *category*/*cuisine* are lists of
    strings; *location*, *tl_coord* and *br_coord* are (lat, long) tuples;
    *radius* is a float around *location*; *open_at* is a datetime;
    *has_menu* is a boolean; the remaining arguments are strings.

    Returns:
        dict with the data returned by the server.
    """
    # Forward every filter untouched; presumably _get_params drops the
    # None values before the request is built — TODO confirm.
    params = self._get_params(
        category=category, cuisine=cuisine, location=location, radius=radius,
        tl_coord=tl_coord, br_coord=br_coord, name=name, country=country,
        locality=locality, region=region, postal_code=postal_code,
        street_address=street_address, website_url=website_url,
        has_menu=has_menu, open_at=open_at)
    return self._create_query('search', params)
Locu Venue Search API Call Wrapper Args: *Note that none of the arguments are required category : List of category types that need to be filtered by: ['restaurant', 'spa', 'beauty salon', 'gym', 'laundry', 'hair care', 'other'] type : [string] cuisine : List of cuisine types that need to be filtered by: ['american', 'italian', ...] type : [string] location : Tuple that consists of (latitude, longtitude) coordinates type : tuple(float, float) radius : Radius around the given lat, long type : float tl_coord : Tuple that consists of (latitude, longtitude) for bounding box top left coordinates type : tuple(float, float) br_coord : Tuple that consists of (latitude, longtitude) for bounding box bottom right coordinates type : tuple(float, float) name : Name of the venue type : string country : Country where venue is located type : string locality : Locality. Ex 'San Francisco' type : string region : Region/state. Ex. 'CA' type : string postal_code : Postal code type : string street_address : Address type : string open_at : Search for venues open at the specified time type : datetime website_url : Filter by the a website url type : string has_menu : Filter venues that have menus in them type : boolean Returns: A dictionary with a data returned by the server Raises: HttpException with the error message from the server
def to(self, unit):
    """Convert this quantity to *unit*.

    Parameters
    ----------
    unit : str
        Name of the unit to convert to.

    Returns
    -------
    Unit
        New Unit object with the requested unit and computed value.
    """
    converted = Unit("0cm")
    # Route the conversion through inches, the common reference unit.
    converted.value = self.value / self.per_inch[self.unit] * self.per_inch[unit]
    converted.unit = unit
    return converted
Convert to a given unit. Parameters ---------- unit : str Name of the unit to convert to. Returns ------- u : Unit new Unit object with the requested unit and computed value.
def spkw17(handle, body, center, inframe, first, last, segid, epoch, eqel, rapol, decpol):
    """Write an SPK segment of type 17 given a type 17 data record.

    Thin ctypes marshaling wrapper around CSPICE ``spkw17_c``.
    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkw17_c.html

    :param handle: Handle of an SPK file open for writing (int).
    :param body: Body code for the ephemeris object (int).
    :param center: Body code for the center of motion (int).
    :param inframe: Reference frame of the states (str).
    :param first: First valid epoch for computed states (float).
    :param last: Last valid epoch for computed states (float).
    :param segid: Segment identifier (str).
    :param epoch: Epoch of elements, seconds past J2000 (float).
    :param eqel: 9-element array of equinoctial elements.
    :param rapol: RA of the reference-plane pole (float).
    :param decpol: Declination of the reference-plane pole (float).
    """
    # Convert every argument to the ctypes representation the C API expects.
    handle = ctypes.c_int(handle)
    body = ctypes.c_int(body)
    center = ctypes.c_int(center)
    inframe = stypes.stringToCharP(inframe)
    first = ctypes.c_double(first)
    last = ctypes.c_double(last)
    segid = stypes.stringToCharP(segid)
    epoch = ctypes.c_double(epoch)
    eqel = stypes.toDoubleVector(eqel)
    rapol = ctypes.c_double(rapol)
    decpol = ctypes.c_double(decpol)
    libspice.spkw17_c(handle, body, center, inframe, first, last, segid,
                      epoch, eqel, rapol, decpol)
Write an SPK segment of type 17 given a type 17 data record. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkw17_c.html :param handle: Handle of an SPK file open for writing. :type handle: int :param body: Body code for ephemeris object. :type body: int :param center: Body code for the center of motion of the body. :type center: int :param inframe: The reference frame of the states. :type inframe: str :param first: First valid time for which states can be computed. :type first: float :param last: Last valid time for which states can be computed. :type last: float :param segid: Segment identifier. :type segid: str :param epoch: Epoch of elements in seconds past J2000. :type epoch: float :param eqel: Array of equinoctial elements. :type eqel: 9-Element Array of floats :param rapol: Right Ascension of the pole of the reference plane. :type rapol: float :param decpol: Declination of the pole of the reference plane. :type decpol: float
def html_attributes(self):
    """Return the extra HTML attribute string (id, class, raw attributes).

    Each present piece is emitted with a leading space so the result can
    be appended directly inside a tag.
    """
    pieces = []
    if self.element_id is not None:
        pieces.append(' id="%s"' % self.element_id)
    if self.style_class is not None:
        pieces.append(' class="%s"' % self.style_class)
    if self.attributes is not None:
        pieces.append(' %s' % self.attributes)
    return ''.join(pieces)
Get extra html attributes such as id and class.
def _module_to_base_modules(s): parts = s.split('.') for i in range(1, len(parts)): yield '.'.join(parts[:i])
return all module names that would be imported due to this import-import
def to_pandas(self, wrap=False, **kwargs):
    """Execute at once and convert the result to a pandas Series.

    :param wrap: if True, wrap the pandas DataFrame into a PyODPS
        DataFrame before extracting the column
    :return: pandas Series
    :raises DependencyNotInstalledError: if pandas is not installed
    """
    try:
        import pandas as pd  # noqa: F401 -- availability check only
    except ImportError:
        raise DependencyNotInstalledError(
            'to_pandas requires for `pandas` library')

    def wrapper(result):
        # result.values holds the materialized frame; select this
        # expression's column from it.
        df = result.values
        if wrap:
            from .. import DataFrame
            df = DataFrame(df)
        return df[self.name]

    return self.execute(wrapper=wrapper, **kwargs)
Convert to pandas Series. Execute at once. :param wrap: if True, wrap the pandas DataFrame into a PyODPS DataFrame :return: pandas Series
def fetch(self):
    """Fetch a UserChannelInstance.

    :returns: Fetched UserChannelInstance
    :rtype: twilio.rest.chat.v2.service.user.user_channel.UserChannelInstance
    """
    payload = self._version.fetch('GET', self._uri, params=values.of({}))
    solution = self._solution
    return UserChannelInstance(
        self._version,
        payload,
        service_sid=solution['service_sid'],
        user_sid=solution['user_sid'],
        channel_sid=solution['channel_sid'],
    )
Fetch a UserChannelInstance :returns: Fetched UserChannelInstance :rtype: twilio.rest.chat.v2.service.user.user_channel.UserChannelInstance
def _load(self, f, layer=None, source=None):
    """Load data from a yaml formatted file.

    Parameters
    ----------
    f : str or file-like object
        Path of the file to load, or an object read directly.
    layer : str
        Layer to load data into (outermost if None).
    source : str
        Source to attribute the values to.
    """
    if hasattr(f, 'read'):
        self._loads(f.read(), layer=layer, source=source)
        return
    with open(f) as handle:
        self._loads(handle.read(), layer=layer, source=source)
Load data from a yaml formatted file. Parameters ---------- f : str or file like object If f is a string then it is interpreted as a path to the file to load If it is a file like object then data is read directly from it. layer : str layer to load data into. If none is supplied the outermost one is used source : str Source to attribute the values to
def html_entity_decode_char(self, m, defs=htmlentities.entitydefs):
    """Decode the HTML entity name captured in match *m* via *defs*.

    On an unknown entity the original matched text is returned unchanged.
    NOTE(review): the looked-up character is re-wrapped as ``&<char>;``
    rather than returned bare — confirm this is intentional.
    The shared default *defs* dict is only read, never mutated.
    """
    try:
        char = defs[m.group(1)]
        return "&{char};".format(char=char)
    except ValueError:
        return m.group(0)
    except KeyError:
        # Unknown entity name: leave the original text untouched.
        return m.group(0)
Decode a matched HTML entity name into its corresponding HTML character.
def set_body_s(self, stream):
    """Set a customized body stream.

    The body stream can only be changed before streaming has started.

    :param stream: InMemStream/PipeStream for the body
    :except TChannelError: raised if streaming has already started.
    """
    if self.argstreams[2].state != StreamState.init:
        raise TChannelError(
            "Unable to change the body since the streaming has started")
    self.argstreams[2] = stream
Set customized body stream. Note: the body stream can only be changed before the stream is consumed. :param stream: InMemStream/PipeStream for body :except TChannelError: Raise TChannelError if the stream is being sent when you try to change the stream.
def get_mapping_variable(variable_name, variables_mapping):
    """Get a variable's value from *variables_mapping*.

    Args:
        variable_name (str): variable name
        variables_mapping (dict): variables mapping

    Returns:
        the mapped variable value.

    Raises:
        exceptions.VariableNotFound: the variable is not in the mapping.
    """
    if variable_name not in variables_mapping:
        raise exceptions.VariableNotFound("{} is not found.".format(variable_name))
    return variables_mapping[variable_name]
get variable from variables_mapping. Args: variable_name (str): variable name variables_mapping (dict): variables mapping Returns: mapping variable value. Raises: exceptions.VariableNotFound: variable is not found.
def mad(a):
    """Calculate the median absolute deviation of a sample.

    :param a: array-like collection of values
    :return: the median of the absolute deviation of *a* from its median.
    """
    # np.asfarray was removed in NumPy 2.0; asarray with a float dtype is
    # the equivalent conversion.
    arr = np.asarray(a, dtype=float).ravel()
    return np.median(np.abs(arr - np.median(arr)))
Calculate the median absolute deviation of a sample a - a numpy array-like collection of values returns the median of the deviation of a from its median.
def parse_time_division(self, bytes):
    """Parse the time-division word from a MIDI file header.

    Returns a dict whose boolean ``fps`` flag says whether timing is
    SMPTE (frames per second) or metrical (ticks per beat). When ``fps``
    is True the dict also holds ``SMPTE_frames`` and ``clock_ticks``;
    otherwise it holds ``ticks_per_beat``.

    :param bytes: the raw two-byte division field
    :raises TimeDivisionError: on an invalid SMPTE frame rate
    """
    value = self.bytes_to_int(bytes)
    if not value & 0x8000:
        # Bit 15 clear: metrical timing; lower 15 bits = ticks per beat.
        return {'fps': False, 'ticks_per_beat': value & 0x7FFF}
    else:
        # Bit 15 set: SMPTE timing. Per the SMF spec the high byte is the
        # negative frame rate in two's complement (-24, -25, -29, -30)
        # and the low byte is the number of ticks per frame. The previous
        # `>> 2` shifts extracted neither field correctly, so the
        # validation below could never pass.
        SMPTE_frames = 256 - ((value >> 8) & 0xFF)
        if SMPTE_frames not in [24, 25, 29, 30]:
            # Python 3 raise syntax (was the Python 2 comma form).
            raise TimeDivisionError(
                "'%d' is not a valid value for the number of SMPTE frames"
                % SMPTE_frames)
        clock_ticks = value & 0x00FF
        return {'fps': True, 'SMPTE_frames': SMPTE_frames,
                'clock_ticks': clock_ticks}
Parse the time division found in the header of a MIDI file and return a dictionary with the boolean fps set to indicate whether to use frames per second or ticks per beat. If fps is True, the values SMPTE_frames and clock_ticks will also be set. If fps is False, ticks_per_beat will hold the value.
def _generate_recommendation(self, query_analysis, db_name, collection_name):
    """Generate an ideal index recommendation from the query analysis.

    Fields are ordered equality -> sort -> range, matching the order the
    original three passes produced.

    :param query_analysis: dict with an ``analyzedFields`` list
    :param db_name: database name (kept for interface compatibility)
    :param collection_name: collection the index applies to
    :return: OrderedDict with ``index`` and ``shellCommand`` entries
    """
    field_names = []
    # One pass per field type preserves the original emission order while
    # removing the triplicated loop body.
    for field_type in (EQUIV_TYPE, SORT_TYPE, RANGE_TYPE):
        for query_field in query_analysis['analyzedFields']:
            # '==' instead of 'is': these are value comparisons, and
            # identity on strings/ints only works by interning accident.
            if query_field['fieldType'] == field_type:
                field_names.append('"' + query_field['fieldName'] + '": 1')
    index_rec = '{' + ', '.join(field_names) + '}'
    return OrderedDict([
        ('index', index_rec),
        ('shellCommand',
         self.generate_shell_command(collection_name, index_rec)),
    ])
Generates an ideal query recommendation
def handle_input(self, input_str, place=True, check=False):
    """Transfer user input to a valid chess position.

    Undo commands (validated positions starting with 'u') are executed
    immediately; otherwise the position is either placed on the board
    (when *place* is True) or returned untouched.
    """
    # NOTE(review): the player lookup's return value was unused in the
    # original; the call is kept in case it has side effects — confirm.
    self.get_player()
    pos = self.validate_input(input_str)
    if pos[0] == 'u':
        self.undo(pos[1])
        return pos
    if not place:
        return pos
    return self.set_pos(pos, check)
Transfer user input to valid chess position