code: string (lengths 75 to 104k)
docstring: string (lengths 1 to 46.9k)
def resync_package(ctx, opts, owner, repo, slug, skip_errors):
    """Resynchronise a package."""
    click.echo(
        "Resynchronising the %(slug)s package ... "
        % {"slug": click.style(slug, bold=True)},
        nl=False,
    )

    context_msg = "Failed to resynchronise package!"
    with handle_api_exceptions(
        ctx, opts=opts, context_msg=context_msg, reraise_on_error=skip_errors
    ):
        with maybe_spinner(opts):
            api_resync_package(owner=owner, repo=repo, identifier=slug)

    click.secho("OK", fg="green")
Resynchronise a package.
def exit(self):
    """Terminate gdb process.

    Returns:
        None
    """
    if self.gdb_process:
        self.gdb_process.terminate()
        self.gdb_process.communicate()
        self.gdb_process = None
    return None
Terminate gdb process Returns: None
def _open_ftp(self):
    # type: () -> FTP
    """Open an ftp object for the file."""
    ftp = self.fs._open_ftp()
    ftp.voidcmd(str("TYPE I"))
    return ftp
Open an ftp object for the file.
def _modify_new_lines(code_to_modify, offset, code_to_insert):
    """
    Update new lines: the bytecode inserted should be the last instruction of
    the previous line.
    :return: bytes sequence of code with updated lines offsets
    """
    # There's a nice overview of co_lnotab in
    # https://github.com/python/cpython/blob/3.6/Objects/lnotab_notes.txt
    new_list = list(code_to_modify.co_lnotab)
    if not new_list:
        # Could happen on a lambda (in this case, a breakpoint in the lambda
        # should fallback to tracing).
        return None

    # As all numbers are relative, what we want is to hide the code we
    # inserted in the previous line (it should be the last thing right before
    # we increment the line so that we have a line event right after the
    # inserted code).
    bytecode_delta = len(code_to_insert)

    byte_increments = code_to_modify.co_lnotab[0::2]
    line_increments = code_to_modify.co_lnotab[1::2]

    if offset == 0:
        new_list[0] += bytecode_delta
    else:
        addr = 0
        it = zip(byte_increments, line_increments)
        for i, (byte_incr, _line_incr) in enumerate(it):
            addr += byte_incr
            if addr == offset:
                new_list[i * 2] += bytecode_delta
                break

    return bytes(new_list)
Update new lines: the bytecode inserted should be the last instruction of the previous line. :return: bytes sequence of code with updated lines offsets
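For context, ``co_lnotab`` is a bytes object of alternating (bytecode delta, line delta) pairs relative to ``co_firstlineno``; the function above widens one bytecode delta so the inserted instructions stay attributed to the previous line. A minimal decoder sketch of that encoding, ignoring the signed line deltas that CPython 3.6+ allows (the helper name is hypothetical):

def decode_lnotab(co_lnotab, firstlineno):
    """Illustrative decoder: absolute (bytecode offset, line) pairs."""
    addr, line = 0, firstlineno
    pairs = []
    for byte_incr, line_incr in zip(co_lnotab[0::2], co_lnotab[1::2]):
        addr += byte_incr
        line += line_incr
        pairs.append((addr, line))
    return pairs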
def _cast_dict(self, data_dict):
    """Internal method that makes sure any dictionary elements are properly
    cast into the correct types, instead of just treating everything like a
    string from the csv file.

    Args:
        data_dict: dictionary containing bro log data.

    Returns:
        Cleaned Data dict.
    """
    for key, value in data_dict.iteritems():
        data_dict[key] = self._cast_value(value)

    # Fixme: resp_body_data can be very large so removing it for now
    if 'resp_body_data' in data_dict:
        del data_dict['resp_body_data']

    return data_dict
Internal method that makes sure any dictionary elements are properly cast into the correct types, instead of just treating everything like a string from the csv file. Args: data_dict: dictionary containing bro log data. Returns: Cleaned Data dict.
def renderHTTP(self, context):
    """
    Render the wrapped resource if HTTPS is already being used, otherwise
    invoke a helper which may generate a redirect.
    """
    request = IRequest(context)
    if request.isSecure():
        renderer = self.wrappedResource
    else:
        renderer = _SecureWrapper(self.urlGenerator, self.wrappedResource)
    return renderer.renderHTTP(context)
Render the wrapped resource if HTTPS is already being used, otherwise invoke a helper which may generate a redirect.
def update(self, data, key):
    """Update a key's value in a JSON file."""
    og_data = self.read()
    og_data[key] = data
    self.write(og_data)
Update a key's value in a JSON file.
def populate_parallel_text(extract_dir: str,
                           file_sets: List[Tuple[str, str, str]],
                           dest_prefix: str,
                           keep_separate: bool,
                           head_n: int = 0):
    """
    Create raw parallel train, dev, or test files with a given prefix.

    :param extract_dir: Directory where raw files (inputs) are extracted.
    :param file_sets: Sets of files to use.
    :param dest_prefix: Prefix for output files.
    :param keep_separate: True if each file set (source-target pair) should
                          have its own file (used for test sets).
    :param head_n: If N>0, use only the first N lines (used in test mode).
    """
    source_out = None  # type: IO[Any]
    target_out = None  # type: IO[Any]
    lines_written = 0
    # Single output file for each side
    if not keep_separate:
        source_dest = dest_prefix + SUFFIX_SRC_GZ
        target_dest = dest_prefix + SUFFIX_TRG_GZ
        logging.info("Populate: %s %s", source_dest, target_dest)
        source_out = gzip.open(source_dest, "wt", encoding="utf-8")
        target_out = gzip.open(target_dest, "wt", encoding="utf-8")
    for i, (source_fname, target_fname, text_type) in enumerate(file_sets):
        # One output file per input file for each side
        if keep_separate:
            if source_out:
                source_out.close()
            if target_out:
                target_out.close()
            source_dest = dest_prefix + str(i) + "." + SUFFIX_SRC_GZ
            target_dest = dest_prefix + str(i) + "." + SUFFIX_TRG_GZ
            logging.info("Populate: %s %s", source_dest, target_dest)
            source_out = gzip.open(source_dest, "wt", encoding="utf-8")
            target_out = gzip.open(target_dest, "wt", encoding="utf-8")
        for source_line, target_line in zip(
                plain_text_iter(os.path.join(extract_dir, source_fname), text_type, DATA_SRC),
                plain_text_iter(os.path.join(extract_dir, target_fname), text_type, DATA_TRG)):
            # Only write N lines total if requested, but reset per file when
            # keeping files separate
            if head_n > 0 and lines_written >= head_n:
                if keep_separate:
                    lines_written = 0
                break
            source_out.write("{}\n".format(source_line))
            target_out.write("{}\n".format(target_line))
            lines_written += 1
    source_out.close()
    target_out.close()
Create raw parallel train, dev, or test files with a given prefix. :param extract_dir: Directory where raw files (inputs) are extracted. :param file_sets: Sets of files to use. :param dest_prefix: Prefix for output files. :param keep_separate: True if each file set (source-target pair) should have its own file (used for test sets). :param head_n: If N>0, use only the first N lines (used in test mode).
def show(self):
    """Print models sorted by metric."""
    hyper_combos = itertools.product(*list(self.hyper_params.values()))
    if not self.models:
        c_values = [[idx + 1, list(val)] for idx, val in enumerate(hyper_combos)]
        print(H2OTwoDimTable(
            col_header=['Model', 'Hyperparameters: ['
                        + ', '.join(list(self.hyper_params.keys())) + ']'],
            table_header='Grid Search of Model ' + self.model.__class__.__name__,
            cell_values=c_values))
    else:
        print(self.sorted_metric_table())
Print models sorted by metric.
def jplace_split(self, original_jplace, cluster_dict):
    '''
    To make GraftM more efficient, reads are dereplicated and merged into one
    file prior to placement using pplacer. This function separates the single
    jplace file produced by this process into the separate jplace files, one
    per input file (if multiple were provided) and backfills abundance
    (re-replicates?) into the placement file so analyses can be done using
    the placement files.

    Parameters
    ----------
    original_jplace : dict (json)
        json .jplace file from the pplacer step.
    cluster_dict : dict
        dictionary stores information on pre-placement clustering

    Returns
    -------
    A dict containing placement hashes to write to new jplace file. Each key
    represents a file alias
    '''
    output_hash = {}

    for placement in original_jplace['placements']:  # for each placement
        alias_placements_list = []
        nm_dict = {}
        p = placement['p']
        if 'nm' in placement.keys():
            nm = placement['nm']
        elif 'n' in placement.keys():
            nm = placement['n']
        else:
            raise Exception("Unexpected jplace format: Either 'nm' or 'n' are expected as keys in placement jplace .JSON file")

        for nm_entry in nm:
            nm_list = []
            placement_read_name, plval = nm_entry
            # Split the alias index out of the read name, which corresponds
            # to the input file from which the read originated.
            read_alias_idx = placement_read_name.split('_')[-1]
            read_name = '_'.join(placement_read_name.split('_')[:-1])
            read_cluster = cluster_dict[read_alias_idx][read_name]
            for read in read_cluster:
                nm_list.append([read.name, plval])
            if read_alias_idx not in nm_dict:
                nm_dict[read_alias_idx] = nm_list
            else:
                # Merge the expanded [name, likelihood] pairs for this alias
                nm_dict[read_alias_idx] += nm_list

        for alias_idx, nm_list in nm_dict.iteritems():
            placement_hash = {'p': p, 'nm': nm_list}
            if alias_idx not in output_hash:
                output_hash[alias_idx] = [placement_hash]
            else:
                output_hash[alias_idx].append(placement_hash)

    return output_hash
To make GraftM more efficient, reads are dereplicated and merged into one file prior to placement using pplacer. This function separates the single jplace file produced by this process into the separate jplace files, one per input file (if multiple were provided) and backfills abundance (re-replicates?) into the placement file so analyses can be done using the placement files. Parameters ---------- original_jplace : dict (json) json .jplace file from the pplacer step. cluster_dict : dict dictionary stores information on pre-placement clustering Returns ------- A dict containing placement hashes to write to new jplace file. Each key represents a file alias
def bytes_from_readable_size(C, size, suffix='B'):
    """given a readable_size (as produced by File.readable_size()), return
    the number of bytes."""
    # Raw string avoids invalid escape sequences in the pattern.
    s = re.split(r"^([0-9\.]+)\s*([%s]?)%s?" % (''.join(C.SIZE_UNITS), suffix),
                 size, flags=re.I)
    bytes, unit = round(float(s[1])), s[2].upper()
    while unit in C.SIZE_UNITS and C.SIZE_UNITS.index(unit) > 0:
        bytes *= 1024
        unit = C.SIZE_UNITS[C.SIZE_UNITS.index(unit) - 1]
    return bytes
given a readable_size (as produced by File.readable_size()), return the number of bytes.
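A hypothetical usage sketch; the `C.SIZE_UNITS` container below is an assumption (units ordered smallest first), not part of the original class:

class C:
    SIZE_UNITS = ['B', 'K', 'M', 'G', 'T']  # hypothetical constants holder

bytes_from_readable_size(C, '2 KB')  # -> 2048 (2 * 1024)
bytes_from_readable_size(C, '3M')    # -> 3145728 (3 * 1024 ** 2)
# Note the value is rounded *before* scaling, so '1.5M' -> 2 * 1024 ** 2.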
def count_variants_barplot(data):
    """ Return HTML for the Variant Counts barplot """
    keys = OrderedDict()
    keys['snps'] = {'name': 'SNPs'}
    keys['mnps'] = {'name': 'MNPs'}
    keys['insertions'] = {'name': 'Insertions'}
    keys['deletions'] = {'name': 'Deletions'}
    keys['complex'] = {'name': 'Complex'}
    keys['symbolic'] = {'name': 'Symbolic'}
    keys['mixed'] = {'name': 'Mixed'}
    keys['nocalls'] = {'name': 'No-calls'}

    plot_conf = {
        'id': 'gatk_varianteval_variant_plot',
        'title': 'GATK VariantEval: Variant Counts',
        'ylab': '# Variants',
        'cpswitch_counts_label': 'Number of Variants'
    }
    return bargraph.plot(data, keys, plot_conf)
Return HTML for the Variant Counts barplot
def unescape(b, encoding):
    '''Unescape all string and unicode literals in bytes.'''
    return string_literal_re.sub(
        lambda m: unescape_string_literal(m.group(), encoding),
        b
    )
Unescape all string and unicode literals in bytes.
def _rescale(self, bands):
    """ Rescale bands """
    self.output("Rescaling", normal=True, arrow=True)
    for key, band in enumerate(bands):
        self.output("band %s" % self.bands[key], normal=True, color='green', indent=1)
        bands[key] = sktransform.rescale(band, 2)
        bands[key] = (bands[key] * 65535).astype('uint16')
    return bands
Rescale bands
def execute(self):
    """
    Execute the actions necessary to perform a `molecule init scenario` and
    returns None.

    :return: None
    """
    scenario_name = self._command_args['scenario_name']
    role_name = os.getcwd().split(os.sep)[-1]
    role_directory = util.abs_path(os.path.join(os.getcwd(), os.pardir))

    msg = 'Initializing new scenario {}...'.format(scenario_name)
    LOG.info(msg)
    molecule_directory = config.molecule_directory(
        os.path.join(role_directory, role_name))
    scenario_directory = os.path.join(molecule_directory, scenario_name)
    scenario_base_directory = os.path.dirname(scenario_directory)

    if os.path.isdir(scenario_directory):
        msg = ('The directory molecule/{} exists. '
               'Cannot create new scenario.').format(scenario_name)
        util.sysexit_with_message(msg)

    scenario_base_directory = os.path.join(role_directory, role_name)
    templates = [
        'scenario/driver/{driver_name}'.format(**self._command_args),
        'scenario/verifier/{verifier_name}'.format(**self._command_args),
    ]
    for template in templates:
        self._process_templates(template, self._command_args,
                                scenario_base_directory)
    self._process_templates('molecule', self._command_args, role_directory)

    role_directory = os.path.join(role_directory, role_name)
    msg = 'Initialized scenario in {} successfully.'.format(scenario_directory)
    LOG.success(msg)
Execute the actions necessary to perform a `molecule init scenario` and returns None. :return: None
def run_doxygen(folder):
    """Run the doxygen make command in the designated folder."""
    try:
        retcode = subprocess.call("cd %s; make doxygen" % folder, shell=True)
        if retcode < 0:
            sys.stderr.write("doxygen terminated by signal %s" % (-retcode))
    except OSError as e:
        sys.stderr.write("doxygen execution failed: %s" % e)
Run the doxygen make command in the designated folder.
def l2norm_squared(a):
    """ Squared L2 (Frobenius) norm """
    value = 0
    for i in xrange(a.shape[1]):
        value += np.dot(a[:, i], a[:, i])
    return value
Squared L2 (Frobenius) norm
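The column-by-column loop computes sum_i a[:, i] . a[:, i], i.e. the squared Frobenius norm, so a vectorized equivalent is a one-liner (sketch):

import numpy as np

a = np.arange(6.0).reshape(2, 3)
value = np.sum(a ** 2)  # squared Frobenius norm; 55.0 for this a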
def close_right(self):
    """
    Closes every editor tab on the right of the current one.
    """
    current_widget = self.widget(self.tab_under_menu())
    index = self.indexOf(current_widget)
    if self._try_close_dirty_tabs(tab_range=range(index + 1, self.count())):
        while True:
            widget = self.widget(self.count() - 1)
            if widget != current_widget:
                self.remove_tab(self.count() - 1)
            else:
                break
Closes every editor tab on the right of the current one.
def h(self):
    r"""
    Returns the step size to be used in numerical differentiation with
    respect to the model parameters.

    The step size is given as a vector with length ``n_modelparams`` so
    that each model parameter can be weighted independently.
    """
    if np.size(self._h) > 1:
        assert np.size(self._h) == self.n_modelparams
        return self._h
    else:
        return self._h * np.ones(self.n_modelparams)
r""" Returns the step size to be used in numerical differentiation with respect to the model parameters. The step size is given as a vector with length ``n_modelparams`` so that each model parameter can be weighted independently.
def persist_database(metamodel, path, mode='w'):
    '''
    Persist all instances, class definitions and association definitions in
    a *metamodel* by serializing them and saving to a *path* on disk.
    '''
    with open(path, mode) as f:
        for kind in sorted(metamodel.metaclasses.keys()):
            metaclass = metamodel.metaclasses[kind]
            s = serialize_class(metaclass.clazz)
            f.write(s)
            for index_name, attribute_names in metaclass.indices.items():
                attribute_names = ', '.join(attribute_names)
                s = 'CREATE UNIQUE INDEX %s ON %s (%s);\n' % (index_name,
                                                              metaclass.kind,
                                                              attribute_names)
                f.write(s)
        for ass in sorted(metamodel.associations, key=lambda x: x.rel_id):
            s = serialize_association(ass)
            f.write(s)
        for inst in metamodel.instances:
            s = serialize_instance(inst)
            f.write(s)
Persist all instances, class definitions and association definitions in a *metamodel* by serializing them and saving to a *path* on disk.
def crop(img, center, sz, mode='constant'):
    """
    crop sz from ij as center
    :param img:
    :param center: ij
    :param sz:
    :param mode:
    :return:
    """
    center = np.array(center)
    sz = np.array(sz)
    istart = (center - sz / 2.).astype('int32')
    iend = istart + sz
    imsz = img.shape[:2]
    if np.any(istart < 0) or np.any(iend > imsz):
        # padding; pad widths must be non-negative, so negate any
        # out-of-bounds start offsets
        padwidth = [(np.maximum(0, -istart[0]), np.maximum(0, iend[0] - imsz[0])),
                    (np.maximum(0, -istart[1]), np.maximum(0, iend[1] - imsz[1]))]
        padwidth += [(0, 0)] * (len(img.shape) - 2)
        img = np.pad(img, padwidth, mode=mode)
        istart = (np.maximum(0, istart[0]), np.maximum(0, istart[1]))
        return img[istart[0]:istart[0] + sz[0], istart[1]:istart[1] + sz[1]]
    return img[istart[0]:iend[0], istart[1]:iend[1]]
crop sz from ij as center :param img: :param center: ij :param sz: :param mode: :return:
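A small usage sketch of the padding path, where the requested window overhangs the image border (this relies on the non-negative pad widths noted in the code):

import numpy as np

img = np.arange(16, dtype=np.uint8).reshape(4, 4)
# A 4x4 window centred at (0, 0) overhangs the top-left corner,
# so the image is zero-padded before slicing:
patch = crop(img, center=(0, 0), sz=(4, 4), mode='constant')
print(patch.shape)  # (4, 4)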
def _notify_exit_thread(self, event):
    """
    Notify the termination of a thread.

    This is done automatically by the L{Debug} class, you shouldn't need
    to call it yourself.

    @type  event: L{ExitThreadEvent}
    @param event: Exit thread event.

    @rtype:  bool
    @return: C{True} to call the user-defined handle, C{False} otherwise.
    """
    dwThreadId = event.get_tid()
##    if self.has_thread(dwThreadId):   # XXX this would trigger a scan
    if self._has_thread_id(dwThreadId):
        self._del_thread(dwThreadId)
    return True
Notify the termination of a thread. This is done automatically by the L{Debug} class, you shouldn't need to call it yourself. @type event: L{ExitThreadEvent} @param event: Exit thread event. @rtype: bool @return: C{True} to call the user-defined handle, C{False} otherwise.
def availableRoles(self):
    '''
    Returns the set of roles for this event. Since roles are not always
    custom specified for an event, this looks for the set of available roles
    in multiple places. If no roles are found, then the method returns an
    empty list, in which case it can be assumed that the event's
    registration is not role-specific.
    '''
    eventRoles = self.eventrole_set.filter(capacity__gt=0)
    if eventRoles.count() > 0:
        return [x.role for x in eventRoles]
    elif isinstance(self, Series):
        return self.classDescription.danceTypeLevel.danceType.roles.all()
    return []
Returns the set of roles for this event. Since roles are not always custom specified for an event, this looks for the set of available roles in multiple places. If no roles are found, then the method returns an empty list, in which case it can be assumed that the event's registration is not role-specific.
def create_thumbnail(uuid, thumbnail_width):
    """Create the thumbnail for an image."""
    # size = '!' + thumbnail_width + ','
    size = thumbnail_width + ','  # flask_iiif doesn't support ! at the moment
    thumbnail = IIIFImageAPI.get('v2', uuid, size, 0, 'default', 'jpg')
Create the thumbnail for an image.
def size(args):
    """
    %prog size fastqfile

    Find the total base pairs in a list of fastq files
    """
    p = OptionParser(size.__doc__)
    opts, args = p.parse_args(args)

    if len(args) < 1:
        sys.exit(not p.print_help())

    total_size = total_numrecords = 0
    for f in args:
        cur_size = cur_numrecords = 0
        for rec in iter_fastq(f):
            if not rec:
                break
            cur_numrecords += 1
            cur_size += len(rec)
        print(" ".join(str(x) for x in
                       (op.basename(f), cur_numrecords, cur_size)))
        total_numrecords += cur_numrecords
        total_size += cur_size

    if len(args) > 1:
        print(" ".join(str(x) for x in
                       ("Total", total_numrecords, total_size)))
%prog size fastqfile Find the total base pairs in a list of fastq files
def chk_col_numbers(line_num, num_cols, tax_id_col, id_col, symbol_col):
    """
    Check that none of the input column numbers is out of range.
    (Instead of defining this function, we could depend on Python's built-in
    IndexError exception for this issue, but the IndexError exception
    wouldn't include line number information, which is helpful for users to
    find exactly which line is the culprit.)
    """
    bad_col = ''
    if tax_id_col >= num_cols:
        bad_col = 'tax_id_col'
    elif id_col >= num_cols:
        bad_col = 'discontinued_id_col'
    elif symbol_col >= num_cols:
        bad_col = 'discontinued_symbol_col'

    if bad_col:
        raise Exception(
            'Input file line #%d: column number of %s is out of range'
            % (line_num, bad_col))
Check that none of the input column numbers is out of range. (Instead of defining this function, we could depend on Python's built-in IndexError exception for this issue, but the IndexError exception wouldn't include line number information, which is helpful for users to find exactly which line is the culprit.)
def get_common_password_hash(self, salt):
    """x = H(s | H(I | ":" | P))

    :param int salt:
    :rtype: int
    """
    password = self._password
    if password is None:
        raise SRPException('User password should be in context for this scenario.')
    return self.hash(salt, self.hash(self._user, password, joiner=':'))
x = H(s | H(I | ":" | P)) :param int salt: :rtype: int
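For reference, the same SRP x value computed directly with hashlib, under the RFC 5054 convention that H is SHA-1 over concatenated bytes (a sketch; real implementations differ in how I, P and s are encoded):

import hashlib

def srp_x(salt: bytes, username: bytes, password: bytes) -> int:
    """x = H(s | H(I | ':' | P)), read as a big-endian integer."""
    inner = hashlib.sha1(username + b':' + password).digest()
    return int.from_bytes(hashlib.sha1(salt + inner).digest(), 'big')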
def guess_version_by_running_live_package(pkg_key, default="?"):
    # type: (str, str) -> Any
    """Guess the version of a pkg when pip doesn't provide it.

    :param str pkg_key: key of the package
    :param str default: default version to return if unable to find
    :returns: version
    :rtype: string
    """
    try:
        m = import_module(pkg_key)
    except ImportError:
        return default
    else:
        return getattr(m, "__version__", default)
Guess the version of a pkg when pip doesn't provide it. :param str pkg_key: key of the package :param str default: default version to return if unable to find :returns: version :rtype: string
def list(region=None, key=None, keyid=None, profile=None):
    '''
    List all buckets owned by the authenticated sender of the request.

    Returns list of buckets

    CLI Example:

    .. code-block:: yaml

        Owner: {...}
        Buckets:
          - {...}
          - {...}
    '''
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        buckets = conn.list_buckets()
        if not bool(buckets.get('Buckets')):
            log.warning('No buckets found')
        if 'ResponseMetadata' in buckets:
            del buckets['ResponseMetadata']
        return buckets
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
List all buckets owned by the authenticated sender of the request. Returns list of buckets CLI Example: .. code-block:: yaml Owner: {...} Buckets: - {...} - {...}
def manage_request_types_view(request):
    '''
    Manage requests. Display a list of request types with links to edit them.
    Also display a link to add a new request type. Restricted to presidents
    and superadmins.
    '''
    request_types = RequestType.objects.all()
    return render_to_response('manage_request_types.html', {
        'page_name': "Admin - Manage Request Types",
        'request_types': request_types,
    }, context_instance=RequestContext(request))
Manage requests. Display a list of request types with links to edit them. Also display a link to add a new request type. Restricted to presidents and superadmins.
def pip_search(self, search_string=None):
    """Search for pip packages in PyPI matching `search_string`."""
    extra_args = ['search', search_string]
    return self._call_pip(name='root', extra_args=extra_args,
                          callback=self._pip_search)
Search for pip packages in PyPI matching `search_string`.
def first_rec(ofile, Rec, file_type):
    """
    opens the file ofile as a magic template file with headers as the keys
    to Rec
    """
    keylist = []
    opened = False
    # sometimes Windows needs a little extra time to open a file
    # or else it throws an error
    while not opened:
        try:
            pmag_out = open(ofile, 'w')
            opened = True
        except IOError:
            time.sleep(1)
    outstring = "tab \t" + file_type + "\n"
    pmag_out.write(outstring)
    keystring = ""
    for key in list(Rec.keys()):
        keystring = keystring + '\t' + key.strip()
        keylist.append(key)
    keystring = keystring + '\n'
    pmag_out.write(keystring[1:])
    pmag_out.close()
    return keylist
opens the file ofile as a magic template file with headers as the keys to Rec
def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=3,
                  copy=True, raise_if_out_of_image=False):
    """
    Draw the keypoint onto a given image.

    The keypoint is drawn as a square.

    Parameters
    ----------
    image : (H,W,3) ndarray
        The image onto which to draw the keypoint.

    color : int or list of int or tuple of int or (3,) ndarray, optional
        The RGB color of the keypoint.
        If a single int ``C``, then that is equivalent to ``(C,C,C)``.

    alpha : float, optional
        The opacity of the drawn keypoint, where ``1.0`` denotes a fully
        visible keypoint and ``0.0`` an invisible one.

    size : int, optional
        The size of the keypoint. If set to ``S``, each square will have
        size ``S x S``.

    copy : bool, optional
        Whether to copy the image before drawing the keypoint.

    raise_if_out_of_image : bool, optional
        Whether to raise an exception if the keypoint is outside of the
        image.

    Returns
    -------
    image : (H,W,3) ndarray
        Image with drawn keypoint.

    """
    if copy:
        image = np.copy(image)

    if image.ndim == 2:
        assert ia.is_single_number(color), (
            "Got a 2D image. Expected then 'color' to be a single number, "
            "but got %s." % (str(color),))
    elif image.ndim == 3 and ia.is_single_number(color):
        color = [color] * image.shape[-1]

    input_dtype = image.dtype
    alpha_color = color
    if alpha < 0.01:
        # keypoint invisible, nothing to do
        return image
    elif alpha > 0.99:
        alpha = 1
    else:
        image = image.astype(np.float32, copy=False)
        alpha_color = alpha * np.array(color)

    height, width = image.shape[0:2]

    y, x = self.y_int, self.x_int

    x1 = max(x - size//2, 0)
    x2 = min(x + 1 + size//2, width)
    y1 = max(y - size//2, 0)
    y2 = min(y + 1 + size//2, height)

    x1_clipped, x2_clipped = np.clip([x1, x2], 0, width)
    y1_clipped, y2_clipped = np.clip([y1, y2], 0, height)

    x1_clipped_ooi = (x1_clipped < 0 or x1_clipped >= width)
    x2_clipped_ooi = (x2_clipped < 0 or x2_clipped >= width+1)
    y1_clipped_ooi = (y1_clipped < 0 or y1_clipped >= height)
    y2_clipped_ooi = (y2_clipped < 0 or y2_clipped >= height+1)
    x_ooi = (x1_clipped_ooi and x2_clipped_ooi)
    y_ooi = (y1_clipped_ooi and y2_clipped_ooi)
    x_zero_size = (x2_clipped - x1_clipped) < 1  # min size is 1px
    y_zero_size = (y2_clipped - y1_clipped) < 1
    if not x_ooi and not y_ooi and not x_zero_size and not y_zero_size:
        if alpha == 1:
            image[y1_clipped:y2_clipped, x1_clipped:x2_clipped] = color
        else:
            image[y1_clipped:y2_clipped, x1_clipped:x2_clipped] = (
                (1 - alpha)
                * image[y1_clipped:y2_clipped, x1_clipped:x2_clipped]
                + alpha_color
            )
    else:
        if raise_if_out_of_image:
            raise Exception(
                "Cannot draw keypoint x=%.8f, y=%.8f on image with "
                "shape %s." % (x, y, image.shape))

    if image.dtype.name != input_dtype.name:
        if input_dtype.name == "uint8":
            image = np.clip(image, 0, 255, out=image)
        image = image.astype(input_dtype, copy=False)

    return image
Draw the keypoint onto a given image. The keypoint is drawn as a square. Parameters ---------- image : (H,W,3) ndarray The image onto which to draw the keypoint. color : int or list of int or tuple of int or (3,) ndarray, optional The RGB color of the keypoint. If a single int ``C``, then that is equivalent to ``(C,C,C)``. alpha : float, optional The opacity of the drawn keypoint, where ``1.0`` denotes a fully visible keypoint and ``0.0`` an invisible one. size : int, optional The size of the keypoint. If set to ``S``, each square will have size ``S x S``. copy : bool, optional Whether to copy the image before drawing the keypoint. raise_if_out_of_image : bool, optional Whether to raise an exception if the keypoint is outside of the image. Returns ------- image : (H,W,3) ndarray Image with drawn keypoint.
def push_notification_devices_destroy_many(self, data, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/push_notification_devices#bulk-unregister-push-notification-devices"
    api_path = "/api/v2/push_notification_devices/destroy_many.json"
    return self.call(api_path, method="POST", data=data, **kwargs)
https://developer.zendesk.com/rest_api/docs/core/push_notification_devices#bulk-unregister-push-notification-devices
def _set_redist_rip(self, v, load=False):
    """
    Setter method for redist_rip, mapped from YANG variable
    /isis_state/router_isis_config/is_address_family_v6/redist_rip (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_redist_rip is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_redist_rip() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=redist_rip.redist_rip, is_container='container', presence=False, yang_name="redist-rip", rest_name="redist-rip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-redistribution-redist-rip-1'}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """redist_rip must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=redist_rip.redist_rip, is_container='container', presence=False, yang_name="redist-rip", rest_name="redist-rip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-redistribution-redist-rip-1'}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)""",
        })

    self.__redist_rip = t
    if hasattr(self, '_set'):
        self._set()
Setter method for redist_rip, mapped from YANG variable /isis_state/router_isis_config/is_address_family_v6/redist_rip (container) If this variable is read-only (config: false) in the source YANG file, then _set_redist_rip is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_redist_rip() directly.
def clean_inputs(data):
    """Clean BED input files to avoid overlapping segments that cause
    downstream issues.

    Pre-merges inputs to avoid needing to call multiple times during later
    parallel steps.
    """
    if not utils.get_in(data, ("config", "algorithm", "variant_regions_orig")):
        data["config"]["algorithm"]["variant_regions_orig"] = dd.get_variant_regions(data)
    clean_vr = clean_file(dd.get_variant_regions(data), data, prefix="cleaned-")
    merged_vr = merge_overlaps(clean_vr, data)
    data["config"]["algorithm"]["variant_regions"] = clean_vr
    data["config"]["algorithm"]["variant_regions_merged"] = merged_vr

    if dd.get_coverage(data):
        if not utils.get_in(data, ("config", "algorithm", "coverage_orig")):
            data["config"]["algorithm"]["coverage_orig"] = dd.get_coverage(data)
        clean_cov_bed = clean_file(dd.get_coverage(data), data, prefix="cov-", simple=True)
        merged_cov_bed = merge_overlaps(clean_cov_bed, data)
        data["config"]["algorithm"]["coverage"] = clean_cov_bed
        data["config"]["algorithm"]["coverage_merged"] = merged_cov_bed

    if 'seq2c' in get_svcallers(data):
        seq2c_ready_bed = prep_seq2c_bed(data)
        if not seq2c_ready_bed:
            logger.warning("Can't run Seq2C without a svregions or variant_regions BED file")
        else:
            data["config"]["algorithm"]["seq2c_bed_ready"] = seq2c_ready_bed
    elif regions.get_sv_bed(data):
        dd.set_sv_regions(data, clean_file(regions.get_sv_bed(data), data, prefix="svregions-"))

    return data
Clean BED input files to avoid overlapping segments that cause downstream issues. Pre-merges inputs to avoid needing to call multiple times during later parallel steps.
def Decompress(self, compressed_data):
    """Decompresses the compressed data.

    Args:
      compressed_data (bytes): compressed data.

    Returns:
      tuple(bytes, bytes): uncompressed data and remaining compressed data.

    Raises:
      BackEndError: if the XZ compressed stream cannot be decompressed.
    """
    try:
        if hasattr(lzma, 'LZMA_VERSION'):
            # Note that we cannot use max_length=0 here due to different
            # versions of the lzma code.
            uncompressed_data = self._lzma_decompressor.decompress(
                compressed_data, 0)
        else:
            uncompressed_data = self._lzma_decompressor.decompress(compressed_data)

        remaining_compressed_data = getattr(
            self._lzma_decompressor, 'unused_data', b'')

    except (EOFError, IOError, LZMAError) as exception:
        raise errors.BackEndError((
            'Unable to decompress XZ compressed stream with error: '
            '{0!s}.').format(exception))

    return uncompressed_data, remaining_compressed_data
Decompresses the compressed data. Args: compressed_data (bytes): compressed data. Returns: tuple(bytes, bytes): uncompressed data and remaining compressed data. Raises: BackEndError: if the XZ compressed stream cannot be decompressed.
def GetDatabaseAccount(self, url_connection=None):
    """Gets database account info.

    :return: The Database Account.
    :rtype: documents.DatabaseAccount
    """
    if url_connection is None:
        url_connection = self.url_connection

    initial_headers = dict(self.default_headers)
    headers = base.GetHeaders(self,
                              initial_headers,
                              'get',
                              '',  # path
                              '',  # id
                              '',  # type
                              {})
    request = request_object._RequestObject('databaseaccount',
                                            documents._OperationType.Read,
                                            url_connection)
    result, self.last_response_headers = self.__Get('', request, headers)
    database_account = documents.DatabaseAccount()
    database_account.DatabasesLink = '/dbs/'
    database_account.MediaLink = '/media/'
    if (http_constants.HttpHeaders.MaxMediaStorageUsageInMB
            in self.last_response_headers):
        database_account.MaxMediaStorageUsageInMB = (
            self.last_response_headers[
                http_constants.HttpHeaders.MaxMediaStorageUsageInMB])
    if (http_constants.HttpHeaders.CurrentMediaStorageUsageInMB
            in self.last_response_headers):
        database_account.CurrentMediaStorageUsageInMB = (
            self.last_response_headers[
                http_constants.HttpHeaders.CurrentMediaStorageUsageInMB])
    database_account.ConsistencyPolicy = result.get(
        constants._Constants.UserConsistencyPolicy)

    # WritableLocations and ReadableLocations fields will be available
    # only for geo-replicated database accounts
    if constants._Constants.WritableLocations in result:
        database_account._WritableLocations = result[
            constants._Constants.WritableLocations]
    if constants._Constants.ReadableLocations in result:
        database_account._ReadableLocations = result[
            constants._Constants.ReadableLocations]
    if constants._Constants.EnableMultipleWritableLocations in result:
        database_account._EnableMultipleWritableLocations = result[
            constants._Constants.EnableMultipleWritableLocations]

    self._useMultipleWriteLocations = (
        self.connection_policy.UseMultipleWriteLocations
        and database_account._EnableMultipleWritableLocations)
    return database_account
Gets database account info. :return: The Database Account. :rtype: documents.DatabaseAccount
def count_leaves(x):
    """
    Return the number of non-sequence items in a given recursive sequence.
    """
    if hasattr(x, 'keys'):
        x = list(x.values())
    if hasattr(x, '__getitem__'):
        return sum(map(count_leaves, x))
    return 1
Return the number of non-sequence items in a given recursive sequence.
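A quick usage sketch of the function above (note that strings also define ``__getitem__``, so string leaves would recurse endlessly; the input here avoids that case):

nested = {'a': [1, 2, (3, 4)], 'b': 5}
print(count_leaves(nested))  # 5: the leaves are 1, 2, 3, 4 and 5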
def runCLI():
    """
    The starting point for the execution of the Scrapple command line tool.

    runCLI uses the docstring as the usage description for the scrapple \
    command. The class for the required command is selected by a dynamic \
    dispatch, and the command is executed through the execute_command() \
    method of the command class.
    """
    args = docopt(__doc__, version='0.3.0')
    try:
        check_arguments(args)
        command_list = ['genconfig', 'run', 'generate']
        select = itemgetter('genconfig', 'run', 'generate')
        selectedCommand = command_list[select(args).index(True)]
        cmdClass = get_command_class(selectedCommand)
        obj = cmdClass(args)
        obj.execute_command()
    except POSSIBLE_EXCEPTIONS as e:
        print('\n', e, '\n')
The starting point for the execution of the Scrapple command line tool. runCLI uses the docstring as the usage description for the scrapple command. \ The class for the required command is selected by a dynamic dispatch, and the \ command is executed through the execute_command() method of the command class.
def delete_namespaced_deployment(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_deployment  # noqa: E501

    delete a Deployment  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_namespaced_deployment(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the Deployment (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
    :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
    :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
    :param V1DeleteOptions body:
    :return: V1Status
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.delete_namespaced_deployment_with_http_info(name, namespace, **kwargs)  # noqa: E501
    else:
        (data) = self.delete_namespaced_deployment_with_http_info(name, namespace, **kwargs)  # noqa: E501
        return data
delete_namespaced_deployment # noqa: E501 delete a Deployment # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_deployment(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Deployment (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :param V1DeleteOptions body: :return: V1Status If the method is called asynchronously, returns the request thread.
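A hypothetical synchronous call through the generated Kubernetes client; the deployment name and namespace are placeholders:

from kubernetes import client, config

config.load_kube_config()  # reads credentials from ~/.kube/config
apps = client.AppsV1Api()
# Returns a V1Status; pass async_req=True to get a thread instead.
status = apps.delete_namespaced_deployment(
    name="my-deploy",                 # placeholder
    namespace="default",
    propagation_policy="Foreground",  # cascade-delete ReplicaSets and Pods
)
print(status.status)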
def wrap_handler(self, handler, context_switcher):
    """Enable/Disable handler."""
    context_switcher.add_context_in(lambda: LOGGER.addHandler(self.handler))
    context_switcher.add_context_out(lambda: LOGGER.removeHandler(self.handler))
Enable/Disable handler.
def convert_parameters(self, request, *args, **kwargs):
    '''
    Iterates the urlparams and converts them according to the type hints
    in the current view function. This is the primary function of the class.
    '''
    args = list(args)
    urlparam_i = 0
    parameters = (self.view_parameters.get(request.method.lower())
                  or self.view_parameters.get(None))
    if parameters is not None:
        # add urlparams into the arguments and convert the values
        for parameter_i, parameter in enumerate(parameters):
            # skip request object, *args, **kwargs
            if (parameter_i == 0
                    or parameter.kind is inspect.Parameter.VAR_POSITIONAL
                    or parameter.kind is inspect.Parameter.VAR_KEYWORD):
                pass
            # value in kwargs?
            elif parameter.name in kwargs:
                kwargs[parameter.name] = self.convert_value(kwargs[parameter.name], parameter, request)
            # value in args?
            elif parameter_i - 1 < len(args):
                args[parameter_i - 1] = self.convert_value(args[parameter_i - 1], parameter, request)
            # urlparam value?
            elif urlparam_i < len(request.dmp.urlparams):
                kwargs[parameter.name] = self.convert_value(request.dmp.urlparams[urlparam_i], parameter, request)
                urlparam_i += 1
            # can we assign a default value?
            elif parameter.default is not inspect.Parameter.empty:
                kwargs[parameter.name] = self.convert_value(parameter.default, parameter, request)
            # fallback is None
            else:
                kwargs[parameter.name] = self.convert_value(None, parameter, request)
    return args, kwargs
Iterates the urlparams and converts them according to the type hints in the current view function. This is the primary function of the class.
def metadata_to_buffers(metadata):
    """
    Transform a dict of metadata into a sequence of buffers.

    :param metadata: The metadata, as a dict.
    :returns: A list of buffers.
    """
    results = []
    for key, value in metadata.items():
        assert len(key) < 256
        assert len(value) < 2 ** 32
        results.extend([
            struct.pack('!B', len(key)),
            key,
            struct.pack('!I', len(value)),
            value,
        ])
    return results
Transform a dict of metadata into a sequence of buffers. :param metadata: The metadata, as a dict. :returns: A list of buffers.
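Each entry is framed as a 1-byte big-endian key length, the key, a 4-byte big-endian value length, then the value. A complementary decoder sketch over the concatenated buffers (a hypothetical helper, assuming keys and values are bytes):

import struct

def buffers_to_metadata(data: bytes) -> dict:
    """Illustrative inverse of metadata_to_buffers."""
    metadata, pos = {}, 0
    while pos < len(data):
        (key_len,) = struct.unpack_from('!B', data, pos)
        pos += 1
        key = data[pos:pos + key_len]
        pos += key_len
        (value_len,) = struct.unpack_from('!I', data, pos)
        pos += 4
        metadata[key] = data[pos:pos + value_len]
        pos += value_len
    return metadata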
def fit_overlays(self, text, run_matchers=None, **kw):
    """
    First all matchers will run and then I will try to combine them.

    Use run_matchers to force running (True) or not running (False) the
    matchers. See ListMatcher for arguments.
    """
    self._maybe_run_matchers(text, run_matchers)
    for i in self._list_match.fit_overlay(text, **kw):
        yield i
First all matchers will run and then I will try to combine them. Use run_matchers to force running(True) or not running(False) the matchers. See ListMatcher for arguments.
def groupfinder(userid, request):
    """
    Default groupfinder implementation for pyramid applications

    :param userid:
    :param request:
    :return:
    """
    if userid and hasattr(request, "user") and request.user:
        groups = ["group:%s" % g.id for g in request.user.groups]
        return groups
    return []
Default groupfinder implementation for pyramid applications :param userid: :param request: :return:
def update(self, pbar):
    'Updates the widget with the current SI prefixed speed.'
    if pbar.seconds_elapsed < 2e-6 or pbar.currval < 2e-6:  # =~ 0
        scaled = power = 0
    else:
        speed = pbar.currval / pbar.seconds_elapsed
        power = int(math.log(speed, 1000))
        scaled = speed / 1000. ** power
    return self._format % (scaled, self.prefixes[power], self.unit)
Updates the widget with the current SI prefixed speed.
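The scaling picks the largest power of 1000 not exceeding the speed; worked through by hand (the prefix tuple here is an assumption about `self.prefixes`):

import math

speed = 2_500_000                     # bytes per second
power = int(math.log(speed, 1000))    # 2
scaled = speed / 1000.0 ** power      # 2.5
print('%.2f %sB/s' % (scaled, ('', 'k', 'M', 'G')[power]))  # 2.50 MB/s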
def uniform_random_global_network(loc=2000, scale=250, n=100):
    """
    Returns a DataFrame of `n` random edges between uniformly distributed
    `shapely.geometry.Point` objects, with a normally distributed mock
    variable.
    """
    arr = (np.random.normal(loc, scale, n)).astype(int)
    return pd.DataFrame(data={'mock_variable': arr,
                              'from': uniform_random_global_points(n),
                              'to': uniform_random_global_points(n)})
Returns a DataFrame of `n` random edges between uniformly distributed `shapely.geometry.Point` objects, with a normally distributed mock variable.
def search_profiles(
    self,
    parent,
    request_metadata,
    profile_query=None,
    page_size=None,
    offset=None,
    disable_spell_check=None,
    order_by=None,
    case_sensitive_sort=None,
    histogram_queries=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """
    Searches for profiles within a tenant.

    For example, search by raw queries "software engineer in Mountain View"
    or search by structured filters (location filter, education filter,
    etc.).

    See ``SearchProfilesRequest`` for more information.

    Example:
        >>> from google.cloud import talent_v4beta1
        >>>
        >>> client = talent_v4beta1.ProfileServiceClient()
        >>>
        >>> parent = client.tenant_path('[PROJECT]', '[TENANT]')
        >>>
        >>> # TODO: Initialize `request_metadata`:
        >>> request_metadata = {}
        >>>
        >>> # Iterate over all results
        >>> for element in client.search_profiles(parent, request_metadata):
        ...     # process element
        ...     pass
        >>>
        >>> # Alternatively:
        >>>
        >>> # Iterate over results one page at a time
        >>> for page in client.search_profiles(parent, request_metadata).pages:
        ...     for element in page:
        ...         # process element
        ...         pass

    Args:
        parent (str): Required. The resource name of the tenant to search
            within.

            The format is "projects/{project\_id}/tenants/{tenant\_id}", for
            example, "projects/api-test-project/tenants/foo".
        request_metadata (Union[dict, ~google.cloud.talent_v4beta1.types.RequestMetadata]):
            Required. The meta information collected about the profile search
            user. This is used to improve the search quality of the service.
            These values are provided by users, and must be precise and
            consistent.

            If a dict is provided, it must be of the same form as the protobuf
            message :class:`~google.cloud.talent_v4beta1.types.RequestMetadata`
        profile_query (Union[dict, ~google.cloud.talent_v4beta1.types.ProfileQuery]):
            Optional. Search query to execute. See ``ProfileQuery`` for more
            details.

            If a dict is provided, it must be of the same form as the protobuf
            message :class:`~google.cloud.talent_v4beta1.types.ProfileQuery`
        page_size (int): The maximum number of resources contained in the
            underlying API response. If page streaming is performed
            per-resource, this parameter does not affect the return value. If
            page streaming is performed per-page, this determines the maximum
            number of resources in a page.
        offset (int): Optional. An integer that specifies the current offset
            (that is, starting result) in search results. This field is only
            considered if ``page_token`` is unset.

            The maximum allowed value is 5000. Otherwise an error is thrown.

            For example, 0 means to search from the first profile, and 10
            means to search from the 11th profile. This can be used for
            pagination, for example pageSize = 10 and offset = 10 means to
            search from the second page.
        disable_spell_check (bool): Optional. This flag controls the
            spell-check feature. If ``false``, the service attempts to correct
            a misspelled query. For example, "enginee" is corrected to
            "engineer".
        order_by (str): Optional. The criteria that determines how search
            results are sorted. Default is "relevance desc" if no value is
            specified.

            Supported options are:

            -  "relevance desc": By descending relevance, as determined by the
               API algorithms.
            -  "update\_date desc": Sort by ``Profile.update_date`` in
               descending order (recently updated profiles first).
            -  "create\_date desc": Sort by ``Profile.create_date`` in
               descending order (recently created profiles first).
            -  "first\_name": Sort by ``PersonStructuredName.given_name`` in
               ascending order.
            -  "first\_name desc": Sort by ``PersonStructuredName.given_name``
               in descending order.
            -  "last\_name": Sort by ``PersonStructuredName.family_name`` in
               ascending order.
            -  "last\_name desc": Sort by ``PersonStructuredName.family_name``
               in descending order.
        case_sensitive_sort (bool): Optional. When sort by field is based on
            alphabetical order, sort values case sensitively (based on ASCII)
            when the value is set to true. Default value is case in-sensitive
            sort (false).
        histogram_queries (list[Union[dict, ~google.cloud.talent_v4beta1.types.HistogramQuery]]):
            Optional. A list of expressions specifies histogram requests
            against matching profiles for ``SearchProfilesRequest``.

            The expression syntax looks like a function definition with
            optional parameters.

            Function syntax: function\_name(histogram\_facet[, list of buckets])

            Data types:

            -  Histogram facet: facet names with format
               [a-zA-Z][a-zA-Z0-9\_]+.
            -  String: string like "any string with backslash escape for
               quote(")."
            -  Number: whole number and floating point number like 10, -1 and
               -0.01.
            -  List: list of elements with comma(,) separator surrounded by
               square brackets. For example, [1, 2, 3] and ["one", "two",
               "three"].

            Built-in constants:

            -  MIN (minimum number similar to java Double.MIN\_VALUE)
            -  MAX (maximum number similar to java Double.MAX\_VALUE)

            Built-in functions:

            -  bucket(start, end[, label]): the bucket built-in function
               creates a bucket with range of [start, end). Note that the end
               is exclusive. For example, bucket(1, MAX, "positive number") or
               bucket(1, 10).

            Histogram Facets:

            -  admin1: Admin1 is a global placeholder for referring to state,
               province, or the particular term a country uses to define the
               geographic structure below the country level. Examples include
               states codes such as "CA", "IL", "NY", and provinces, such as
               "BC".
            -  locality: Locality is a global placeholder for referring to
               city, town, or the particular term a country uses to define the
               geographic structure below the admin1 level. Examples include
               city names such as "Mountain View" and "New York".
            -  extended\_locality: Extended locality is concatenated version
               of admin1 and locality with comma separator. For example,
               "Mountain View, CA" and "New York, NY".
            -  postal\_code: Postal code of profile which follows locale code.
            -  country: Country code (ISO-3166-1 alpha-2 code) of profile,
               such as US, JP, GB.
            -  job\_title: Normalized job titles specified in
               EmploymentHistory.
            -  company\_name: Normalized company name of profiles to match on.
            -  institution: The school name. For example, "MIT", "University
               of California, Berkeley"
            -  degree: Highest education degree in ISCED code. Each value in
               degree covers specific level of education, without any
               expansion to upper nor lower levels of education degree.
            -  experience\_in\_months: experience in months. 0 means 0 month
               to 1 month (exclusive).
            -  application\_date: The application date specifies application
               start dates. See ``ApplicationDateFilter`` for more details.
            -  application\_outcome\_reason: The application outcome reason
               specifies the outcome reasons of job application. See
               ``ApplicationOutcomeReasonFilter`` for more details.
            -  application\_last\_stage: The application last stage specifies
               the last stage of job application. See
               ``ApplicationLastStageFilter`` for more details.
            -  application\_job\_title: The application job title specifies
               the job applied for in the application. See
               ``ApplicationJobFilter`` for more details.
            -  application\_status: The application status specifies the
               status of job application. See ``ApplicationStatusFilter`` for
               more details.
            -  hirable\_status: Hirable status specifies the profile's hirable
               status.
            -  string\_custom\_attribute: String custom attributes. Values can
               be accessed via square bracket notation like
               string\_custom\_attribute["key1"].
            -  numeric\_custom\_attribute: Numeric custom attributes. Values
               can be accessed via square bracket notation like
               numeric\_custom\_attribute["key1"].

            Example expressions:

            -  count(admin1)
            -  count(experience\_in\_months, [bucket(0, 12, "1 year"),
               bucket(12, 36, "1-3 years"), bucket(36, MAX, "3+ years")])
            -  count(string\_custom\_attribute["assigned\_recruiter"])
            -  count(numeric\_custom\_attribute["favorite\_number"],
               [bucket(MIN, 0, "negative"), bucket(0, MAX, "non-negative")])

            If a dict is provided, it must be of the same form as the protobuf
            message :class:`~google.cloud.talent_v4beta1.types.HistogramQuery`
        retry (Optional[google.api_core.retry.Retry]): A retry object used to
            retry requests. If ``None`` is specified, requests will not be
            retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait for
            the request to complete. Note that if ``retry`` is specified, the
            timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.

    Returns:
        A :class:`~google.gax.PageIterator` instance. By default, this is an
        iterable of
        :class:`~google.cloud.talent_v4beta1.types.HistogramQueryResult`
        instances. This object can also be configured to iterate over the
        pages of the response through the `options` parameter.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request failed
            for any reason.
        google.api_core.exceptions.RetryError: If the request failed due to a
            retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Wrap the transport method to add retry and timeout logic.
    if "search_profiles" not in self._inner_api_calls:
        self._inner_api_calls[
            "search_profiles"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.search_profiles,
            default_retry=self._method_configs["SearchProfiles"].retry,
            default_timeout=self._method_configs["SearchProfiles"].timeout,
            client_info=self._client_info,
        )

    request = profile_service_pb2.SearchProfilesRequest(
        parent=parent,
        request_metadata=request_metadata,
        profile_query=profile_query,
        page_size=page_size,
        offset=offset,
        disable_spell_check=disable_spell_check,
        order_by=order_by,
        case_sensitive_sort=case_sensitive_sort,
        histogram_queries=histogram_queries,
    )
    iterator = google.api_core.page_iterator.GRPCIterator(
        client=None,
        method=functools.partial(
            self._inner_api_calls["search_profiles"],
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        ),
        request=request,
        items_field="histogram_query_results",
        request_token_field="page_token",
        response_token_field="next_page_token",
    )
    return iterator
Searches for profiles within a tenant. For example, search by raw queries "software engineer in Mountain View" or search by structured filters (location filter, education filter, etc.). See ``SearchProfilesRequest`` for more information. Example: >>> from google.cloud import talent_v4beta1 >>> >>> client = talent_v4beta1.ProfileServiceClient() >>> >>> parent = client.tenant_path('[PROJECT]', '[TENANT]') >>> >>> # TODO: Initialize `request_metadata`: >>> request_metadata = {} >>> >>> # Iterate over all results >>> for element in client.search_profiles(parent, request_metadata): ... # process element ... pass >>> >>> >>> # Alternatively: >>> >>> # Iterate over results one page at a time >>> for page in client.search_profiles(parent, request_metadata).pages: ... for element in page: ... # process element ... pass Args: parent (str): Required. The resource name of the tenant to search within. The format is "projects/{project\_id}/tenants/{tenant\_id}", for example, "projects/api-test-project/tenants/foo". request_metadata (Union[dict, ~google.cloud.talent_v4beta1.types.RequestMetadata]): Required. The meta information collected about the profile search user. This is used to improve the search quality of the service. These values are provided by users, and must be precise and consistent. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.talent_v4beta1.types.RequestMetadata` profile_query (Union[dict, ~google.cloud.talent_v4beta1.types.ProfileQuery]): Optional. Search query to execute. See ``ProfileQuery`` for more details. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.talent_v4beta1.types.ProfileQuery` page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per-resource, this parameter does not affect the return value. If page streaming is performed per-page, this determines the maximum number of resources in a page. offset (int): Optional. An integer that specifies the current offset (that is, starting result) in search results. This field is only considered if ``page_token`` is unset. The maximum allowed value is 5000. Otherwise an error is thrown. For example, 0 means to search from the first profile, and 10 means to search from the 11th profile. This can be used for pagination, for example pageSize = 10 and offset = 10 means to search from the second page. disable_spell_check (bool): Optional. This flag controls the spell-check feature. If ``false``, the service attempts to correct a misspelled query. For example, "enginee" is corrected to "engineer". order_by (str): Optional. The criteria that determines how search results are sorted. Default is "relevance desc" if no value is specified. Supported options are: - "relevance desc": By descending relevance, as determined by the API algorithms. - "update\_date desc": Sort by ``Profile.update_date`` in descending order (recently updated profiles first). - "create\_date desc": Sort by ``Profile.create_date`` in descending order (recently created profiles first). - "first\_name": Sort by ``PersonStructuredName.given_name`` in ascending order. - "first\_name desc": Sort by ``PersonStructuredName.given_name`` in descending order. - "last\_name": Sort by ``PersonStructuredName.family_name`` in ascending order. - "last\_name desc": Sort by ``PersonStructuredName.family_name`` in descending order. case_sensitive_sort (bool): Optional. When sort by field is based on alphabetical order, sort values case sensitively (based on ASCII) when the value is set to true. Default value is case in-sensitive sort (false). histogram_queries (list[Union[dict, ~google.cloud.talent_v4beta1.types.HistogramQuery]]): Optional. A list of expressions specifies histogram requests against matching profiles for ``SearchProfilesRequest``. The expression syntax looks like a function definition with optional parameters. Function syntax: function\_name(histogram\_facet[, list of buckets]) Data types: - Histogram facet: facet names with format [a-zA-Z][a-zA-Z0-9\_]+. - String: string like "any string with backslash escape for quote(")." - Number: whole number and floating point number like 10, -1 and -0.01. - List: list of elements with comma(,) separator surrounded by square brackets. For example, [1, 2, 3] and ["one", "two", "three"]. Built-in constants: - MIN (minimum number similar to java Double.MIN\_VALUE) - MAX (maximum number similar to java Double.MAX\_VALUE) Built-in functions: - bucket(start, end[, label]): the bucket built-in function creates a bucket with range of [start, end). Note that the end is exclusive. For example, bucket(1, MAX, "positive number") or bucket(1, 10). Histogram Facets: - admin1: Admin1 is a global placeholder for referring to state, province, or the particular term a country uses to define the geographic structure below the country level. Examples include states codes such as "CA", "IL", "NY", and provinces, such as "BC". - locality: Locality is a global placeholder for referring to city, town, or the particular term a country uses to define the geographic structure below the admin1 level. Examples include city names such as "Mountain View" and "New York". - extended\_locality: Extended locality is concatenated version of admin1 and locality with comma separator. For example, "Mountain View, CA" and "New York, NY". - postal\_code: Postal code of profile which follows locale code. - country: Country code (ISO-3166-1 alpha-2 code) of profile, such as US, JP, GB. - job\_title: Normalized job titles specified in EmploymentHistory. - company\_name: Normalized company name of profiles to match on. - institution: The school name. For example, "MIT", "University of California, Berkeley" - degree: Highest education degree in ISCED code. Each value in degree covers specific level of education, without any expansion to upper nor lower levels of education degree. - experience\_in\_months: experience in months. 0 means 0 month to 1 month (exclusive). - application\_date: The application date specifies application start dates. See ``ApplicationDateFilter`` for more details. - application\_outcome\_reason: The application outcome reason specifies the outcome reasons of job application. See ``ApplicationOutcomeReasonFilter`` for more details. - application\_last\_stage: The application last stage specifies the last stage of job application. See ``ApplicationLastStageFilter`` for more details. - application\_job\_title: The application job title specifies the job applied for in the application. See ``ApplicationJobFilter`` for more details. - application\_status: The application status specifies the status of job application. See ``ApplicationStatusFilter`` for more details. - hirable\_status: Hirable status specifies the profile's hirable status. - string\_custom\_attribute: String custom attributes. Values can be accessed via square bracket notation like string\_custom\_attribute["key1"]. - numeric\_custom\_attribute: Numeric custom attributes. Values can be accessed via square bracket notation like numeric\_custom\_attribute["key1"]. Example expressions: - count(admin1) - count(experience\_in\_months, [bucket(0, 12, "1 year"), bucket(12, 36, "1-3 years"), bucket(36, MAX, "3+ years")]) - count(string\_custom\_attribute["assigned\_recruiter"]) - count(numeric\_custom\_attribute["favorite\_number"], [bucket(MIN, 0, "negative"), bucket(0, MAX, "non-negative")]) If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.talent_v4beta1.types.HistogramQuery` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.gax.PageIterator` instance. By default, this is an iterable of :class:`~google.cloud.talent_v4beta1.types.HistogramQueryResult` instances. This object can also be configured to iterate over the pages of the response through the `options` parameter. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
def fail(self, message, status=500, **kw):
    """Set a JSON error object and a status on the response
    """
    self.request.response.setStatus(status)
    result = {"success": False, "errors": message, "status": status}
    result.update(kw)
    return result
Set a JSON error object and a status on the response
def load_structure_from_file(context: InstaloaderContext, filename: str) -> JsonExportable:
    """Loads a :class:`Post`, :class:`Profile` or :class:`StoryItem` from a '.json' or '.json.xz' file that
    has been saved by :func:`save_structure_to_file`.

    :param context: :attr:`Instaloader.context` linked to the new object, used for additional queries if necessary.
    :param filename: Filename, ends in '.json' or '.json.xz'
    """
    if filename.endswith('.xz'):
        fp = lzma.open(filename, 'rt')
    else:
        fp = open(filename, 'rt')
    # Close the file handle even if JSON decoding fails.
    with fp:
        json_structure = json.load(fp)
    if 'node' in json_structure and 'instaloader' in json_structure and \
            'node_type' in json_structure['instaloader']:
        node_type = json_structure['instaloader']['node_type']
        if node_type == "Post":
            return Post(context, json_structure['node'])
        elif node_type == "Profile":
            return Profile(context, json_structure['node'])
        elif node_type == "StoryItem":
            return StoryItem(context, json_structure['node'])
        else:
            raise InvalidArgumentException("{}: Not an Instaloader JSON.".format(filename))
    elif 'shortcode' in json_structure:
        # Post JSON created with Instaloader v3
        return Post.from_shortcode(context, json_structure['shortcode'])
    else:
        raise InvalidArgumentException("{}: Not an Instaloader JSON.".format(filename))
Loads a :class:`Post`, :class:`Profile` or :class:`StoryItem` from a '.json' or '.json.xz' file that
has been saved by :func:`save_structure_to_file`.

:param context: :attr:`Instaloader.context` linked to the new object, used for additional queries if necessary.
:param filename: Filename, ends in '.json' or '.json.xz'
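A minimal usage sketch: the filename below is hypothetical, and an `Instaloader` instance is created only to supply the `context` the loader needs for follow-up queries.

import instaloader

L = instaloader.Instaloader()
# Hypothetical file previously written by save_structure_to_file():
structure = load_structure_from_file(L.context, "2019-06-01_post.json.xz")
if isinstance(structure, instaloader.Post):
    print(structure.shortcode)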
def parse(cls, value, default=_no_default):
    """Parses a flag integer or string into a Flags instance.

    Accepts the following types:

    - Members of this enum class. These are returned directly.
    - Integers. These are converted directly into a Flags instance with the
      given value.
    - Strings. The function accepts a comma-delimited list of flag names,
      corresponding to members of the enum. These are all ORed together.

    Examples:
    >>> class Car(Flags):
    ...     is_big = 1
    ...     has_wheels = 2
    >>> Car.parse(1)
    Car.is_big
    >>> Car.parse(3)
    Car.parse('has_wheels,is_big')
    >>> Car.parse('is_big,has_wheels')
    Car.parse('has_wheels,is_big')

    """
    if isinstance(value, cls):
        return value
    elif isinstance(value, int):
        e = cls._make_value(value)
    else:
        if not value:
            e = cls._make_value(0)
        else:
            r = 0
            for k in value.split(","):
                v = cls._name_to_member.get(k, _no_default)
                if v is _no_default:
                    if default is _no_default:
                        raise _create_invalid_value_error(cls, value)
                    else:
                        return default
                r |= v.value
            e = cls._make_value(r)
    if not e.is_valid():
        if default is _no_default:
            raise _create_invalid_value_error(cls, value)
        return default
    return e
Parses a flag integer or string into a Flags instance.

Accepts the following types:

- Members of this enum class. These are returned directly.
- Integers. These are converted directly into a Flags instance with the
  given value.
- Strings. The function accepts a comma-delimited list of flag names,
  corresponding to members of the enum. These are all ORed together.

Examples:
>>> class Car(Flags):
...     is_big = 1
...     has_wheels = 2
>>> Car.parse(1)
Car.is_big
>>> Car.parse(3)
Car.parse('has_wheels,is_big')
>>> Car.parse('is_big,has_wheels')
Car.parse('has_wheels,is_big')
def usedoc(other):
    '''
    Decorator which copies __doc__ of given object into decorated one.

    Usage:

    >>> def fnc1():
    ...     """docstring"""
    ...     pass
    >>> @usedoc(fnc1)
    ... def fnc2():
    ...     pass
    >>> fnc2.__doc__
    'docstring'

    :param other: anything with a __doc__ attribute
    :type other: any
    :returns: decorator function
    :rtype: callable
    '''
    def inner(fnc):
        fnc.__doc__ = fnc.__doc__ or getattr(other, '__doc__')
        return fnc
    return inner
Decorator which copies __doc__ of given object into decorated one.

Usage:

>>> def fnc1():
...     """docstring"""
...     pass
>>> @usedoc(fnc1)
... def fnc2():
...     pass
>>> fnc2.__doc__
'docstring'

:param other: anything with a __doc__ attribute
:type other: any
:returns: decorator function
:rtype: callable
def get(name, function=None): """Get a setting. `name` should be the name of the setting to look for. If the optional argument `function` is passed, this will look for a value local to the function before retrieving the global value. """ if function is not None: if hasattr(function, Settings.FUNCTION_SETTINGS_NAME): if name in getattr(function, Settings.FUNCTION_SETTINGS_NAME): return getattr(function, Settings.FUNCTION_SETTINGS_NAME)[name] return Settings.__global_setting_values[name]
Get a setting. `name` should be the name of the setting to look for. If the optional argument `function` is passed, this will look for a value local to the function before retrieving the global value.
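A hedged sketch of the lookup order this implements, assuming `get` is exposed as a static method on a `Settings` class: a per-function settings dict, stored on the function under `Settings.FUNCTION_SETTINGS_NAME`, shadows the global table. The setting name and value below are hypothetical.

def handler():
    pass

# Attach a function-local override (attribute name taken from the class):
setattr(handler, Settings.FUNCTION_SETTINGS_NAME, {"timeout": 5})

Settings.get("timeout", function=handler)  # -> 5, the function-local value
Settings.get("timeout")                    # -> whatever the global table holds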
def verify_fft_options(opt, parser): """Parses the FFT options and verifies that they are reasonable. Parameters ---------- opt : object Result of parsing the CLI with OptionParser, or any object with the required attributes. parser : object OptionParser instance. """ if len(opt.fft_backends) > 0: _all_backends = get_backend_names() for backend in opt.fft_backends: if backend not in _all_backends: parser.error("Backend {0} is not available".format(backend)) for backend in get_backend_modules(): try: backend.verify_fft_options(opt, parser) except AttributeError: pass
Parses the FFT options and verifies that they are reasonable. Parameters ---------- opt : object Result of parsing the CLI with OptionParser, or any object with the required attributes. parser : object OptionParser instance.
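One possible wiring, sketched with argparse, whose `parser.error` matches the OptionParser-style interface the function expects; the backend name is an assumption and the call only passes if that backend is actually available.

from argparse import ArgumentParser

parser = ArgumentParser()
parser.add_argument("--fft-backends", nargs="*", default=[])
opt = parser.parse_args(["--fft-backends", "fftw"])

# Raises a parser error unless the "fftw" backend was built in:
verify_fft_options(opt, parser)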
def get_closed_indices(self): """ Get all closed indices. """ state = self.conn.cluster.state() status = self.status() indices_metadata = set(state['metadata']['indices'].keys()) indices_status = set(status['indices'].keys()) return indices_metadata.difference(indices_status)
Get all closed indices.
def _get_gs_path(): """Guess where the Ghostscript executable is and return its absolute path name.""" path = os.environ.get("PATH", os.defpath) for dir in path.split(os.pathsep): for name in ("gs", "gs.exe", "gswin32c.exe"): g = os.path.join(dir, name) if os.path.exists(g): return g raise Exception("Ghostscript not found. path=%s" % str(path))
Guess where the Ghostscript executable is and return its absolute path name.
def update_where(self, res, depth=0, since=None, **kwargs): "Like update() but uses WHERE-style args" fetch = lambda: self._fetcher.fetch_all_latest(res, 0, kwargs, since=since) self._update(res, fetch, depth)
Like update() but uses WHERE-style args
def subst_path(self, path, target=None, source=None): """Substitute a path list, turning EntryProxies into Nodes and leaving Nodes (and other objects) as-is.""" if not SCons.Util.is_List(path): path = [path] def s(obj): """This is the "string conversion" routine that we have our substitutions use to return Nodes, not strings. This relies on the fact that an EntryProxy object has a get() method that returns the underlying Node that it wraps, which is a bit of architectural dependence that we might need to break or modify in the future in response to additional requirements.""" try: get = obj.get except AttributeError: obj = SCons.Util.to_String_for_subst(obj) else: obj = get() return obj r = [] for p in path: if SCons.Util.is_String(p): p = self.subst(p, target=target, source=source, conv=s) if SCons.Util.is_List(p): if len(p) == 1: p = p[0] else: # We have an object plus a string, or multiple # objects that we need to smush together. No choice # but to make them into a string. p = ''.join(map(SCons.Util.to_String_for_subst, p)) else: p = s(p) r.append(p) return r
Substitute a path list, turning EntryProxies into Nodes and leaving Nodes (and other objects) as-is.
def thermal_conductivity_Magomedov(T, P, ws, CASRNs, k_w=None):
    r'''Calculate the thermal conductivity of an aqueous mixture of
    electrolytes using the form proposed by Magomedov [1]_.
    Parameters are loaded by the function as needed. Function will fail if an
    electrolyte is not in the database.

    .. math::
        \lambda = \lambda_w\left[ 1 - \sum_{i=1}^n A_i (w_i + 2\times10^{-4}
        w_i^3)\right] - 2\times10^{-8} PT\sum_{i=1}^n w_i

    Parameters
    ----------
    T : float
        Temperature of liquid [K]
    P : float
        Pressure of the liquid [Pa]
    ws : array
        Weight fractions of liquid components other than water
    CASRNs : array
        CAS numbers of the liquid components other than water
    k_w : float
        Liquid thermal conductivity of pure water at T and P, [W/m/K]

    Returns
    -------
    kl : float
        Liquid thermal conductivity, [W/m/K]

    Notes
    -----
    Range from 273 K to 473 K, P from 0.1 MPa to 100 MPa. C from 0 to 25 mass%.
    Internal units are MPa for pressure and weight percent.

    An example is sought for this function. It is not possible to reproduce
    the author's values consistently.

    Examples
    --------
    >>> thermal_conductivity_Magomedov(293., 1E6, [.25], ['7758-94-3'], k_w=0.59827)
    0.548654049375

    References
    ----------
    .. [1] Magomedov, U. B. "The Thermal Conductivity of Binary and
       Multicomponent Aqueous Solutions of Inorganic Substances at High
       Parameters of State." High Temperature 39, no. 2 (March 1, 2001):
       221-26. doi:10.1023/A:1017518731726.
    '''
    P = P/1E6
    ws = [i*100 for i in ws]
    if not k_w:
        raise Exception('k_w correlation must be provided')
    sum1 = 0
    for i, CASRN in enumerate(CASRNs):
        Ai = float(Magomedovk_thermal_cond.at[CASRN, 'Ai'])
        sum1 += Ai*(ws[i] + 2E-4*ws[i]**3)
    return k_w*(1 - sum1) - 2E-8*P*T*sum(ws)
r'''Calculate the thermal conductivity of an aqueous mixture of
electrolytes using the form proposed by Magomedov [1]_.
Parameters are loaded by the function as needed. Function will fail if an
electrolyte is not in the database.

.. math::
    \lambda = \lambda_w\left[ 1 - \sum_{i=1}^n A_i (w_i + 2\times10^{-4}
    w_i^3)\right] - 2\times10^{-8} PT\sum_{i=1}^n w_i

Parameters
----------
T : float
    Temperature of liquid [K]
P : float
    Pressure of the liquid [Pa]
ws : array
    Weight fractions of liquid components other than water
CASRNs : array
    CAS numbers of the liquid components other than water
k_w : float
    Liquid thermal conductivity of pure water at T and P, [W/m/K]

Returns
-------
kl : float
    Liquid thermal conductivity, [W/m/K]

Notes
-----
Range from 273 K to 473 K, P from 0.1 MPa to 100 MPa. C from 0 to 25 mass%.
Internal units are MPa for pressure and weight percent.

An example is sought for this function. It is not possible to reproduce
the author's values consistently.

Examples
--------
>>> thermal_conductivity_Magomedov(293., 1E6, [.25], ['7758-94-3'], k_w=0.59827)
0.548654049375

References
----------
.. [1] Magomedov, U. B. "The Thermal Conductivity of Binary and
   Multicomponent Aqueous Solutions of Inorganic Substances at High
   Parameters of State." High Temperature 39, no. 2 (March 1, 2001):
   221-26. doi:10.1023/A:1017518731726.
def all_pairs(seq1, seq2=None): """Yields all pairs drawn from ``seq1`` and ``seq2``. If ``seq2`` is ``None``, ``seq2 = seq1``. >>> stop_at.ed(all_pairs(xrange(100000), xrange(100000)), 8) ((0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7)) """ if seq2 is None: seq2 = seq1 for item1 in seq1: for item2 in seq2: yield (item1, item2)
Yields all pairs drawn from ``seq1`` and ``seq2``. If ``seq2`` is ``None``, ``seq2 = seq1``. >>> stop_at.ed(all_pairs(xrange(100000), xrange(100000)), 8) ((0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7))
def set(self, prefix, url, obj): """ Add an object into the cache """ if not self.cache_dir: return filename = self._get_cache_file(prefix, url) try: os.makedirs(os.path.join(self.cache_dir, prefix)) except OSError: pass with open(filename, 'wb') as file: pickle.dump(obj, file)
Add an object into the cache
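A standalone sketch of the companion read path under stated assumptions: the real `_get_cache_file` naming scheme is not shown here, so the SHA-1-of-URL layout below is a hypothetical stand-in for it.

import hashlib
import os
import pickle

def cache_file(cache_dir, prefix, url):
    # Hypothetical naming scheme: hash the URL so it is filesystem-safe.
    digest = hashlib.sha1(url.encode("utf-8")).hexdigest()
    return os.path.join(cache_dir, prefix, digest)

def cache_get(cache_dir, prefix, url):
    """Counterpart to set(): unpickle a cached object, or None on a miss."""
    filename = cache_file(cache_dir, prefix, url)
    if not os.path.exists(filename):
        return None
    with open(filename, "rb") as fh:
        return pickle.load(fh)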
def widget_from_django_field(cls, f, default=widgets.Widget): """ Returns the widget that would likely be associated with each Django type. Includes mapping of Postgres Array and JSON fields. In the case that psycopg2 is not installed, we consume the error and process the field regardless. """ result = default internal_type = "" if callable(getattr(f, "get_internal_type", None)): internal_type = f.get_internal_type() if internal_type in cls.WIDGETS_MAP: result = cls.WIDGETS_MAP[internal_type] if isinstance(result, str): result = getattr(cls, result)(f) else: try: from django.contrib.postgres.fields import ArrayField, JSONField except ImportError: # ImportError: No module named psycopg2.extras class ArrayField: pass class JSONField: pass if isinstance(f, ArrayField): return widgets.SimpleArrayWidget elif isinstance(f, JSONField): return widgets.JSONWidget return result
Returns the widget that would likely be associated with each Django type. Includes mapping of Postgres Array and JSON fields. In the case that psycopg2 is not installed, we consume the error and process the field regardless.
def floating_ip_disassociate(self, server_name, floating_ip): ''' Disassociate a floating IP from server .. versionadded:: 2016.3.0 ''' nt_ks = self.compute_conn server_ = self.server_by_name(server_name) server = nt_ks.servers.get(server_.__dict__['id']) server.remove_floating_ip(floating_ip) return self.floating_ip_list()[floating_ip]
Disassociate a floating IP from server .. versionadded:: 2016.3.0
def arr_astype(arr_type): # function factory '''Change dtype of array. Parameters: arr_type : str, np.dtype Character codes (e.g. 'b', '>H'), type strings (e.g. 'i4', 'f8'), Python types (e.g. float, int) and numpy dtypes (e.g. np.uint32) are allowed. Returns: array : np.array ''' def f_astype(arr): return arr.astype(arr_type) f_astype.__name__ = "arr_astype_" + str(arr_type) # or use inspect module: inspect.stack()[0][3] return f_astype
Change dtype of array. Parameters: arr_type : str, np.dtype Character codes (e.g. 'b', '>H'), type strings (e.g. 'i4', 'f8'), Python types (e.g. float, int) and numpy dtypes (e.g. np.uint32) are allowed. Returns: array : np.array
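A quick usage sketch with NumPy: build the converter once with the factory, then apply it to arrays; the sample array is arbitrary.

import numpy as np

to_f32 = arr_astype(np.float32)   # factory call returns a converter function
batch = np.arange(4, dtype=np.uint8)

print(to_f32(batch).dtype)        # float32
print(to_f32.__name__)            # includes the requested dtype, useful when tracing pipelines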
def _init_display(self): """! \~english Initialize the SSD1306 display chip \~chinese 初始化SSD1306显示芯片 """ self._command([ # 0xAE self.CMD_SSD1306_DISPLAY_OFF, #Stop Scroll self.CMD_SSD1306_SET_SCROLL_DEACTIVE, # 0xA8 SET MULTIPLEX 0x3F self.CMD_SSD1306_SET_MULTIPLEX_RATIO, 0x3F, # 0xD3 SET DISPLAY OFFSET self.CMD_SSD1306_SET_DISPLAY_OFFSET, 0x00, # 0x40 Set Mapping RAM Display Start Line (0x00~0x3F) self.CMD_SSD1306_SET_DISPLAY_START_LINE, # 0xDA Set COM Pins hardware configuration, (0x00/0x01/0x02) self.CMD_SSD1306_SET_COM_PINS, (0x02 | 0x10), self.CMD_SSD1306_SET_CONTRAST, 0x7F, # 0xA4 Disable Entire Display On self.CMD_SSD1306_ENTIRE_DISPLAY_ON_0, # 0xA6 Set normal display self.CMD_SSD1306_NORMAL_DISPLAY, # 0xA7 Set inverse display # CMD_SSD1306_INVERSE_DISPLAY, # 0xD5 Set osc frequency 0x80 self.CMD_SSD1306_SET_CLOCK_DIVIDE_RATIO, 0x80, # 0x8D Enable DC/DC charge pump regulator 0x14 self.CMD_SSD1306_CHARGE_PUMP, 0x14, # 0x20 Set Page Addressing Mode (0x00/0x01/0x02) self.CMD_SSD1306_SET_MEM_ADDR_MODE, 0x01, # 0xC0 / 0xC8 Set COM Output Scan Direction #CMD_SSD1306_SCAN_DIRECTION_INC, #CMD_SSD1306_SCAN_DIRECTION_DEC, self.CMD_SSD1306_SCAN_DIRECTION_INC if self._mirror_v else self.CMD_SSD1306_SCAN_DIRECTION_DEC, # 0xA0 / oxA1 Set Segment re-map # 0xA0 left to right # 0xA1 right to left self.CMD_SSD1306_SET_SEGMENT_REMAP_0 if self._mirror_h else self.CMD_SSD1306_SET_SEGMENT_REMAP_1, ])
! \~english Initialize the SSD1306 display chip \~chinese 初始化SSD1306显示芯片
def _discovery(self):
    """
    Find other servers by asking the given server for its cluster nodes.
    """
    data = self.cluster_nodes()
    self.cluster_name = data["cluster_name"]
    for _, nodedata in list(data["nodes"].items()):
        server = nodedata['http_address'].replace("]", "").replace("inet[", "http:/")
        if server not in self.servers:
            self.servers.append(server)
    self._init_connection()
    return self.servers
Find other servers by asking the given server for its cluster nodes.
def get(self, key, default=NoDefault): """Retrieve a value from its key. Retrieval steps are: 1) Normalize the key 2) For each option group: a) Retrieve the value at that key b) If no value exists, continue c) If the value is an instance of 'Default', continue d) Otherwise, return the value 3) If no option had a non-default value for the key, return the first Default() option for the key (or :arg:`default`). """ key = normalize_key(key) if default is NoDefault: defaults = [] else: defaults = [default] for options in self.options: try: value = options[key] except KeyError: continue if isinstance(value, Default): defaults.append(value.value) continue else: return value if defaults: return defaults[0] return NoDefault
Retrieve a value from its key. Retrieval steps are: 1) Normalize the key 2) For each option group: a) Retrieve the value at that key b) If no value exists, continue c) If the value is an instance of 'Default', continue d) Otherwise, return the value 3) If no option had a non-default value for the key, return the first Default() option for the key (or :arg:`default`).
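A self-contained restatement of the precedence rule, under stated assumptions: the `Default` class below is a minimal stand-in for the one this method checks against, and key normalization is omitted. A concrete value in any option group beats every `Default`; among `Default`s, the first one found wins.

class Default:
    def __init__(self, value):
        self.value = value

def first_concrete(option_groups, key, fallback=None):
    """Standalone illustration of the lookup order described above."""
    defaults = []
    for options in option_groups:
        if key not in options:
            continue
        value = options[key]
        if isinstance(value, Default):
            defaults.append(value.value)
        else:
            return value
    return defaults[0] if defaults else fallback

groups = [{"color": Default("red")}, {"color": "blue"}]
print(first_concrete(groups, "color"))       # blue: concrete beats Default
print(first_concrete(groups, "size", "M"))   # M: fallback when no group has the key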
def drilldown_tree(self, session=None, json=False, json_fields=None):
    """
    This method generates a branch of a tree, beginning with the current
    node.

    For example:

        node7.drilldown_tree()

    .. code::

        level           Nested sets example

            1                    1(1)22       ---------------------
                    _______________|_________|_________            |
                   |               |         |         |           |
            2    2(2)5           6(4)11      |      12(7)21        |
                   |               ^         |         ^           |
            3    3(3)4       7(5)8   9(6)10  |  13(8)16   17(10)20 |
                                             |     |         |     |
            4                                | 14(9)15   18(11)19  |
                                             |                     |
                                              ---------------------

    Example in tests:

    * :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_drilldown_tree`
    """
    if not session:
        session = object_session(self)
    return self.get_tree(
        session, json=json, json_fields=json_fields,
        query=self._drilldown_query
    )
This method generates a branch of a tree, beginning with the current node.

For example:

    node7.drilldown_tree()

.. code::

    level           Nested sets example

        1                    1(1)22       ---------------------
                _______________|_________|_________            |
               |               |         |         |           |
        2    2(2)5           6(4)11      |      12(7)21        |
               |               ^         |         ^           |
        3    3(3)4       7(5)8   9(6)10  |  13(8)16   17(10)20 |
                                         |     |         |     |
        4                                | 14(9)15   18(11)19  |
                                         |                     |
                                          ---------------------

Example in tests:

* :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_drilldown_tree`
def authenticate(self): """Attempts to authenticate the user if a token was provided.""" if request.headers.get('Authorization', '').startswith('Negotiate '): in_token = base64.b64decode(request.headers['Authorization'][10:]) try: creds = current_app.extensions['gssapi']['creds'] except KeyError: raise RuntimeError('flask-gssapi not configured for this app') ctx = gssapi.SecurityContext(creds=creds, usage='accept') out_token = ctx.step(in_token) if ctx.complete: username = ctx._inquire(initiator_name=True).initiator_name return str(username), out_token return None, None
Attempts to authenticate the user if a token was provided.
def make_gui(self):
    """ Sets up the general structure of the gui; the first function called """
    self.option_window = Toplevel()
    self.option_window.protocol("WM_DELETE_WINDOW", self.on_exit)
    self.canvas_frame = tk.Frame(self, height=500)
    self.option_frame = tk.Frame(self.option_window, height=300)
    self.canvas_frame.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
    self.option_frame.pack(side=tk.RIGHT, fill=None, expand=False)
    self.make_options_frame()
    self.make_canvas_frame()
    self.disable_singlecolor()
Sets up the general structure of the gui; the first function called
def check_if_ready(self): """Check for and fetch the results if ready.""" try: results = self.manager.check(self.results_id) except exceptions.ResultsNotReady as e: self._is_ready = False self._not_ready_exception = e except exceptions.ResultsExpired as e: self._is_ready = True self._expired_exception = e else: failures = self.get_failed_requests(results) members = self.get_new_members(results) self.results = self.__class__.Results(list(members), list(failures)) self._is_ready = True self._not_ready_exception = None
Check for and fetch the results if ready.
def update_distant_reference(self, ref): """Validate and update the reference in Zotero. Existing fields not present will be left unmodified. """ self.validate_reference_data(ref["data"]) self._zotero_lib.update_item(ref)
Validate and update the reference in Zotero. Existing fields not present will be left unmodified.
def show_tracebacks(self): """ Show tracebacks """ if self.broker.tracebacks: print(file=self.stream) print("Tracebacks:", file=self.stream) for t in self.broker.tracebacks.values(): print(t, file=self.stream)
Show tracebacks
def get_release_environment(self, project, release_id, environment_id): """GetReleaseEnvironment. [Preview API] Get a release environment. :param str project: Project ID or project name :param int release_id: Id of the release. :param int environment_id: Id of the release environment. :rtype: :class:`<ReleaseEnvironment> <azure.devops.v5_1.release.models.ReleaseEnvironment>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if release_id is not None: route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int') if environment_id is not None: route_values['environmentId'] = self._serialize.url('environment_id', environment_id, 'int') response = self._send(http_method='GET', location_id='a7e426b1-03dc-48af-9dfe-c98bac612dcb', version='5.1-preview.6', route_values=route_values) return self._deserialize('ReleaseEnvironment', response)
GetReleaseEnvironment. [Preview API] Get a release environment. :param str project: Project ID or project name :param int release_id: Id of the release. :param int environment_id: Id of the release environment. :rtype: :class:`<ReleaseEnvironment> <azure.devops.v5_1.release.models.ReleaseEnvironment>`
def get_filter_func(patterns, prefix): """ Provides a filter function that can be used as filter argument on ``tarfile.add``. Generates the filter based on the patterns and prefix provided. Patterns should be a list of tuples. Each tuple consists of a compiled RegEx pattern and a boolean, indicating if it is an ignore entry or a negative exclusion (i.e. an exemption from exclusions). The prefix is used to match relative paths inside the tar file, and is removed from every entry passed into the functions. Note that all names passed into the returned function must be paths under the provided prefix. This condition is not checked! :param patterns: List of patterns and negative indicator. :type patterns: list[(__RegEx, bool)] :param prefix: Prefix to strip from all file names passed in. Leading and trailing path separators are removed. :type prefix: unicode | str :return: tarinfo.TarInfo -> tarinfo.TarInfo | NoneType """ prefix_len = len(prefix.strip(os.path.sep)) + 1 if any(i[1] for i in patterns): def _exclusion_func(tarinfo): name = tarinfo.name[prefix_len:] exclude = False for match_str, is_negative in patterns: if is_negative: if not exclude: continue if match_str.match(name) is not None: exclude = False elif exclude: continue elif match_str.match(name) is not None: exclude = True if exclude: return None return tarinfo else: # Optimized version: If there are no exemptions from matches, not all matches have to be processed. exclusions = [i[0] for i in patterns] def _exclusion_func(tarinfo): name = tarinfo.name[prefix_len:] if any(match_str.match(name) is not None for match_str in exclusions): return None return tarinfo return _exclusion_func
Provides a filter function that can be used as filter argument on ``tarfile.add``. Generates the filter based on the patterns and prefix provided. Patterns should be a list of tuples. Each tuple consists of a compiled RegEx pattern and a boolean, indicating if it is an ignore entry or a negative exclusion (i.e. an exemption from exclusions). The prefix is used to match relative paths inside the tar file, and is removed from every entry passed into the functions. Note that all names passed into the returned function must be paths under the provided prefix. This condition is not checked! :param patterns: List of patterns and negative indicator. :type patterns: list[(__RegEx, bool)] :param prefix: Prefix to strip from all file names passed in. Leading and trailing path separators are removed. :type prefix: unicode | str :return: tarinfo.TarInfo -> tarinfo.TarInfo | NoneType
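A usage sketch with hypothetical patterns and paths: drop `*.log` files but re-include `important.log`. Note that order matters here, since the exemption entry must come after the exclusion it lifts.

import re
import tarfile

patterns = [
    (re.compile(r".*\.log$"), False),        # plain exclusion
    (re.compile(r"important\.log$"), True),  # negative entry: exemption
]
tar_filter = get_filter_func(patterns, prefix="tree")

with tarfile.open("out.tar.gz", "w:gz") as tar:
    # Entry names inside the archive start with "tree/", matching the prefix.
    tar.add("tree", filter=tar_filter)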
def add_header_info(data_api, struct_inflator):
    """ Add ancillary header information to the structure.

    :param data_api: the interface to the decoded data
    :param struct_inflator: the interface to put the data into the client object
    """
    struct_inflator.set_header_info(data_api.r_free,
                                    data_api.r_work,
                                    data_api.resolution,
                                    data_api.title,
                                    data_api.deposition_date,
                                    data_api.release_date,
                                    data_api.experimental_methods)
Add ancillary header information to the structure.

:param data_api: the interface to the decoded data
:param struct_inflator: the interface to put the data into the client object
def cmdloop(self, intro=None): ''' Override the command loop to handle Ctrl-C. ''' self.preloop() # Set up completion with readline. if self.use_rawinput and self.completekey: try: import readline self.old_completer = readline.get_completer() readline.set_completer(self.complete) readline.parse_and_bind(self.completekey + ': complete') except ImportError: pass try: if intro is not None: self.intro = intro if self.intro: self.stdout.write(str(self.intro)+"\n") stop = None while not stop: if self.cmdqueue: line = self.cmdqueue.pop(0) else: if self.use_rawinput: try: if sys.version_info[0] == 2: line = raw_input(self.prompt) else: line = input(self.prompt) except EOFError: line = 'EOF' except KeyboardInterrupt: line = 'ctrlc' else: self.stdout.write(self.prompt) self.stdout.flush() line = self.stdin.readline() if not len(line): line = 'EOF' else: line = line.rstrip('\r\n') line = self.precmd(line) stop = self.onecmd(line) stop = self.postcmd(stop, line) self.postloop() finally: if self.use_rawinput and self.completekey: try: import readline readline.set_completer(self.old_completer) except ImportError: pass
Override the command loop to handle Ctrl-C.
def accessibles(self, roles=None):
    """
    Returns the list of *slugs* of the accounts that are accessible to
    ``request.user``, filtered by ``roles`` if present.
    """
    return [org['slug']
            for org in self.get_accessibles(self.request, roles=roles)]
Returns the list of *slugs* of the accounts that are accessible to
``request.user``, filtered by ``roles`` if present.
def update_firewall_rule(firewall_rule, protocol=None, action=None, name=None, description=None, ip_version=None, source_ip_address=None, destination_ip_address=None, source_port=None, destination_port=None, shared=None, enabled=None, profile=None): ''' Update a firewall rule CLI Example: .. code-block:: bash salt '*' neutron.update_firewall_rule firewall_rule protocol=PROTOCOL action=ACTION name=NAME description=DESCRIPTION ip_version=IP_VERSION source_ip_address=SOURCE_IP_ADDRESS destination_ip_address=DESTINATION_IP_ADDRESS source_port=SOURCE_PORT destination_port=DESTINATION_PORT shared=SHARED enabled=ENABLED :param firewall_rule: ID or name of firewall rule to update. :param protocol: Protocol for the firewall rule, choose "tcp","udp","icmp" or "None". (Optional) :param action: Action for the firewall rule, choose "allow" or "deny". (Optional) :param name: Name for the firewall rule. (Optional) :param description: Description for the firewall rule. (Optional) :param ip_version: IP protocol version, default: 4. (Optional) :param source_ip_address: Source IP address or subnet. (Optional) :param destination_ip_address: Destination IP address or subnet. (Optional) :param source_port: Source port (integer in [1, 65535] or range in a:b). (Optional) :param destination_port: Destination port (integer in [1, 65535] or range in a:b). (Optional) :param shared: Set shared to True, default: False. (Optional) :param enabled: To enable this rule, default: True. (Optional) :param profile: Profile to build on (Optional) ''' conn = _auth(profile) return conn.update_firewall_rule(firewall_rule, protocol, action, name, description, ip_version, source_ip_address, destination_ip_address, source_port, destination_port, shared, enabled)
Update a firewall rule CLI Example: .. code-block:: bash salt '*' neutron.update_firewall_rule firewall_rule protocol=PROTOCOL action=ACTION name=NAME description=DESCRIPTION ip_version=IP_VERSION source_ip_address=SOURCE_IP_ADDRESS destination_ip_address=DESTINATION_IP_ADDRESS source_port=SOURCE_PORT destination_port=DESTINATION_PORT shared=SHARED enabled=ENABLED :param firewall_rule: ID or name of firewall rule to update. :param protocol: Protocol for the firewall rule, choose "tcp","udp","icmp" or "None". (Optional) :param action: Action for the firewall rule, choose "allow" or "deny". (Optional) :param name: Name for the firewall rule. (Optional) :param description: Description for the firewall rule. (Optional) :param ip_version: IP protocol version, default: 4. (Optional) :param source_ip_address: Source IP address or subnet. (Optional) :param destination_ip_address: Destination IP address or subnet. (Optional) :param source_port: Source port (integer in [1, 65535] or range in a:b). (Optional) :param destination_port: Destination port (integer in [1, 65535] or range in a:b). (Optional) :param shared: Set shared to True, default: False. (Optional) :param enabled: To enable this rule, default: True. (Optional) :param profile: Profile to build on (Optional)
def get_version(brain_or_object): """Get the version of the current object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :returns: The current version of the object, or None if not available :rtype: int or None """ obj = get_object(brain_or_object) if not is_versionable(obj): return None return getattr(aq_base(obj), "version_id", 0)
Get the version of the current object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :returns: The current version of the object, or None if not available :rtype: int or None
def removeSessionWithKey(self, key): """ Remove a persistent session, if it exists. @type key: L{bytes} @param key: The persistent session identifier. """ self.store.query( PersistentSession, PersistentSession.sessionKey == key).deleteFromStore()
Remove a persistent session, if it exists. @type key: L{bytes} @param key: The persistent session identifier.
def _method_complete(self, result):
    """Called after an extension method with the result."""
    if isinstance(result, PrettyTensor):
        self._head = result
        return self
    elif isinstance(result, Loss):
        return result
    elif isinstance(result, PrettyTensorTupleMixin):
        self._head = result[0]
        return result
    else:
        self._head = self._head.with_tensor(result)
        return self
Called after an extension method with the result.
def _load_actor_from_local(self, driver_id, function_descriptor):
    """Load actor class from local code."""
    module_name, class_name = (function_descriptor.module_name,
                               function_descriptor.class_name)
    try:
        module = importlib.import_module(module_name)
        actor_class = getattr(module, class_name)
        if isinstance(actor_class, ray.actor.ActorClass):
            return actor_class._modified_class
        else:
            return actor_class
    except Exception:
        # Use %-style lazy logging args rather than str.format, which would
        # otherwise leave the literal "%s" in the message.
        logger.exception("Failed to load actor_class %s.", class_name)
        raise Exception(
            "Actor {} failed to be imported from local code.".format(
                class_name))
Load actor class from local code.
def locked_get(self): """Retrieve Credential from file. Returns: oauth2client.client.Credentials Raises: IOError if the file is a symbolic link. """ credentials = None _helpers.validate_file(self._filename) try: f = open(self._filename, 'rb') content = f.read() f.close() except IOError: return credentials try: credentials = client.Credentials.new_from_json(content) credentials.set_store(self) except ValueError: pass return credentials
Retrieve Credential from file. Returns: oauth2client.client.Credentials Raises: IOError if the file is a symbolic link.
def _wraptext(self, text, indent=0, width=0): """Shorthand for '\n'.join(self._wrap(par, indent, width) for par in text).""" return '\n'.join(self._wrap(par, indent, width) for par in text)
Shorthand for '\n'.join(self._wrap(par, indent, width) for par in text).
def customFilter(self, filterFunc):
    '''
        customFilter - Apply a custom filter to elements and return a QueryableList of matches

        @param filterFunc <lambda/function> - A lambda/function that is passed an item, and returns True if the item matches (will be returned), otherwise False.

        @return - A QueryableList object of the same type, with only the matching objects returned.
    '''
    ret = self.__class__()
    for item in self:
        if filterFunc(item):
            ret.append(item)
    return ret
customFilter - Apply a custom filter to elements and return a QueryableList of matches

@param filterFunc <lambda/function> - A lambda/function that is passed an item, and returns True if the item matches (will be returned), otherwise False.

@return - A QueryableList object of the same type, with only the matching objects returned.
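A small usage sketch; the element type and predicate are hypothetical, and any callable returning a boolean works.

people = QueryableList([
    {"name": "ada", "age": 36},
    {"name": "bob", "age": 17},
])
adults = people.customFilter(lambda item: item["age"] >= 18)
# adults is a QueryableList of the same type, holding only the first dict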
def clear_processes(self): """ Removes all L{Process}, L{Thread} and L{Module} objects in this snapshot. """ #self.close_process_and_thread_handles() for aProcess in self.iter_processes(): aProcess.clear() self.__processDict = dict()
Removes all L{Process}, L{Thread} and L{Module} objects in this snapshot.
def populate_field_list(self, excluded_fields=None): """Helper to add field of the layer to the list. :param excluded_fields: List of field that want to be excluded. :type excluded_fields: list """ # Populate fields list if excluded_fields is None: excluded_fields = [] self.field_list.clear() for field in self.layer.fields(): # Skip if it's excluded if field.name() in excluded_fields: continue # Skip if it's not number (float, int, etc) if field.type() not in qvariant_numbers: continue field_item = QListWidgetItem(self.field_list) field_item.setFlags( Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsDragEnabled) field_item.setData(Qt.UserRole, field.name()) field_item.setText(field.name()) self.field_list.addItem(field_item)
Helper to add field of the layer to the list. :param excluded_fields: List of field that want to be excluded. :type excluded_fields: list
def runif(self, seed=None): """ Generate a column of random numbers drawn from a uniform distribution [0,1) and having the same data layout as the source frame. :param int seed: seed for the random number generator. :returns: Single-column H2OFrame filled with doubles sampled uniformly from [0,1). """ fr = H2OFrame._expr(expr=ExprNode("h2o.runif", self, -1 if seed is None else seed)) fr._ex._cache.ncols = 1 fr._ex._cache.nrows = self.nrow return fr
Generate a column of random numbers drawn from a uniform distribution [0,1) and having the same data layout as the source frame. :param int seed: seed for the random number generator. :returns: Single-column H2OFrame filled with doubles sampled uniformly from [0,1).
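A common use, sketched here: a reproducible random train/test split, assuming `df` is an existing `H2OFrame` (boolean-mask row selection is standard H2OFrame usage).

r = df.runif(seed=42)      # one uniform double per row of df
train = df[r < 0.8]        # roughly 80% of the rows
test = df[r >= 0.8]        # the remaining ~20%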
def collect_ansible_classes(): """Run playbook and collect classes of ansible that are run.""" def trace_calls(frame, event, arg): # pylint: disable=W0613 """Trace function calls to collect ansible classes. Trace functions and check if they have self as an arg. If so, get their class if the class belongs to ansible. """ if event != 'call': return try: _locals = inspect.getargvalues(frame).locals if 'self' not in _locals: return _class = _locals['self'].__class__ _class_repr = repr(_class) if 'ansible' not in _class_repr: return ANSIBLE_CLASSES[_class] = True except (AttributeError, TypeError): pass print "Gathering classes" sys.settrace(trace_calls) main()
Run playbook and collect classes of ansible that are run.
def logspace(self,bins=None,units=None,conversion_function=convert_time,resolution=None,end_at_end=True): """ bins overwrites resolution """ if type(bins) in [list, np.ndarray]: return bins min = conversion_function(self.min,from_units=self.units,to_units=units) max = conversion_function(self.max,from_units=self.units,to_units=units) if units is None: units = self.units if resolution is None: resolution = 1.0 if bins is None: bins = self.len(resolution=resolution,units=units,conversion_function=conversion_function)# + 1 if units != '1' and end_at_end: # continuous variable behaviour: # we end with the last valid value at the outer edge return np.logspace(np.log10(min),np.log10(max),bins+1)[:-1] # discrete variable behaviour: # we end with the last valid value as its own bin return np.logspace(np.log10(min),np.log10(max),bins)
bins overwrites resolution
def send_await(self, msg, deadline=None): """ Like :meth:`send_async`, but expect a single reply (`persist=False`) delivered within `deadline` seconds. :param mitogen.core.Message msg: The message. :param float deadline: If not :data:`None`, seconds before timing out waiting for a reply. :returns: Deserialized reply. :raises TimeoutError: No message was received and `deadline` passed. """ receiver = self.send_async(msg) response = receiver.get(deadline) data = response.unpickle() _vv and IOLOG.debug('%r._send_await() -> %r', self, data) return data
Like :meth:`send_async`, but expect a single reply (`persist=False`) delivered within `deadline` seconds. :param mitogen.core.Message msg: The message. :param float deadline: If not :data:`None`, seconds before timing out waiting for a reply. :returns: Deserialized reply. :raises TimeoutError: No message was received and `deadline` passed.
def _head_object(s3_conn, bucket, key): """Retrieve information about an object in S3 if it exists. Args: s3_conn (botocore.client.S3): S3 connection to use for operations. bucket (str): name of the bucket containing the key. key (str): name of the key to lookup. Returns: dict: S3 object information, or None if the object does not exist. See the AWS documentation for explanation of the contents. Raises: botocore.exceptions.ClientError: any error from boto3 other than key not found is passed through. """ try: return s3_conn.head_object(Bucket=bucket, Key=key) except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == '404': return None else: raise
Retrieve information about an object in S3 if it exists. Args: s3_conn (botocore.client.S3): S3 connection to use for operations. bucket (str): name of the bucket containing the key. key (str): name of the key to lookup. Returns: dict: S3 object information, or None if the object does not exist. See the AWS documentation for explanation of the contents. Raises: botocore.exceptions.ClientError: any error from boto3 other than key not found is passed through.
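A usage sketch with hypothetical bucket and key names: skip an upload when the object already exists.

import boto3

s3 = boto3.client("s3")
info = _head_object(s3, "my-bucket", "artifacts/build.tar.gz")
if info is None:
    print("not found; safe to upload")
else:
    print("already present, ETag:", info["ETag"])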
def getAllData(self, temp=True, accel=True, gyro=True):
    """!
    Get all the available data.

    @param temp: True - Allow to return Temperature data
    @param accel: True - Allow to return Accelerometer data
    @param gyro: True - Allow to return Gyroscope data

    @return a dictionary of data
    @retval {} Did not read any data
    @retval {"temp":32.3,"accel":{"x":0.45634,"y":0.2124,"z":1.334},"gyro":{"x":0.45634,"y":0.2124,"z":1.334}} Returned all data
    """
    allData = {}
    if temp:
        allData["temp"] = self.getTemp()
    if accel:
        allData["accel"] = self.getAccelData(raw=False)
    if gyro:
        allData["gyro"] = self.getGyroData()
    return allData
! Get all the available data.

@param temp: True - Allow to return Temperature data
@param accel: True - Allow to return Accelerometer data
@param gyro: True - Allow to return Gyroscope data

@return a dictionary of data
@retval {} Did not read any data
@retval {"temp":32.3,"accel":{"x":0.45634,"y":0.2124,"z":1.334},"gyro":{"x":0.45634,"y":0.2124,"z":1.334}} Returned all data
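A brief usage sketch, assuming `sensor` is an already-initialized instance of this driver class.

data = sensor.getAllData(temp=True, accel=True, gyro=False)
print(data.get("temp"), data.get("accel"))   # "gyro" key absent by request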
def refresh(self): '''Refetch instance data from the API. ''' response = requests.get('%s/categories/%s' % (API_BASE_URL, self.name)) attributes = response.json() self.ancestors = [Category(name) for name in attributes['ancestors']] self.contents = WikiText(attributes['contents_raw'], attributes['contents_rendered']) self.description = attributes['description'] self.guides = [] for guide in attributes['guides']: self.guides.append(Guide(guide['guideid'])) # Unlike guides, categories return flags as a dict, keyed by flagid. # *Except* when it's empty, in which case we get an empty list due to # PHP's json_encode() not knowing the difference between an empty array # and an empty dict. flags = dict(attributes['flags']).values() self.flags = [Flag.from_id(flag['flagid']) for flag in flags] self.image = Image(attributes['image']['id']) if attributes['image'] else None self.locale = attributes['locale'] #self.parts = attributes['parts'] #self.solutions = attributes['solutions'] self.title = attributes['display_title']
Refetch instance data from the API.
def _raise_unrecoverable_error_payplug(self, exception): """ Raises an exceptions.ClientError with a message telling that the error probably comes from PayPlug. :param exception: Exception that caused the ClientError. :type exception: Exception :raise exceptions.ClientError """ message = ('There was an unrecoverable error during the HTTP request. It seems to come from our servers. ' 'If you are behind a proxy, ensure that it is configured correctly. If the issue persists, do not ' 'hesitate to contact us with the following information: `' + repr(exception) + '`.') raise exceptions.ClientError(message, client_exception=exception)
Raises an exceptions.ClientError with a message telling that the error probably comes from PayPlug. :param exception: Exception that caused the ClientError. :type exception: Exception :raise exceptions.ClientError
def GetLastKey(self, voice=1):
    """key as in musical key, not index"""
    voice_obj = self.GetChild(voice)
    if voice_obj is not None:
        key = BackwardSearch(KeyNode, voice_obj, 1)
        if key is not None:
            return key
    # Fall back to this node's own key in every other case.
    if hasattr(self, "key"):
        return self.key
key as in musical key, not index
def receive_message(self, message, data): # noqa: E501 pylint: disable=too-many-return-statements """ Called when a multizone message is received. """ if data[MESSAGE_TYPE] == TYPE_DEVICE_ADDED: uuid = data['device']['deviceId'] name = data['device']['name'] self._add_member(uuid, name) return True if data[MESSAGE_TYPE] == TYPE_DEVICE_REMOVED: uuid = data['deviceId'] self._remove_member(uuid) return True if data[MESSAGE_TYPE] == TYPE_DEVICE_UPDATED: uuid = data['device']['deviceId'] name = data['device']['name'] self._add_member(uuid, name) return True if data[MESSAGE_TYPE] == TYPE_MULTIZONE_STATUS: members = data['status']['devices'] members = \ {member['deviceId']: member['name'] for member in members} removed_members = \ list(set(self._members.keys())-set(members.keys())) added_members = list(set(members.keys())-set(self._members.keys())) _LOGGER.debug("(%s) Added members %s, Removed members: %s", self._uuid, added_members, removed_members) for uuid in removed_members: self._remove_member(uuid) for uuid in added_members: self._add_member(uuid, members[uuid]) for listener in list(self._status_listeners): listener.multizone_status_received() return True if data[MESSAGE_TYPE] == TYPE_SESSION_UPDATED: # A temporary group has been formed return True if data[MESSAGE_TYPE] == TYPE_CASTING_GROUPS: # Answer to GET_CASTING_GROUPS return True return False
Called when a multizone message is received.
def format_hexdump(arg): """Convert the bytes object to a hexdump. The output format will be: <offset, 4-byte> <16-bytes of output separated by 1 space> <16 ascii characters> """ line = '' for i in range(0, len(arg), 16): if i > 0: line += '\n' chunk = arg[i:i + 16] hex_chunk = hexlify(chunk).decode('utf-8') hex_line = ' '.join(hex_chunk[j:j + 2] for j in range(0, len(hex_chunk), 2)) if len(hex_line) < (3 * 16) - 1: hex_line += ' ' * (((3 * 16) - 1) - len(hex_line)) ascii_line = ''.join(_convert_to_ascii(x) for x in chunk) offset_line = '%08x' % i line += "%s %s %s" % (offset_line, hex_line, ascii_line) return line
Convert the bytes object to a hexdump. The output format will be: <offset, 4-byte> <16-bytes of output separated by 1 space> <16 ascii characters>
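A quick call for reference; `_convert_to_ascii` is assumed to render printable bytes as themselves and everything else as a placeholder, so the exact right-hand column depends on that helper.

print(format_hexdump(b"hello, hexdump!"))
# 00000000 68 65 6c 6c 6f 2c 20 68 65 78 64 75 6d 70 21    hello, hexdump!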