Columns: code (string, lengths 75 to 104k); docstring (string, lengths 1 to 46.9k)
def initializePhase(self, features, targets): """ Step 1: Initialization phase :param features feature matrix with dimension (numSamples, numInputs) :param targets target matrix with dimension (numSamples, numOutputs) """ assert features.shape[0] == targets.shape[0] assert features.shape[1] == self.inputs assert targets.shape[1] == self.outputs # randomly initialize the input->hidden connections self.inputWeights = np.random.random((self.numHiddenNeurons, self.inputs)) self.inputWeights = self.inputWeights * 2 - 1 if self.activationFunction == "sig": self.bias = np.random.random((1, self.numHiddenNeurons)) * 2 - 1 else: print(" Unknown activation function type") raise NotImplementedError H0 = self.calculateHiddenLayerActivation(features) self.M = pinv(np.dot(np.transpose(H0), H0)) self.beta = np.dot(pinv(H0), targets)
Step 1: Initialization phase :param features feature matrix with dimension (numSamples, numInputs) :param targets target matrix with dimension (numSamples, numOutputs)
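A minimal standalone sketch of the math this initialization performs, assuming a sigmoid hidden layer; the function and variable names here are illustrative, not the class's actual API.

```python
import numpy as np
from numpy.linalg import pinv

def init_oselm(features, targets, n_hidden):
    # random input->hidden weights and biases in [-1, 1], as above
    W = np.random.random((n_hidden, features.shape[1])) * 2 - 1
    b = np.random.random((1, n_hidden)) * 2 - 1
    # sigmoid hidden-layer activation (assumed behaviour of
    # calculateHiddenLayerActivation for the "sig" case)
    H0 = 1.0 / (1.0 + np.exp(-(features.dot(W.T) + b)))
    M = pinv(H0.T.dot(H0))        # inverse correlation matrix, kept for sequential updates
    beta = pinv(H0).dot(targets)  # least-squares output weights
    return W, b, M, beta
```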
def append(self, other): """Appends another array to this array. The returned array will have all of the class methods and virtual fields of this array, including any that were added using `add_method` or `add_virtualfield`. If this array and other array have one or more string fields, the dtype for those fields is updated to a string length that can encompass the longest string in both arrays. .. note:: Increasing the length of strings only works for fields, not sub-fields. Parameters ---------- other : array The array to append values from. It must have the same fields and dtype as this array, modulo the length of strings. If the other array does not have the same dtype, a TypeError is raised. Returns ------- array An array with the other array's values appended to this array's values. The returned array is an instance of the same class as this array, including all methods and virtual fields. """ try: return numpy.append(self, other).view(type=self.__class__) except TypeError: # see if the dtype error was due to string fields having different # lengths; if so, we'll make the joint field the larger of the # two str_fields = [name for name in self.fieldnames if _isstring(self.dtype[name])] # get the larger of the two new_strlens = dict( [[name, max(self.dtype[name].itemsize, other.dtype[name].itemsize)] for name in str_fields] ) # cast both to the new string lengths new_dt = [] for dt in self.dtype.descr: name = dt[0] if name in new_strlens: dt = (name, self.dtype[name].type, new_strlens[name]) new_dt.append(dt) new_dt = numpy.dtype(new_dt) return numpy.append( self.astype(new_dt), other.astype(new_dt) ).view(type=self.__class__)
Appends another array to this array. The returned array will have all of the class methods and virtual fields of this array, including any that were added using `add_method` or `add_virtualfield`. If this array and other array have one or more string fields, the dtype for those fields is updated to a string length that can encompass the longest string in both arrays. .. note:: Increasing the length of strings only works for fields, not sub-fields. Parameters ---------- other : array The array to append values from. It must have the same fields and dtype as this array, modulo the length of strings. If the other array does not have the same dtype, a TypeError is raised. Returns ------- array An array with the other array's values appended to this array's values. The returned array is an instance of the same class as this array, including all methods and virtual fields.
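The string-widening fallback can be demonstrated standalone with plain numpy structured arrays; the field name 'tag' below is illustrative.

```python
import numpy

a = numpy.array([('ab',)], dtype=[('tag', 'S2')])
b = numpy.array([('abcd',)], dtype=[('tag', 'S4')])
# widen the string field to the larger itemsize, then append
new_len = max(a.dtype['tag'].itemsize, b.dtype['tag'].itemsize)
new_dt = numpy.dtype([('tag', a.dtype['tag'].type, new_len)])
joined = numpy.append(a.astype(new_dt), b.astype(new_dt))
print(joined['tag'])  # [b'ab' b'abcd'] -- nothing truncated
```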
def __update(self, row): """Update rows in table """ expr = self.__table.update().values(row) for key in self.__update_keys: expr = expr.where(getattr(self.__table.c, key) == row[key]) if self.__autoincrement: expr = expr.returning(getattr(self.__table.c, self.__autoincrement)) res = expr.execute() if res.rowcount > 0: if self.__autoincrement: first = next(iter(res)) last_row_id = first[0] return last_row_id return 0 return None
Update rows in table
def chdir(self, path=None): """ Change the "current directory" of this SFTP session. Since SFTP doesn't really have the concept of a current working directory, this is emulated by Paramiko. Once you use this method to set a working directory, all operations on this `.SFTPClient` object will be relative to that path. You can pass in ``None`` to stop using a current working directory. :param str path: new current working directory :raises IOError: if the requested path doesn't exist on the server .. versionadded:: 1.4 """ if path is None: self._cwd = None return if not stat.S_ISDIR(self.stat(path).st_mode): raise SFTPError(errno.ENOTDIR, "%s: %s" % (os.strerror(errno.ENOTDIR), path)) self._cwd = b(self.normalize(path))
Change the "current directory" of this SFTP session. Since SFTP doesn't really have the concept of a current working directory, this is emulated by Paramiko. Once you use this method to set a working directory, all operations on this `.SFTPClient` object will be relative to that path. You can pass in ``None`` to stop using a current working directory. :param str path: new current working directory :raises IOError: if the requested path doesn't exist on the server .. versionadded:: 1.4
def _delete_iapp(self, iapp_name, deploying_device): '''Delete an iapp service and template on the root device. :param iapp_name: str -- name of iapp :param deploying_device: ManagementRoot object -- device where the iapp will be deleted ''' iapp = deploying_device.tm.sys.application iapp_serv = iapp.services.service.load( name=iapp_name, partition=self.partition ) iapp_serv.delete() iapp_tmpl = iapp.templates.template.load( name=iapp_name, partition=self.partition ) iapp_tmpl.delete()
Delete an iapp service and template on the root device. :param iapp_name: str -- name of iapp :param deploying_device: ManagementRoot object -- device where the iapp will be deleted
def get_first_pos_of_char(char, string): ''' :param char: The character to find :type char: string :param string: The string in which to search for *char* :type string: string :returns: Index in *string* where *char* first appears (unescaped by a preceding "\\"), -1 if not found :rtype: int Finds the first occurrence of *char* in *string* in which *char* is not present as an escaped character. ''' first_pos = -1 pos = len(string) while pos > 0: pos = string[:pos].rfind(char) if pos == -1: return first_pos num_backslashes = 0 test_index = pos - 1 while test_index >= 0 and string[test_index] == '\\': num_backslashes += 1 test_index -= 1 if num_backslashes % 2 == 0: first_pos = pos return first_pos
:param char: The character to find :type char: string :param string: The string in which to search for *char* :type string: string :returns: Index in *string* where *char* first appears (unescaped by a preceding "\\"), -1 if not found :rtype: int Finds the first occurrence of *char* in *string* in which *char* is not present as an escaped character.
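A short usage sketch of the escape-aware search, assuming the function is defined as above:

```python
# the ':' at index 2 is preceded by one backslash (escaped), so the first
# unescaped ':' is at index 4
print(get_first_pos_of_char(':', 'a\\:b:c'))   # 4
# two backslashes form one escaped backslash, so the ':' at index 3 counts
print(get_first_pos_of_char(':', 'a\\\\:b'))   # 3
print(get_first_pos_of_char(':', 'abc'))       # -1, not found
```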
def _wrpy_ncbi_gene_nts(fout_py, geneid2nt, log): """Write namedtuples to a dict in a Python module.""" num_genes = len(geneid2nt) with open(fout_py, 'w') as ofstrm: docstr = "Data downloaded from NCBI Gene converted into Python namedtuples." ofstrm.write('"""{PYDOC}"""\n\n'.format(PYDOC=docstr)) ofstrm.write("from collections import namedtuple\n\n") ofstrm.write('WRITTEN = "{DATE}"'.format( DATE=re.sub('-', '_', str(datetime.date.today())))) ofstrm.write(' # {N} items\n\n'.format(N=num_genes)) ntd = next(iter(geneid2nt.values())) # Access one dictionary value in Python 2 ofstrm.write("#pylint: disable=line-too-long,too-many-lines,invalid-name\n") ofstrm.write("{NtName} = namedtuple('{NtName}', '{FLDS}')\n\n".format( NtName=type(ntd).__name__, FLDS=' '.join(ntd._fields))) ofstrm.write("GENEID2NT = {{ # {N:,} items\n".format(N=num_genes)) for geneid, ntd in sorted(geneid2nt.items(), key=lambda t: t[0]): ofstrm.write(" {GeneID} : {NT},\n".format(GeneID=geneid, NT=ntd)) ofstrm.write("}\n") log.write(" {N:9} geneids WROTE: {PY}\n".format(N=num_genes, PY=fout_py))
Write namedtuples to a dict in a Python module.
def printc(cls, txt, color=colors.red): """Print in color.""" print(cls.color_txt(txt, color))
Print in color.
def next(self): """ Return the next event from the underlying iterator. :returns: tuple of (event, element, elementTag) """ try: self.event, self.element = next(self.iterator) self.elementTag = clearTag(self.element.tag) except StopIteration: clearParsedElements(self.element) raise StopIteration return self.event, self.element, self.elementTag
Return the next event from the underlying iterator. :returns: tuple of (event, element, elementTag)
def _siftdown_max(heap, startpos, pos): 'Maxheap variant of _siftdown' newitem = heap[pos] # Follow the path to the root, moving parents down until finding a place # newitem fits. while pos > startpos: parentpos = (pos - 1) >> 1 parent = heap[parentpos] if parent < newitem: heap[pos] = parent pos = parentpos continue break heap[pos] = newitem
Maxheap variant of _siftdown
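A hedged sketch of how this helper mirrors heapq's push, assuming _siftdown_max is defined as above:

```python
def heappush_max(heap, item):
    # same contract as heapq.heappush, but for a max-heap
    heap.append(item)
    _siftdown_max(heap, 0, len(heap) - 1)

heap = []
for v in [3, 1, 4, 1, 5]:
    heappush_max(heap, v)
print(heap[0])  # 5 -- the maximum sits at the root
```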
def list_subgroups_global(self, id): """ List subgroups. List the immediate OutcomeGroup children of the outcome group. Paginated. """ path = {} data = {} params = {} # REQUIRED - PATH - id """ID""" path["id"] = id self.logger.debug("GET /api/v1/global/outcome_groups/{id}/subgroups with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/global/outcome_groups/{id}/subgroups".format(**path), data=data, params=params, all_pages=True)
List subgroups. List the immediate OutcomeGroup children of the outcome group. Paginated.
def getUsers(context, roles, allow_empty=True): """ Present a DisplayList containing users in the specified list of roles """ mtool = getToolByName(context, 'portal_membership') pairs = allow_empty and [['', '']] or [] users = mtool.searchForMembers(roles=roles) for user in users: uid = user.getId() fullname = user.getProperty('fullname') if not fullname: fullname = uid pairs.append((uid, fullname)) pairs.sort(lambda x, y: cmp(x[1], y[1])) return DisplayList(pairs)
Present a DisplayList containing users in the specified list of roles
def get_data_info(self): """ imports er tables and places data into Data_info data structure outlined below: Data_info - {er_samples: {er_samples.txt info} er_sites: {er_sites.txt info} er_locations: {er_locations.txt info} er_ages: {er_ages.txt info}} """ Data_info = {} data_er_samples = {} data_er_sites = {} data_er_locations = {} data_er_ages = {} if self.data_model == 3.0: print(("data model: %1.1f" % (self.data_model))) Data_info["er_samples"] = [] Data_info["er_sites"] = [] Data_info["er_locations"] = [] Data_info["er_ages"] = [] # self.magic_file may have a full path, but this breaks cb.Contribution # determine if magic_file exists in WD, and if it doesn't, copy it in magic_file_real = os.path.realpath(self.magic_file) magic_file_short = os.path.split(self.magic_file)[1] WD_file_real = os.path.realpath( os.path.join(self.WD, magic_file_short)) if magic_file_real == WD_file_real: fnames = {'measurements': magic_file_short} else: # copy measurements file to WD, keeping original name shutil.copy(magic_file_real, WD_file_real) fnames = {'measurements': magic_file_short} self.con = cb.Contribution(self.WD, custom_filenames=fnames, read_tables=[ 'measurements', 'specimens', 'samples', 'sites', 'locations', 'criteria', 'ages']) if 'specimens' in self.con.tables: spec_container = self.con.tables['specimens'] self.spec_data = spec_container.df else: self.con.add_empty_magic_table('specimens') self.spec_data = self.con.tables['specimens'].df if 'samples' in self.con.tables: samp_container = self.con.tables['samples'] samp_container.front_and_backfill(['azimuth', 'dip']) self.samp_data = samp_container.df samp_data2 = self.samp_data.rename( columns=map_magic.samp_magic3_2_magic2_map) data_er_samples = samp_data2.T.to_dict() else: self.con.add_empty_magic_table('samples') self.samp_data = self.con.tables['samples'].df if 'sites' in self.con.tables: site_container = self.con.tables['sites'] self.site_data = site_container.df if 'age' in self.site_data.columns: self.site_data = self.site_data[self.site_data['age'].notnull( )] age_ids = [col for col in self.site_data.columns if col.startswith( "age") or col == "site"] age_data = self.site_data[age_ids].rename( columns=map_magic.site_magic3_2_magic2_map) # save this in 2.5 format er_ages = age_data.to_dict('records') data_er_ages = {} for s in er_ages: s = self.convert_ages_to_calendar_year(s) data_er_ages[s['er_site_name']] = s sites = self.site_data.rename( columns=map_magic.site_magic3_2_magic2_map) # pick out what is needed by thellier_gui and put in 2.5 format er_sites = sites.to_dict('records') data_er_sites = {} for s in er_sites: data_er_sites[s['er_site_name']] = s else: self.con.add_empty_magic_table('sites') self.site_data = self.con.tables['sites'].df if 'locations' in self.con.tables: location_container = self.con.tables["locations"] self.loc_data = location_container.df # only need this for saving tables if self.loc_data['location'].isnull().any(): self.loc_data.replace( {'location': {None: 'unknown'}}, inplace=True) self.loc_data.set_index('location', inplace=True) self.loc_data['location'] = self.loc_data.index loc2_data = self.loc_data.rename( columns=map_magic.loc_magic3_2_magic2_map) data_er_locations = loc2_data.to_dict('index') else: self.con.add_empty_magic_table('locations') self.loc_data = self.con.tables['locations'].df else: # try 2.5 data model print(("data model: %1.1f" % (self.data_model))) self.read_magic_file(os.path.join( self.WD, "er_samples.txt"), 'er_sample_name') try: data_er_samples = self.read_magic_file( os.path.join(self.WD, "er_samples.txt"), 'er_sample_name') except: print("-W- Can't find er_sample.txt in project directory") try: data_er_sites = self.read_magic_file( os.path.join(self.WD, "er_sites.txt"), 'er_site_name') except: print("-W- Can't find er_sites.txt in project directory") try: data_er_locations = self.read_magic_file(os.path.join( self.WD, "er_locations.txt"), 'er_location_name') except: print("-W- Can't find er_locations.txt in project directory") try: data_er_ages = self.read_magic_file( os.path.join(self.WD, "er_ages.txt"), 'er_sample_name') except: try: data_er_ages = self.read_magic_file( os.path.join(self.WD, "er_ages.txt"), 'er_site_name') except: print("-W- Can't find er_ages in project directory") Data_info["er_samples"] = data_er_samples Data_info["er_sites"] = data_er_sites Data_info["er_locations"] = data_er_locations Data_info["er_ages"] = data_er_ages return(Data_info)
imports er tables and places data into Data_info data structure outlined below: Data_info - {er_samples: {er_samples.txt info} er_sites: {er_sites.txt info} er_locations: {er_locations.txt info} er_ages: {er_ages.txt info}}
def get_sideplot_ranges(plot, element, main, ranges): """ Utility to find the range for an adjoined plot given the plot, the element, the Element the plot is adjoined to and the dictionary of ranges. """ key = plot.current_key dims = element.dimensions() dim = dims[0] if 'frequency' in dims[1].name else dims[1] range_item = main if isinstance(main, HoloMap): if issubclass(main.type, CompositeOverlay): range_item = [hm for hm in main._split_overlays()[1] if dim in hm.dimensions('all')][0] else: range_item = HoloMap({0: main}, kdims=['Frame']) ranges = match_spec(range_item.last, ranges) if dim.name in ranges: main_range = ranges[dim.name]['combined'] else: framewise = plot.lookup_options(range_item.last, 'norm').options.get('framewise') if framewise and range_item.get(key, False): main_range = range_item[key].range(dim) else: main_range = range_item.range(dim) # If .main is an NdOverlay or a HoloMap of Overlays get the correct style if isinstance(range_item, HoloMap): range_item = range_item.last if isinstance(range_item, CompositeOverlay): range_item = [ov for ov in range_item if dim in ov.dimensions('all')][0] return range_item, main_range, dim
Utility to find the range for an adjoined plot given the plot, the element, the Element the plot is adjoined to and the dictionary of ranges.
def _checkremove_que(self, word): """If word ends in -que and if word is not in pass list, strip -que""" in_que_pass_list = False que_pass_list = ['atque', 'quoque', 'neque', 'itaque', 'absque', 'apsque', 'abusque', 'adaeque', 'adusque', 'denique', 'deque', 'susque', 'oblique', 'peraeque', 'plenisque', 'quandoque', 'quisque', 'quaeque', 'cuiusque', 'cuique', 'quemque', 'quamque', 'quaque', 'quique', 'quorumque', 'quarumque', 'quibusque', 'quosque', 'quasque', 'quotusquisque', 'quousque', 'ubique', 'undique', 'usque', 'uterque', 'utique', 'utroque', 'utribique', 'torque', 'coque', 'concoque', 'contorque', 'detorque', 'decoque', 'excoque', 'extorque', 'obtorque', 'optorque', 'retorque', 'recoque', 'attorque', 'incoque', 'intorque', 'praetorque'] if word not in que_pass_list: word = re.sub(r'que$', '', word) else: in_que_pass_list = True return word, in_que_pass_list
If word ends in -que and if word is not in pass list, strip -que
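Hedged usage sketch; `stemmer` is a hypothetical instance of the enclosing class (the method does not touch `self`):

```python
print(stemmer._checkremove_que('armaque'))  # ('arma', False) -- enclitic stripped
print(stemmer._checkremove_que('atque'))    # ('atque', True) -- pass-list word kept
```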
def streamify(self, state, frame): """Prepare frame for output as a byte-stuffed stream.""" # Split the frame apart for stuffing... pieces = frame.split(self.prefix) return '%s%s%s%s%s' % (self.prefix, self.begin, (self.prefix + self.nop).join(pieces), self.prefix, self.end)
Prepare frame for output as a byte-stuffed stream.
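A hedged sketch of the inverse operation, written standalone; the framing arguments mirror the attributes used above and the function name is illustrative:

```python
def destuff(stream, prefix, begin, nop, end):
    # strip the prefix+begin ... prefix+end framing
    assert stream.startswith(prefix + begin) and stream.endswith(prefix + end)
    body = stream[len(prefix + begin):len(stream) - len(prefix + end)]
    # each stuffed prefix+nop escape decodes back to a bare prefix byte
    return body.replace(prefix + nop, prefix)
```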
def stop_gradient(self, stop_layers, bigdl_type="float"): """ Stop the input gradient of layers that match the given ```names```. Their input gradients are not computed, and they will not contribute to the input gradient computation of layers that depend on them. :param stop_layers: an array of layer names :param bigdl_type: :return: """ callBigDlFunc(bigdl_type, "setStopGradient", self.value, stop_layers) return self
Stop the input gradient of layers that match the given ```names```. Their input gradients are not computed, and they will not contribute to the input gradient computation of layers that depend on them. :param stop_layers: an array of layer names :param bigdl_type: :return:
def format_tb(tb=None, limit=None, allLocals=None, allGlobals=None, withTitle=False, with_color=None, with_vars=None): """ :param types.TracebackType|types.FrameType|StackSummary tb: traceback. if None, will use sys._getframe :param int|None limit: limit the traceback to this number of frames. by default, will look at sys.tracebacklimit :param dict[str]|None allLocals: if set, will update it with all locals from all frames :param dict[str]|None allGlobals: if set, will update it with all globals from all frames :param bool withTitle: :param bool|None with_color: output with ANSI escape codes for color :param bool with_vars: will print var content which are referenced in the source code line. by default enabled. :return: list of strings (line-based) :rtype: list[str] """ color = Color(enable=with_color) output = _Output(color=color) def format_filename(s): """ :param str s: :rtype: str """ base = os.path.basename(s) return ( color('"' + s[:-len(base)], color.fg_colors[2]) + color(base, color.fg_colors[2], bold=True) + color('"', color.fg_colors[2])) format_py_obj = output.pretty_print if tb is None: # noinspection PyBroadException try: tb = get_current_frame() assert tb except Exception: output(color("format_tb: tb is None and sys._getframe() failed", color.fg_colors[1], bold=True)) return output.lines def is_stack_summary(_tb): """ :param StackSummary|object _tb: :rtype: bool """ return isinstance(_tb, StackSummary) isframe = inspect.isframe if withTitle: if isframe(tb) or is_stack_summary(tb): output(color('Traceback (most recent call first):', color.fg_colors[0])) else: # expect traceback-object (or compatible) output(color('Traceback (most recent call last):', color.fg_colors[0])) if with_vars is None and is_at_exit(): # Better to not show __repr__ of some vars, as this might lead to crashes # when native extensions are involved. with_vars = False if withTitle: output("(Exclude vars because we are exiting.)") if with_vars is None: if any([f.f_code.co_name == "__del__" for f in iter_traceback()]): # __del__ is usually called via the Python garbage collector (GC). # This can happen at very random / non-deterministic places. # There are cases where it is not safe to access some of the vars on the stack # because they might be in a non-well-defined state, thus calling their __repr__ is not safe. # See e.g. this bug: # https://github.com/tensorflow/tensorflow/issues/22770 with_vars = False if withTitle: output("(Exclude vars because we are on a GC stack.)") if with_vars is None: with_vars = True # noinspection PyBroadException try: if limit is None: if hasattr(sys, 'tracebacklimit'): limit = sys.tracebacklimit n = 0 _tb = tb class NotFound(Exception): """ Identifier not found. """ def _resolve_identifier(namespace, keys): """ :param dict[str] namespace: :param tuple[str] keys: :return: namespace[name[0]][name[1]]... """ if keys[0] not in namespace: raise NotFound() obj = namespace[keys[0]] for part in keys[1:]: obj = getattr(obj, part) return obj # noinspection PyShadowingNames def _try_set(old, prefix, func): """ :param None|str old: :param str prefix: :param func: :return: old """ if old is not None: return old try: return add_indent_lines(prefix, func()) except NotFound: return old except Exception as e: return prefix + "!" + e.__class__.__name__ + ": " + str(e) while _tb is not None and (limit is None or n < limit): if isframe(_tb): f = _tb elif is_stack_summary(_tb): if isinstance(_tb[0], ExtendedFrameSummary): f = _tb[0].tb_frame else: f = DummyFrame.from_frame_summary(_tb[0]) else: f = _tb.tb_frame if allLocals is not None: allLocals.update(f.f_locals) if allGlobals is not None: allGlobals.update(f.f_globals) if hasattr(_tb, "tb_lineno"): lineno = _tb.tb_lineno elif is_stack_summary(_tb): lineno = _tb[0].lineno else: lineno = f.f_lineno co = f.f_code filename = co.co_filename name = co.co_name file_descr = "".join([ ' ', color("File ", color.fg_colors[0], bold=True), format_filename(filename), ", ", color("line ", color.fg_colors[0]), color("%d" % lineno, color.fg_colors[4]), ", ", color("in ", color.fg_colors[0]), name]) with output.fold_text_ctx(file_descr): if not os.path.isfile(filename): alt_fn = fallback_findfile(filename) if alt_fn: output( color(" -- couldn't find file, trying this instead: ", color.fg_colors[0]) + format_filename(alt_fn)) filename = alt_fn source_code = get_source_code(filename, lineno, f.f_globals) if source_code: source_code = remove_indent_lines(replace_tab_indents(source_code)).rstrip() output(" line: ", color.py_syntax_highlight(source_code), color=color.fg_colors[0]) if not with_vars: pass elif isinstance(f, DummyFrame) and not f.have_vars_available: pass else: with output.fold_text_ctx(color(' locals:', color.fg_colors[0])): already_printed_locals = set() # type: typing.Set[typing.Tuple[str,...]] for token_str in grep_full_py_identifiers(parse_py_statement(source_code)): splitted_token = tuple(token_str.split(".")) for token in [splitted_token[0:i] for i in range(1, len(splitted_token) + 1)]: if token in already_printed_locals: continue token_value = None token_value = _try_set( token_value, color("<local> ", color.fg_colors[0]), lambda: format_py_obj(_resolve_identifier(f.f_locals, token))) token_value = _try_set( token_value, color("<global> ", color.fg_colors[0]), lambda: format_py_obj(_resolve_identifier(f.f_globals, token))) token_value = _try_set( token_value, color("<builtin> ", color.fg_colors[0]), lambda: format_py_obj(_resolve_identifier(f.f_builtins, token))) token_value = token_value or color("<not found>", color.fg_colors[0]) prefix = ( ' %s ' % color(".", color.fg_colors[0], bold=True).join(token) + color("= ", color.fg_colors[0], bold=True)) output(prefix, token_value) already_printed_locals.add(token) if len(already_printed_locals) == 0: output(color(" no locals", color.fg_colors[0])) else: output(color(' -- code not available --', color.fg_colors[0])) if isframe(_tb): _tb = _tb.f_back elif is_stack_summary(_tb): _tb = StackSummary.from_list(_tb[1:]) if not _tb: _tb = None else: _tb = _tb.tb_next n += 1 except Exception: output(color("ERROR: cannot get more detailed exception info because:", color.fg_colors[1], bold=True)) import traceback for l in traceback.format_exc().split("\n"): output(" " + l) return output.lines
:param types.TracebackType|types.FrameType|StackSummary tb: traceback. if None, will use sys._getframe :param int|None limit: limit the traceback to this number of frames. by default, will look at sys.tracebacklimit :param dict[str]|None allLocals: if set, will update it with all locals from all frames :param dict[str]|None allGlobals: if set, will update it with all globals from all frames :param bool withTitle: :param bool|None with_color: output with ANSI escape codes for color :param bool with_vars: will print var content which are referenced in the source code line. by default enabled. :return: list of strings (line-based) :rtype: list[str]
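Hedged usage sketch: render the traceback of a caught exception without color codes, assuming format_tb is importable from this module.

```python
import sys

try:
    raise ValueError("demo")
except ValueError:
    for line in format_tb(tb=sys.exc_info()[2], with_color=False):
        print(line)
```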
def set(self, name, default=0, editable=True, description=""): '''Define a variable in DB and in memory''' var, created = ConfigurationVariable.objects.get_or_create(name=name) if created: var.value = default if not editable: var.value = default var.editable = editable var.description = description var.save(reload=False) # ATTRIBUTES is accesible by any instance of VariablesManager self.ATTRIBUTES[var.name] = var.value
Define a variable in DB and in memory
def fnFromDate(self, date): """Get filename from date.""" fn = time.strftime('comics-%Y%m%d', date) fn = os.path.join(self.basepath, 'html', fn + ".html") fn = os.path.abspath(fn) return fn
Get filename from date.
def from_node(index, value): """ >>> h = TimelineHistory.from_node(1, 2) >>> h.lines [] """ try: lines = json.loads(value) except (TypeError, ValueError): lines = None if not isinstance(lines, list): lines = [] return TimelineHistory(index, value, lines)
>>> h = TimelineHistory.from_node(1, 2) >>> h.lines []
def run_top_task(self, task_name=None, sort=None, **kwargs): """Finds and runs a pending task that is first in the sorting list. Parameters ----------- task_name : str The task name. sort : List of tuple PyMongo sort argument, search "PyMongo find one sorting" and `collection level operations <http://api.mongodb.com/python/current/api/pymongo/collection.html>`__ for more details. kwargs : other parameters User-customized parameters such as description, version number. Examples --------- Monitors the database and pulls tasks to run >>> while True: >>> print("waiting task from distributor") >>> db.run_top_task(task_name='mnist', sort=[("time", -1)]) >>> time.sleep(1) Returns -------- boolean : True for success, False for fail. """ if not isinstance(task_name, str): # is None: raise Exception("task_name should be string") self._fill_project_info(kwargs) kwargs.update({'status': 'pending'}) # find task and set status to running task = self.db.Task.find_one_and_update(kwargs, {'$set': {'status': 'running'}}, sort=sort) try: # get task info e.g. hyper parameters, python script if task is None: logging.info("[Database] Find Task FAIL: key: {} sort: {}".format(task_name, sort)) return False else: logging.info("[Database] Find Task SUCCESS: key: {} sort: {}".format(task_name, sort)) _datetime = task['time'] _script = task['script'] _id = task['_id'] _hyper_parameters = task['hyper_parameters'] _saved_result_keys = task['saved_result_keys'] logging.info("  hyper parameters:") for key in _hyper_parameters: globals()[key] = _hyper_parameters[key] logging.info("   {}: {}".format(key, _hyper_parameters[key])) # run task s = time.time() logging.info("[Database] Start Task: key: {} sort: {} push time: {}".format(task_name, sort, _datetime)) _script = _script.decode('utf-8') with tf.Graph().as_default(): # as graph: # clear all TF graphs exec(_script, globals()) # set status to finished _ = self.db.Task.find_one_and_update({'_id': _id}, {'$set': {'status': 'finished'}}) # return results __result = {} for _key in _saved_result_keys: logging.info("  result: {}={} {}".format(_key, globals()[_key], type(globals()[_key]))) __result.update({"%s" % _key: globals()[_key]}) _ = self.db.Task.find_one_and_update( { '_id': _id }, {'$set': { 'result': __result }}, return_document=pymongo.ReturnDocument.AFTER ) logging.info( "[Database] Finished Task: task_name - {} sort: {} push time: {} took: {}s". format(task_name, sort, _datetime, time.time() - s) ) return True except Exception as e: exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] logging.info("{} {} {} {} {}".format(exc_type, exc_obj, fname, exc_tb.tb_lineno, e)) logging.info("[Database] Fail to run task") # if fail, set status back to pending _ = self.db.Task.find_one_and_update({'_id': _id}, {'$set': {'status': 'pending'}}) return False
Finds and runs a pending task that is first in the sorting list. Parameters ----------- task_name : str The task name. sort : List of tuple PyMongo sort argument, search "PyMongo find one sorting" and `collection level operations <http://api.mongodb.com/python/current/api/pymongo/collection.html>`__ for more details. kwargs : other parameters User-customized parameters such as description, version number. Examples --------- Monitors the database and pulls tasks to run >>> while True: >>> print("waiting task from distributor") >>> db.run_top_task(task_name='mnist', sort=[("time", -1)]) >>> time.sleep(1) Returns -------- boolean : True for success, False for fail.
def logReload(options): """ Encompasses all the logic for reloading the observer. """ event_handler = Reload(options) observer = Observer() observer.schedule(event_handler, path='.', recursive=True) observer.start() try: while True: time.sleep(1) except KeyboardInterrupt: observer.stop() pid = os.getpid() chalk.eraser() chalk.green('\nHendrix successfully closed.') os.kill(pid, 15) observer.join() exit('\n')
Encompasses all the logic for reloading the observer.
def set_log_file_maximum_size(self, logFileMaxSize): """ Set the log file maximum size in megabytes :Parameters: #. logFileMaxSize (number): The maximum size in Megabytes of a logging file. Once exceeded, another logging file as logFileBasename_N.logFileExtension will be created. Where N is an automatically incremented number. """ assert _is_number(logFileMaxSize), "logFileMaxSize must be a number" logFileMaxSize = float(logFileMaxSize) assert logFileMaxSize>=1, "logFileMaxSize minimum size is 1 megabytes" self.__maxlogFileSize = logFileMaxSize
Set the log file maximum size in megabytes :Parameters: #. logFileMaxSize (number): The maximum size in Megabytes of a logging file. Once exceeded, another logging file as logFileBasename_N.logFileExtension will be created. Where N is an automatically incremented number.
def create_refresh_token(self, access_token_value): # type: (str) -> str """ Creates a refresh token bound to the specified access token. """ if access_token_value not in self.access_tokens: raise InvalidAccessToken('{} unknown'.format(access_token_value)) if not self.refresh_token_lifetime: logger.debug('no refresh token issued for access_token=%s', access_token_value) return None refresh_token = rand_str() authz_info = {'access_token': access_token_value, 'exp': int(time.time()) + self.refresh_token_lifetime} self.refresh_tokens[refresh_token] = authz_info logger.debug('issued refresh_token=%s expiring=%d for access_token=%s', refresh_token, authz_info['exp'], access_token_value) return refresh_token
Creates a refresh token bound to the specified access token.
def parse_expmethodresponse(self, tup_tree): # pylint: disable=unused-argument """ This function is not implemented. """ raise CIMXMLParseError( _format("Internal Error: Parsing support for element {0!A} is not " "implemented", name(tup_tree)), conn_id=self.conn_id)
This function is not implemented.
def normalize_uri(u: URI) -> URIRef: """ Return a URIRef for a str or URIRef """ return u if isinstance(u, URIRef) else URIRef(str(u))
Return a URIRef for a str or URIRef
def serialize_raw_master_key_prefix(raw_master_key): """Produces the prefix that a RawMasterKey will always use for the key_info value of keys which require additional information. :param raw_master_key: RawMasterKey for which to produce a prefix :type raw_master_key: aws_encryption_sdk.key_providers.raw.RawMasterKey :returns: Serialized key_info prefix :rtype: bytes """ if raw_master_key.config.wrapping_key.wrapping_algorithm.encryption_type is EncryptionType.ASYMMETRIC: return to_bytes(raw_master_key.key_id) return struct.pack( ">{}sII".format(len(raw_master_key.key_id)), to_bytes(raw_master_key.key_id), # Tag Length is stored in bits, not bytes raw_master_key.config.wrapping_key.wrapping_algorithm.algorithm.tag_len * 8, raw_master_key.config.wrapping_key.wrapping_algorithm.algorithm.iv_len, )
Produces the prefix that a RawMasterKey will always use for the key_info value of keys which require additional information. :param raw_master_key: RawMasterKey for which to produce a prefix :type raw_master_key: aws_encryption_sdk.key_providers.raw.RawMasterKey :returns: Serialized key_info prefix :rtype: bytes
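The symmetric-key branch's layout can be checked with a struct round trip; the key id and lengths below are illustrative values, not ones mandated by the SDK.

```python
import struct

key_id = b'example-key'
fmt = '>{}sII'.format(len(key_id))
packed = struct.pack(fmt, key_id, 128, 12)         # tag length in bits, IV length
kid, tag_bits, iv_len = struct.unpack(fmt, packed)
print(kid, tag_bits // 8, iv_len)                  # b'example-key' 16 12
```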
def docs(context: Context): """ Generates static documentation """ try: from sphinx.application import Sphinx except ImportError: context.pip_command('install', 'Sphinx') from sphinx.application import Sphinx context.shell('cp', 'README.rst', 'docs/README.rst') app = Sphinx('docs', 'docs', 'docs/build', 'docs/build/.doctrees', buildername='html', parallel=True, verbosity=context.verbosity) app.build()
Generates static documentation
def Add(self, service, method, request, global_params=None): """Add a request to the batch. Args: service: A class inheriting base_api.BaseApiService. method: A string indicating the desired method from the service. See the example in the class docstring. request: An input message appropriate for the specified service.method. global_params: Optional additional parameters to pass into method.PrepareHttpRequest. Returns: None """ # Retrieve the configs for the desired method and service. method_config = service.GetMethodConfig(method) upload_config = service.GetUploadConfig(method) # Prepare the HTTP Request. http_request = service.PrepareHttpRequest( method_config, request, global_params=global_params, upload_config=upload_config) # Create the request and add it to our master list. api_request = self.ApiCall( http_request, self.retryable_codes, service, method_config) self.api_requests.append(api_request)
Add a request to the batch. Args: service: A class inheriting base_api.BaseApiService. method: A string indicating the desired method from the service. See the example in the class docstring. request: An input message appropriate for the specified service.method. global_params: Optional additional parameters to pass into method.PrepareHttpRequest. Returns: None
def _read_fd(file_descr): """ Read incoming data from file handle. Then find the matching StreamDescriptor by file_descr value. :param file_descr: file object :return: Return number of bytes read """ try: line = os.read(file_descr, 1024 * 1024) except OSError: stream_desc = NonBlockingStreamReader._get_sd(file_descr) if stream_desc is not None: stream_desc.has_error = True if stream_desc.callback is not None: stream_desc.callback() return 0 if line: bytes_read = len(line) stream_desc = NonBlockingStreamReader._get_sd(file_descr) if stream_desc is None: return 0 # Process closing if IS_PYTHON3: try: # @TODO: further develop for not ascii/unicode binary content line = line.decode("ascii") except UnicodeDecodeError: line = repr(line) stream_desc.buf += line # Break lines split = stream_desc.buf.split(os.linesep) for line in split[:-1]: stream_desc.read_queue.appendleft(strip_escape(line.strip())) if stream_desc.callback is not None: stream_desc.callback() # Store the remainder; it is either '' if the last char was '\n' # or the remaining buffer before a line end stream_desc.buf = split[-1] return bytes_read return 0
Read incoming data from file handle. Then find the matching StreamDescriptor by file_descr value. :param file_descr: file object :return: Return number of bytes read
def extra_context(self, request, context): """Call the PAGE_EXTRA_CONTEXT function if there is one.""" if settings.PAGE_EXTRA_CONTEXT: context.update(settings.PAGE_EXTRA_CONTEXT())
Call the PAGE_EXTRA_CONTEXT function if there is one.
def posthoc_tamhane(a, val_col=None, group_col=None, welch=True, sort=False): '''Tamhane's T2 all-pairs comparison test for normally distributed data with unequal variances. Tamhane's T2 test can be performed for all-pairs comparisons in a one-factorial layout with normally distributed residuals but unequal group variances. A total of m = k(k-1)/2 hypotheses can be tested. The null hypothesis is tested in the two-tailed test against the alternative hypothesis [1]_. Parameters ---------- a : array_like or pandas DataFrame object An array, any object exposing the array interface or a pandas DataFrame. val_col : str, optional Name of a DataFrame column that contains dependent variable values (test or response variable). Values should have a non-nominal scale. Must be specified if `a` is a pandas DataFrame object. group_col : str, optional Name of a DataFrame column that contains independent variable values (grouping or predictor variable). Values should have a nominal scale (categorical). Must be specified if `a` is a pandas DataFrame object. welch : bool, optional If True, use Welch's approximate solution for calculating the degree of freedom. T2 test uses the usual df = N - 2 approximation. sort : bool, optional If True, sort data by block and group columns. Returns ------- result : pandas DataFrame P values. Notes ----- The p values are computed from the t-distribution and adjusted according to Dunn-Sidak. References ---------- .. [1] A.C. Tamhane (1979), A Comparison of Procedures for Multiple Comparisons of Means with Unequal Variances. Journal of the American Statistical Association, 74, 471-480. Examples -------- >>> import scikit_posthocs as sp >>> import pandas as pd >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]}) >>> x = x.melt(var_name='groups', value_name='values') >>> sp.posthoc_tamhane(x, val_col='values', group_col='groups') ''' x, _val_col, _group_col = __convert_to_df(a, val_col, group_col) if not sort: x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True) x.sort_values(by=[_group_col], ascending=True, inplace=True) groups = x[_group_col].unique() x_grouped = x.groupby(_group_col)[_val_col] ni = x_grouped.count() n = ni.sum() xi = x_grouped.mean() si = x_grouped.var() sin = 1. / (n - groups.size) * np.sum(si * (ni - 1)) def compare(i, j): dif = xi[i] - xi[j] A = si[i] / ni[i] + si[j] / ni[j] t_val = dif / np.sqrt(A) if welch: df = A ** 2. / (si[i] ** 2. / (ni[i] ** 2. * (ni[i] - 1.)) + si[j] ** 2. / (ni[j] ** 2. * (ni[j] - 1.))) else: ## checks according to Tamhane (1979, p. 474) ok1 = (9./10. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 10./9.) ok2 = (9./10. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 10./9.) ok3 = (4./5. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 5./4.) and (1./2. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 2.) ok4 = (2./3. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 3./2.) and (3./4. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 4./3.) OK = any([ok1, ok2, ok3, ok4]) if not OK: print("Sample sizes or standard errors are not balanced. T2 test is recommended.") df = ni[i] + ni[j] - 2. p_val = 2. * ss.t.sf(np.abs(t_val), df=df) return p_val vs = np.zeros((groups.size, groups.size), dtype=float) tri_upper = np.triu_indices(vs.shape[0], 1) tri_lower = np.tril_indices(vs.shape[0], -1) vs[:,:] = 0 combs = it.combinations(range(groups.size), 2) for i,j in combs: vs[i, j] = compare(groups[i], groups[j]) vs[tri_upper] = 1. - (1. - vs[tri_upper]) ** groups.size vs[tri_lower] = vs.T[tri_lower] vs[vs > 1] = 1 np.fill_diagonal(vs, -1) return DataFrame(vs, index=groups, columns=groups)
Tamhane's T2 all-pairs comparison test for normally distributed data with unequal variances. Tamhane's T2 test can be performed for all-pairs comparisons in a one-factorial layout with normally distributed residuals but unequal group variances. A total of m = k(k-1)/2 hypotheses can be tested. The null hypothesis is tested in the two-tailed test against the alternative hypothesis [1]_. Parameters ---------- a : array_like or pandas DataFrame object An array, any object exposing the array interface or a pandas DataFrame. val_col : str, optional Name of a DataFrame column that contains dependent variable values (test or response variable). Values should have a non-nominal scale. Must be specified if `a` is a pandas DataFrame object. group_col : str, optional Name of a DataFrame column that contains independent variable values (grouping or predictor variable). Values should have a nominal scale (categorical). Must be specified if `a` is a pandas DataFrame object. welch : bool, optional If True, use Welch's approximate solution for calculating the degree of freedom. T2 test uses the usual df = N - 2 approximation. sort : bool, optional If True, sort data by block and group columns. Returns ------- result : pandas DataFrame P values. Notes ----- The p values are computed from the t-distribution and adjusted according to Dunn-Sidak. References ---------- .. [1] A.C. Tamhane (1979), A Comparison of Procedures for Multiple Comparisons of Means with Unequal Variances. Journal of the American Statistical Association, 74, 471-480. Examples -------- >>> import scikit_posthocs as sp >>> import pandas as pd >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]}) >>> x = x.melt(var_name='groups', value_name='values') >>> sp.posthoc_tamhane(x, val_col='values', group_col='groups')
def invokeCompletionIfAvailable(self, requestedByUser=False): """Invoke completion, if available. Called after text has been typed in qpart Returns True, if invoked """ if self._qpart.completionEnabled and self._wordSet is not None: wordBeforeCursor = self._wordBeforeCursor() wholeWord = wordBeforeCursor + self._wordAfterCursor() forceShow = requestedByUser or self._completionOpenedManually if wordBeforeCursor: if len(wordBeforeCursor) >= self._qpart.completionThreshold or forceShow: if self._widget is None: model = _CompletionModel(self._wordSet) model.setData(wordBeforeCursor, wholeWord) if self._shouldShowModel(model, forceShow): self._createWidget(model) return True else: self._widget.model().setData(wordBeforeCursor, wholeWord) if self._shouldShowModel(self._widget.model(), forceShow): self._widget.updateGeometry() return True self._closeCompletion() return False
Invoke completion, if available. Called after text has been typed in qpart Returns True, if invoked
def get_current_desktop(self): """ Get the current desktop. Uses ``_NET_CURRENT_DESKTOP`` of the EWMH spec. """ desktop = ctypes.c_long(0) _libxdo.xdo_get_current_desktop(self._xdo, ctypes.byref(desktop)) return desktop.value
Get the current desktop. Uses ``_NET_CURRENT_DESKTOP`` of the EWMH spec.
def crashlog_status(**kwargs): """ Show crashlogs status. """ ctx = Context(**kwargs) ctx.execute_action('crashlog:status', **{ 'storage': ctx.repo.create_secure_service('storage'), })
Show crashlogs status.
def upload(self, resource_id, data): """Update the request URI to upload a document to this resource. Args: resource_id (integer): The group id. data (any): The raw data to upload. """ self.body = data self.content_type = 'application/octet-stream' self.resource_id(str(resource_id)) self._request_uri = '{}/upload'.format(self._request_uri)
Update the request URI to upload a document to this resource. Args: resource_id (integer): The group id. data (any): The raw data to upload.
def on_plugin_install(plugin_directory, ostream=sys.stdout): ''' Run ``on_plugin_install`` script for specified plugin directory (if available). **TODO** Add support for Linux, OSX. Parameters ---------- plugin_directory : str File system path to the plugin directory. ostream : file-like Output stream for status messages (default: ``sys.stdout``). ''' current_directory = os.getcwd() plugin_directory = ph.path(plugin_directory).realpath() print >> ostream, ('Processing post-install hook for: ' '{}'.format(plugin_directory.name)) hooks_dir_i = plugin_directory.joinpath('hooks/Windows').realpath() hook_path_i = hooks_dir_i.joinpath('on_plugin_install.bat') if hook_path_i.isfile(): logger.info('Processing post-install hook for: %s', plugin_directory.name) os.chdir(hook_path_i.parent) try: process = sp.Popen([hook_path_i, sys.executable], shell=True, stdin=sp.PIPE) # Emulate return key press in case plugin uses # "Press <enter> key to continue..." prompt. process.communicate(input='\r\n') if process.returncode != 0: raise RuntimeError('Process return code == {}' .format(process.returncode)) return hook_path_i except Exception, exception: raise RuntimeError('Error running: {}\n{}'.format(hook_path_i, exception)) finally: os.chdir(current_directory)
Run ``on_plugin_install`` script for specified plugin directory (if available). **TODO** Add support for Linux, OSX. Parameters ---------- plugin_directory : str File system path to the plugin directory. ostream : file-like Output stream for status messages (default: ``sys.stdout``).
def ResolvePrefix(self, subject, attribute_prefix, timestamp=None, limit=None): """Retrieve a set of values matching this subject's attribute prefix. Args: subject: The subject that we will search. attribute_prefix: The attribute prefix. timestamp: A range of times for consideration (In microseconds). Can be a constant such as ALL_TIMESTAMPS or NEWEST_TIMESTAMP or a tuple of ints (start, end). limit: The number of results to fetch. Returns: A list of (attribute, value string, timestamp). Values with the same attribute (happens when timestamp is not NEWEST_TIMESTAMP, but ALL_TIMESTAMPS or time range) are guaranteed to be ordered in the decreasing timestamp order. Raises: AccessError: if anything goes wrong. """ for _, values in self.MultiResolvePrefix([subject], attribute_prefix, timestamp=timestamp, limit=limit): values.sort(key=lambda a: a[0]) return values return []
Retrieve a set of values matching this subject's attribute prefix. Args: subject: The subject that we will search. attribute_prefix: The attribute prefix. timestamp: A range of times for consideration (In microseconds). Can be a constant such as ALL_TIMESTAMPS or NEWEST_TIMESTAMP or a tuple of ints (start, end). limit: The number of results to fetch. Returns: A list of (attribute, value string, timestamp). Values with the same attribute (happens when timestamp is not NEWEST_TIMESTAMP, but ALL_TIMESTAMPS or time range) are guaranteed to be ordered in the decreasing timestamp order. Raises: AccessError: if anything goes wrong.
def decode(self, data: bytes) -> bytes: """Decodes data according to the specified Content-Encoding or Content-Transfer-Encoding header values. """ if CONTENT_TRANSFER_ENCODING in self.headers: data = self._decode_content_transfer(data) if CONTENT_ENCODING in self.headers: return self._decode_content(data) return data
Decodes data according to the specified Content-Encoding or Content-Transfer-Encoding header values.
def apply_calibration(df, calibration_df, calibration): ''' Apply calibration values from `fit_fb_calibration` result to `calibration` object. ''' from dmf_control_board_firmware import FeedbackResults for i, (fb_resistor, R_fb, C_fb) in calibration_df[['fb_resistor', 'R_fb', 'C_fb']].iterrows(): calibration.R_fb[int(fb_resistor)] = R_fb calibration.C_fb[int(fb_resistor)] = C_fb cleaned_df = df.dropna() grouped = cleaned_df.groupby(['frequency', 'test_capacitor', 'repeat_index']) for (f, channel, repeat_index), group in grouped: r = FeedbackResults(group.V_actuation.iloc[0], f, 5.0, group.V_hv.values, group.hv_resistor.values, group.V_fb.values, group.fb_resistor.values, calibration) # Update the measured capacitance values based on the updated # calibration model. df.loc[group.index, 'C'] = r.capacitance()
Apply calibration values from `fit_fb_calibration` result to `calibration` object.
def from_config(cls, cp, data=None, delta_f=None, delta_t=None, gates=None, recalibration=None, **kwargs): """Initializes an instance of this class from the given config file. Parameters ---------- cp : WorkflowConfigParser Config file parser to read. data : dict A dictionary of data, in which the keys are the detector names and the values are the data. This is not retrieved from the config file, and so must be provided. delta_f : float The frequency spacing of the data; needed for waveform generation. delta_t : float The time spacing of the data; needed for time-domain waveform generators. recalibration : dict of pycbc.calibration.Recalibrate, optional Dictionary of detectors -> recalibration class instances for recalibrating data. gates : dict of tuples, optional Dictionary of detectors -> tuples specifying gate times. The sort of thing returned by `pycbc.gate.gates_from_cli`. \**kwargs : All additional keyword arguments are passed to the class. Any provided keyword will override what is in the config file. """ prior_section = "marginalized_prior" args = cls._init_args_from_config(cp) marg_prior = read_distributions_from_config(cp, prior_section) if len(marg_prior) == 0: raise AttributeError("No priors are specified for the " "marginalization. Please specify this in a " "section in the config file with heading " "{}-variable".format(prior_section)) params = [i.params[0] for i in marg_prior] marg_args = [k for k, v in args.items() if "_marginalization" in k] if len(marg_args) != len(params): raise ValueError("There is not a prior for each keyword argument") kwargs['marg_prior'] = marg_prior for i in params: kwargs[i+"_marginalization"] = True args.update(kwargs) variable_params = args['variable_params'] args["data"] = data try: static_params = args['static_params'] except KeyError: static_params = {} # set up waveform generator try: approximant = static_params['approximant'] except KeyError: raise ValueError("no approximant provided in the static args") generator_function = generator.select_waveform_generator(approximant) waveform_generator = generator.FDomainDetFrameGenerator( generator_function, epoch=data.values()[0].start_time, variable_args=variable_params, detectors=data.keys(), delta_f=delta_f, delta_t=delta_t, recalib=recalibration, gates=gates, **static_params) args['waveform_generator'] = waveform_generator args["f_lower"] = static_params["f_lower"] return cls(**args)
Initializes an instance of this class from the given config file. Parameters ---------- cp : WorkflowConfigParser Config file parser to read. data : dict A dictionary of data, in which the keys are the detector names and the values are the data. This is not retrieved from the config file, and so must be provided. delta_f : float The frequency spacing of the data; needed for waveform generation. delta_t : float The time spacing of the data; needed for time-domain waveform generators. recalibration : dict of pycbc.calibration.Recalibrate, optional Dictionary of detectors -> recalibration class instances for recalibrating data. gates : dict of tuples, optional Dictionary of detectors -> tuples specifying gate times. The sort of thing returned by `pycbc.gate.gates_from_cli`. \**kwargs : All additional keyword arguments are passed to the class. Any provided keyword will override what is in the config file.
def omit_deep(omit_props, dct): """ Implementation of omit that recurses. This tests the same keys at every level of dict and in lists :param omit_props: :param dct: :return: """ # curry ourselves so the recursion can be mapped over values omit_partial = lambda value: omit_deep(omit_props, value) if isinstance(dct, dict): # Filter out keys and then recurse on each value that wasn't filtered out return map_dict(omit_partial, compact_dict(omit(omit_props, dct))) if isinstance(dct, (list, tuple)): # run omit_deep on each value return list(map(omit_partial, dct)) # scalar return dct
Implementation of omit that recurses. This tests the same keys at every level of dict and in lists :param omit_props: :param dct: :return:
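Hedged usage sketch of the corrected function, assuming omit/compact_dict/map_dict behave like their functional-utility namesakes:

```python
nested = {'a': 1, 'secret': 2, 'child': {'secret': 3, 'b': 4}}
print(omit_deep(['secret'], nested))
# {'a': 1, 'child': {'b': 4}} -- the key is dropped at every depth
```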
def update_event_types(self): """Update event types in event type box.""" self.idx_evt_type.clear() self.idx_evt_type.setSelectionMode(QAbstractItemView.ExtendedSelection) event_types = sorted(self.parent.notes.annot.event_types, key=str.lower) for ty in event_types: item = QListWidgetItem(ty) self.idx_evt_type.addItem(item)
Update event types in event type box.
def predictions(self, stpid="", rt="", vid="", maxpredictions=""): """ Retrieve predictions for 1+ stops or 1+ vehicles. Arguments: `stpid`: unique ID number for bus stop (single or comma-separated list or iterable) or `vid`: vehicle ID number (single or comma-separated list or iterable) or `stpid` and `rt` `maxpredictions` (optional): limit number of predictions returned Response: `prd`: (prediction container) contains list of `tmstp`: when prediction was generated `typ`: prediction type ('A' = arrival, 'D' = departure) `stpid`: stop ID for prediction `stpnm`: stop name for prediction `vid`: vehicle ID for prediction `dstp`: vehicle distance to stop (feet) `rt`: bus route `des`: bus destination `prdtm`: ETA/ETD `dly`: True if bus delayed `tablockid`, `tatripid`, `zone`: internal, see `self.vehicles` http://realtime.portauthority.org/bustime/apidoc/v1/main.jsp?section=predictions.jsp """ if (stpid and vid) or (rt and vid): raise ValueError("These parameters cannot be specified simultaneously.") elif not (stpid or rt or vid): raise ValueError("You must specify a parameter.") if listlike(stpid): stpid = ",".join(stpid) if listlike(rt): rt = ",".join(rt) if listlike(vid): vid = ",".join(vid) if stpid or (rt and stpid) or vid: url = self.endpoint('PREDICTION', dict(rt=rt, stpid=stpid, vid=vid, top=maxpredictions)) return self.response(url)
Retrieve predictions for 1+ stops or 1+ vehicles. Arguments: `stpid`: unique ID number for bus stop (single or comma-separated list or iterable) or `vid`: vehicle ID number (single or comma-separated list or iterable) or `stpid` and `rt` `maxpredictions` (optional): limit number of predictions returned Response: `prd`: (prediction container) contains list of `tmstp`: when prediction was generated `typ`: prediction type ('A' = arrival, 'D' = departure) `stpid`: stop ID for prediction `stpnm`: stop name for prediction `vid`: vehicle ID for prediction `dstp`: vehicle distance to stop (feet) `rt`: bus route `des`: bus destination `prdtm`: ETA/ETD `dly`: True if bus delayed `tablockid`, `tatripid`, `zone`: internal, see `self.vehicles` http://realtime.portauthority.org/bustime/apidoc/v1/main.jsp?section=predictions.jsp
def hav_dist(locs1, locs2): """ Return a distance matrix between two sets of coordinates, computed with the haversine (great-circle) formula. Coordinates are expected in radians. Parameters ---------- locs1 : numpy.array The first set of coordinates as [(lat, lon), (lat, lon)]. locs2 : numpy.array The second set of coordinates as [(lat, lon), (lat, lon)]. Returns ------- mat_dist : numpy.array The distance matrix between locs1 and locs2 """ # locs1 = np.radians(locs1) # locs2 = np.radians(locs2) cos_lat1 = np.cos(locs1[..., 0]) cos_lat2 = np.cos(locs2[..., 0]) cos_lat_d = np.cos(locs1[..., 0] - locs2[..., 0]) cos_lon_d = np.cos(locs1[..., 1] - locs2[..., 1]) return 6367000 * np.arccos( cos_lat_d - cos_lat1 * cos_lat2 * (1 - cos_lon_d))
Return a distance matrix between two sets of coordinates, computed with the haversine (great-circle) formula. Coordinates are expected in radians. Parameters ---------- locs1 : numpy.array The first set of coordinates as [(lat, lon), (lat, lon)]. locs2 : numpy.array The second set of coordinates as [(lat, lon), (lat, lon)]. Returns ------- mat_dist : numpy.array The distance matrix between locs1 and locs2
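Hedged usage sketch: the commented-out np.radians calls suggest inputs are already expected in radians, so convert degrees first. The coordinates below are illustrative.

```python
import numpy as np

paris = np.radians(np.array([48.8566, 2.3522]))    # (lat, lon) in radians
london = np.radians(np.array([51.5074, -0.1278]))
print(hav_dist(paris, london))  # roughly 3.4e5 metres on the assumed 6367 km sphere
```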
def getSequenceCombinaisons(polymorphipolymorphicDnaSeqSeq, pos = 0) : """Takes a DNA sequence with polymorphisms and returns all the possible sequences that it can yield""" if type(polymorphipolymorphicDnaSeqSeq) is not types.ListType : seq = list(polymorphipolymorphicDnaSeqSeq) else : seq = polymorphipolymorphicDnaSeqSeq if pos >= len(seq) : return [''.join(seq)] variants = [] if seq[pos] in polymorphicNucleotides : chars = decodePolymorphicNucleotide(seq[pos]) else : chars = seq[pos]#.split('/') for c in chars : rseq = copy.copy(seq) rseq[pos] = c variants.extend(getSequenceCombinaisons(rseq, pos + 1)) return variants
Takes a DNA sequence with polymorphisms and returns all the possible sequences that it can yield
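Hedged usage sketch, assuming decodePolymorphicNucleotide('R') yields the IUPAC pair ('A', 'G'); the expansion order depends on that decoder.

```python
print(getSequenceCombinaisons('ART'))  # ['AAT', 'AGT']
```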
def _get_model_table(self, part): """ Returns a list that represents the table. :param part: The table header, table footer or table body. :type part: hatemile.util.html.htmldomelement.HTMLDOMElement :return: The list that represents the table. :rtype: list(list(hatemile.util.html.htmldomelement.HTMLDOMElement)) """ rows = self.parser.find(part).find_children('tr').list_results() table = [] for row in rows: table.append(self._get_model_row(self.parser.find( row ).find_children('td,th').list_results())) return self._get_valid_model_table(table)
Returns a list that represents the table. :param part: The table header, table footer or table body. :type part: hatemile.util.html.htmldomelement.HTMLDOMElement :return: The list that represents the table. :rtype: list(list(hatemile.util.html.htmldomelement.HTMLDOMElement))
def calcNewEdges(wcs, shape): """ This method will compute sky coordinates for all the pixels around the edge of an image AFTER applying the geometry model. Parameters ---------- wcs : obj HSTWCS object for image shape : tuple numpy shape tuple for size of image Returns ------- border : arr array which contains the new positions for all pixels around the border of the edges in alpha,dec """ naxis1 = shape[1] naxis2 = shape[0] # build up arrays for pixel positions for the edges # These arrays need to be: array([(x,y),(x1,y1),...]) numpix = naxis1*2 + naxis2*2 border = np.zeros(shape=(numpix,2),dtype=np.float64) # Now determine the appropriate values for this array # We also need to account for any subarray offsets xmin = 1. xmax = naxis1 ymin = 1. ymax = naxis2 # Build range of pixel values for each side # Add 1 to make them consistent with pixel numbering in IRAF # Also include the LTV offsets to represent position in full chip # since the model works relative to full chip positions. xside = np.arange(naxis1) + xmin yside = np.arange(naxis2) + ymin #Now apply them to the array to generate the appropriate tuples #bottom _range0 = 0 _range1 = naxis1 border[_range0:_range1,0] = xside border[_range0:_range1,1] = ymin #top _range0 = _range1 _range1 = _range0 + naxis1 border[_range0:_range1,0] = xside border[_range0:_range1,1] = ymax #left _range0 = _range1 _range1 = _range0 + naxis2 border[_range0:_range1,0] = xmin border[_range0:_range1,1] = yside #right _range0 = _range1 _range1 = _range0 + naxis2 border[_range0:_range1,0] = xmax border[_range0:_range1,1] = yside edges = wcs.all_pix2world(border[:,0],border[:,1],1) return edges
This method will compute sky coordinates for all the pixels around the edge of an image AFTER applying the geometry model. Parameters ---------- wcs : obj HSTWCS object for image shape : tuple numpy shape tuple for size of image Returns ------- border : arr array which contains the new positions for all pixels around the border of the edges in alpha,dec
def get_asset_lookup_session_for_repository(self, repository_id, proxy, *args, **kwargs):
    """Gets the OsidSession associated with the asset lookup service for the given repository.

    arg:    repository_id (osid.id.Id): the Id of the repository
    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.repository.AssetLookupSession) - the new AssetLookupSession
    raise:  NotFound - repository_id not found
    raise:  NullArgument - repository_id is null
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - supports_asset_lookup() or supports_visible_federation() is false
    compliance: optional - This method must be implemented if
                supports_asset_lookup() and supports_visible_federation() are true.

    """
    if not repository_id:
        raise NullArgument()
    if not self.supports_asset_lookup():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed('import error')
    proxy = self._convert_proxy(proxy)
    try:
        session = sessions.AssetLookupSession(repository_id, proxy, runtime=self._runtime, **kwargs)
    except AttributeError:
        raise OperationFailed('attribute error')
    return session
Gets the OsidSession associated with the asset lookup service for the given repository.

arg:    repository_id (osid.id.Id): the Id of the repository
arg:    proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetLookupSession) - the new AssetLookupSession
raise:  NotFound - repository_id not found
raise:  NullArgument - repository_id is null
raise:  OperationFailed - unable to complete request
raise:  Unimplemented - supports_asset_lookup() or supports_visible_federation() is false
compliance: optional - This method must be implemented if supports_asset_lookup() and supports_visible_federation() are true.
def schemaValidCtxtGetParserCtxt(self):
    """allow access to the parser context of the schema validation context """
    ret = libxml2mod.xmlSchemaValidCtxtGetParserCtxt(self._o)
    if ret is None:
        raise parserError('xmlSchemaValidCtxtGetParserCtxt() failed')
    __tmp = parserCtxt(_obj=ret)
    return __tmp
allow access to the parser context of the schema validation context
def function_call_prepare_action(self, text, loc, fun):
    """Code executed after recognising a function call (type and function name)"""
    exshared.setpos(loc, text)
    if DEBUG > 0:
        print("FUN_PREP:",fun)
        if DEBUG == 2: self.symtab.display()
        if DEBUG > 2: return
    index = self.symtab.lookup_symbol(fun.name, SharedData.KINDS.FUNCTION)
    if index is None:
        raise SemanticException("'%s' is not a function" % fun.name)
    # save any previous function call data (for nested function calls)
    self.function_call_stack.append(self.function_call_index)
    self.function_call_index = index
    self.function_arguments_stack.append(self.function_arguments[:])
    del self.function_arguments[:]
    self.codegen.save_used_registers()
Code executed after recognising a function call (type and function name)
def profile_remove(name, **kwargs): """ Remove profile from the storage. """ ctx = Context(**kwargs) ctx.execute_action('profile:remove', **{ 'storage': ctx.repo.create_secure_service('storage'), 'name': name, })
Remove profile from the storage.
def domain_delete(auth=None, **kwargs): ''' Delete a domain CLI Example: .. code-block:: bash salt '*' keystoneng.domain_delete name=domain1 salt '*' keystoneng.domain_delete name=b62e76fbeeff4e8fb77073f591cf211e ''' cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.delete_domain(**kwargs)
Delete a domain CLI Example: .. code-block:: bash salt '*' keystoneng.domain_delete name=domain1 salt '*' keystoneng.domain_delete name=b62e76fbeeff4e8fb77073f591cf211e
def get_related_galleries(gallery, count=5):
    """
    Gets latest related galleries from same section as originating gallery.

    Count defaults to five but can be overridden.

    Usage:
    {% get_related_galleries gallery <10> %}

    """
    # just get the first cat. If they assigned to more than one, tough
    try:
        cat = gallery.sections.all()[0]
        related = cat.gallery_categories.filter(published=True).exclude(id=gallery.id).order_by('-id')[:count]
    except Exception:  # no sections assigned or lookup failed: show nothing
        related = None
    return {'related': related, 'MEDIA_URL': settings.MEDIA_URL}
Gets latest related galleries from same section as originating gallery. Count defaults to five but can be overridden. Usage: {% get_related_galleries gallery <10> %}
def create_examples_train(candidate_dialog_paths, rng, positive_probability=0.5, max_context_length=20):
    """
    Creates a list of training examples, one per candidate dialog.
    :param candidate_dialog_paths:
    :param rng:
    :param positive_probability: probability of selecting positive training example
    :return: list of training examples
    """
    i = 0
    examples = []
    for context_dialog in candidate_dialog_paths:
        if i % 1000 == 0:
            print str(i)
        dialog_path = candidate_dialog_paths[i]
        examples.append(create_single_dialog_train_example(dialog_path, candidate_dialog_paths, rng,
                                                           positive_probability, max_context_length=max_context_length))
        i += 1
    return examples
Creates a list of training examples, one per candidate dialog.
:param candidate_dialog_paths:
:param rng:
:param positive_probability: probability of selecting positive training example
:return: list of training examples
def __build_lxml(target, source, env):
    """
    General XSLT builder (HTML/FO), using the lxml module.
    """
    from lxml import etree

    xslt_ac = etree.XSLTAccessControl(read_file=True,
                                      write_file=True,
                                      create_dir=True,
                                      read_network=False,
                                      write_network=False)
    xsl_style = env.subst('$DOCBOOK_XSL')
    xsl_tree = etree.parse(xsl_style)
    transform = etree.XSLT(xsl_tree, access_control=xslt_ac)
    doc = etree.parse(str(source[0]))
    # Support for additional parameters
    parampass = {}
    if parampass:
        result = transform(doc, **parampass)
    else:
        result = transform(doc)

    try:
        of = open(str(target[0]), "wb")
        of.write(etree.tostring(result, pretty_print=True))
        of.close()
    except Exception:
        pass

    return None
General XSLT builder (HTML/FO), using the lxml module.
def get(self, client_method, get_params, is_json=True): """Make a GET request""" url = self._wa.apollo_url + self.CLIENT_BASE + client_method headers = {} response = requests.get(url, headers=headers, verify=self.__verify, params=get_params, **self._request_args) if response.status_code == 200: if is_json: data = response.json() return self._scrub_data(data) else: return response.text # @see self.body for HTTP response body raise Exception("Unexpected response from apollo %s: %s" % (response.status_code, response.text))
Make a GET request
def checkRequirements(sender,**kwargs): ''' Check that the customer meets all prerequisites for the items in the registration. ''' if not getConstant('requirements__enableRequirements'): return logger.debug('Signal to check RegistrationContactForm handled by prerequisites app.') formData = kwargs.get('formData',{}) first = formData.get('firstName') last = formData.get('lastName') email = formData.get('email') request = kwargs.get('request',{}) registration = kwargs.get('registration',None) customer = Customer.objects.filter( first_name=first, last_name=last, email=email).first() requirement_warnings = [] requirement_errors = [] for ter in registration.temporaryeventregistration_set.all(): if hasattr(ter.event,'getRequirements'): for req in ter.event.getRequirements(): if not req.customerMeetsRequirement( customer=customer, danceRole=ter.role ): if req.enforcementMethod == Requirement.EnforcementChoice.error: requirement_errors.append((ter.event.name, req.name)) if req.enforcementMethod == Requirement.EnforcementChoice.warning: requirement_warnings.append((ter.event.name,req.name)) if requirement_errors: raise ValidationError(format_html( '<p>{}</p> <ul>{}</ul> <p>{}</p>', ugettext('Unfortunately, you do not meet the following requirements/prerequisites for the items you have chosen:\n'), mark_safe(''.join(['<li><em>%s:</em> %s</li>\n' % x for x in requirement_errors])), getConstant('requirements__errorMessage') or '', )) if requirement_warnings: messages.warning(request,format_html( '<p>{}</p> <ul>{}</ul> <p>{}</p>', mark_safe(ugettext('<strong>Please Note:</strong> It appears that you do not meet the following requirements/prerequisites for the items you have chosen:\n')), mark_safe(''.join(['<li><em>%s:</em> %s</li>\n' % x for x in requirement_warnings])), getConstant('requirements__warningMessage') or '', ))
Check that the customer meets all prerequisites for the items in the registration.
def refresh_lock(lock_file): """'Refresh' an existing lock. 'Refresh' an existing lock by re-writing the file containing the owner's unique id, using a new (randomly generated) id, which is also returned. """ unique_id = '%s_%s_%s' % ( os.getpid(), ''.join([str(random.randint(0, 9)) for i in range(10)]), hostname) try: lock_write = open(lock_file, 'w') lock_write.write(unique_id + '\n') lock_write.close() except Exception: # In some strange case, this happen. To prevent all tests # from failing, we release the lock, but as there is a # problem, we still keep the original exception. # This way, only 1 test would fail. while get_lock.n_lock > 0: release_lock() raise return unique_id
'Refresh' an existing lock.

'Refresh' an existing lock by re-writing the file containing the owner's
unique id, using a new (randomly generated) id, which is also returned.
def as_tuple(obj): " Given obj return a tuple " if not obj: return tuple() if isinstance(obj, (tuple, set, list)): return tuple(obj) if hasattr(obj, '__iter__') and not isinstance(obj, dict): return obj return obj,
Given obj return a tuple
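Tracing the branches above (a sketch, not part of the source):

as_tuple(None)      # -> ()           falsy input
as_tuple([1, 2])    # -> (1, 2)       list/set/tuple are converted
as_tuple({'a': 1})  # -> ({'a': 1},)  dicts are wrapped, not iterated
as_tuple(5)         # -> (5,)         scalars are wrapped
# Caveat: other iterables (e.g. generators) are returned unchanged, so the
# "return a tuple" promise only holds for the branches above.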
def main(args=sys.argv[1:]): '''Processes command line arguments and file i/o''' if not args: sys.stderr.write(_usage() + '\n') sys.exit(4) else: parsed = _parse_args(args) # Set delim based on whether or not regex is desired by user delim = parsed.delimiter if parsed.regex else re.escape(parsed.delimiter) # Keep track of number of cutters used to allow error handling if # multiple options selected (only one at a time is accepted) num_cutters = 0 # Read mode will be used as file read mode later. 'r' is default, changed # to 'rb' in the event that binary read mode is selected by user read_mode = 'r' if parsed.bytes: positions = parsed.bytes cutter = ByteCutter(positions) num_cutters += 1 read_mode = 'rb' if parsed.chars: positions = parsed.chars cutter = CharCutter(positions) num_cutters += 1 if parsed.fields: positions = parsed.fields cutter = FieldCutter(positions, delim, parsed.separator) num_cutters += 1 # Make sure only one option of -b,-c, or -f is used if num_cutters > 1: sys.stderr.write('Only one option permitted of -b, -c, -f.\n') sys.stderr.write(_usage() + '\n') sys.exit(1) # Check for possible specification of zero index, which is not allowed. # Regular expression checks for zero by itself, or in range specification if [n for n in positions if re.search("0:?|0$", n)]: sys.stderr.write('Zero is an invalid position.\n') sys.stderr.write(_usage() + '\n') sys.exit(2) try: for line in fileinput.input(parsed.file, mode=read_mode): if parsed.skip and not re.search(parsed.delimiter, line): pass else: # Using sys.stdout.write for consistency between Py 2 and 3, # keeping linter happy print(cutter.cut(line)) except IOError: sys.stderr.write('File \'' + fileinput.filename() + '\' could not be found.\n') sys.exit(3) fileinput.close()
Processes command line arguments and file i/o
def get(self, request, slug): """Basic functionality for GET request to view. """ matching_datasets = self.generate_matching_datasets(slug) if matching_datasets is None: raise Http404("Datasets meeting these criteria do not exist.") base_context = { 'datasets': matching_datasets, 'num_datasets': matching_datasets.count(), 'page_title': self.generate_page_title(slug), } additional_context = self.generate_additional_context( matching_datasets ) base_context.update(additional_context) context = base_context return render( request, self.template_path, context )
Basic functionality for GET request to view.
def clean_proc_dir(opts): ''' Loop through jid files in the minion proc directory (default /var/cache/salt/minion/proc) and remove any that refer to processes that no longer exist ''' for basefilename in os.listdir(salt.minion.get_proc_dir(opts['cachedir'])): fn_ = os.path.join(salt.minion.get_proc_dir(opts['cachedir']), basefilename) with salt.utils.files.fopen(fn_, 'rb') as fp_: job = None try: job = salt.payload.Serial(opts).load(fp_) except Exception: # It's corrupted # Windows cannot delete an open file if salt.utils.platform.is_windows(): fp_.close() try: os.unlink(fn_) continue except OSError: continue log.debug( 'schedule.clean_proc_dir: checking job %s for process ' 'existence', job ) if job is not None and 'pid' in job: if salt.utils.process.os_is_running(job['pid']): log.debug( 'schedule.clean_proc_dir: Cleaning proc dir, pid %s ' 'still exists.', job['pid'] ) else: # Windows cannot delete an open file if salt.utils.platform.is_windows(): fp_.close() # Maybe the file is already gone try: os.unlink(fn_) except OSError: pass
Loop through jid files in the minion proc directory (default /var/cache/salt/minion/proc) and remove any that refer to processes that no longer exist
def _clean_value(key, val): ''' Clean out well-known bogus values. If it isn't clean (for example has value 'None'), return None. Otherwise, return the original value. NOTE: This logic also exists in the smbios module. This function is for use when not using smbios to retrieve the value. ''' if (val is None or not val or re.match('none', val, flags=re.IGNORECASE)): return None elif 'uuid' in key: # Try each version (1-5) of RFC4122 to check if it's actually a UUID for uuidver in range(1, 5): try: uuid.UUID(val, version=uuidver) return val except ValueError: continue log.trace('HW %s value %s is an invalid UUID', key, val.replace('\n', ' ')) return None elif re.search('serial|part|version', key): # 'To be filled by O.E.M. # 'Not applicable' etc. # 'Not specified' etc. # 0000000, 1234567 etc. # begone! if (re.match(r'^[0]+$', val) or re.match(r'[0]?1234567[8]?[9]?[0]?', val) or re.search(r'sernum|part[_-]?number|specified|filled|applicable', val, flags=re.IGNORECASE)): return None elif re.search('asset|manufacturer', key): # AssetTag0. Manufacturer04. Begone. if re.search(r'manufacturer|to be filled|available|asset|^no(ne|t)', val, flags=re.IGNORECASE): return None else: # map unspecified, undefined, unknown & whatever to None if (re.search(r'to be filled', val, flags=re.IGNORECASE) or re.search(r'un(known|specified)|no(t|ne)? (asset|provided|defined|available|present|specified)', val, flags=re.IGNORECASE)): return None return val
Clean out well-known bogus values. If it isn't clean (for example has value 'None'), return None. Otherwise, return the original value. NOTE: This logic also exists in the smbios module. This function is for use when not using smbios to retrieve the value.
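Illustrative calls tracing the branches above (the values are hypothetical DMI strings):

_clean_value('biosreleasedate', 'none')                        # -> None  known bogus
_clean_value('uuid', '03000200-0400-0500-0006-000700080009')   # -> the value, it parses
_clean_value('serialnumber', '0000000')                        # -> None  all zeros
_clean_value('manufacturer', 'To Be Filled By O.E.M.')         # -> None
_clean_value('productname', 'PowerEdge R640')                  # -> 'PowerEdge R640'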
def eventFilter(self, object, event): """ Processes when the window is moving to update the position for the popup if in popup mode. :param object | <QObject> event | <QEvent> """ if not self.isVisible(): return False links = self.positionLinkedTo() is_dialog = self.currentMode() == self.Mode.Dialog if object not in links: return False if event.type() == event.Close: self.close() return False if event.type() == event.Hide and not is_dialog: self.hide() return False if event.type() == event.Move and not is_dialog: deltaPos = event.pos() - event.oldPos() self.move(self.pos() + deltaPos) return False if self.currentMode() != self.Mode.ToolTip: return False if event.type() == event.Leave: pos = object.mapFromGlobal(QCursor.pos()) if (not object.rect().contains(pos)): self.close() event.accept() return True if event.type() in (event.MouseButtonPress, event.MouseButtonDblClick): self.close() event.accept() return True return False
Processes when the window is moving to update the position for the popup if in popup mode. :param object | <QObject> event | <QEvent>
def compile(pattern, namespaces=None, flags=0, **kwargs): # noqa: A001 """Compile CSS pattern.""" if namespaces is not None: namespaces = ct.Namespaces(**namespaces) custom = kwargs.get('custom') if custom is not None: custom = ct.CustomSelectors(**custom) if isinstance(pattern, SoupSieve): if flags: raise ValueError("Cannot process 'flags' argument on a compiled selector list") elif namespaces is not None: raise ValueError("Cannot process 'namespaces' argument on a compiled selector list") elif custom is not None: raise ValueError("Cannot process 'custom' argument on a compiled selector list") return pattern return cp._cached_css_compile(pattern, namespaces, custom, flags)
Compile CSS pattern.
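A hedged usage sketch against the public soupsieve API this function backs; the HTML snippet is illustrative:

import soupsieve as sv
from bs4 import BeautifulSoup

soup = BeautifulSoup('<div><p class="a">hi</p><p>bye</p></div>', 'html.parser')
selector = sv.compile('p.a')   # compile the pattern once...
print(selector.select(soup))   # ...reuse it: [<p class="a">hi</p>]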
def stats(self):
    """
    Return statistics calculated over all samples of all utterances in the corpus.

    Returns:
        DataStats: A DataStats object containing statistics over all samples in the corpus.
    """
    per_utt_stats = self.stats_per_utterance()
    return stats.DataStats.concatenate(per_utt_stats.values())
Return statistics calculated over all samples of all utterances in the corpus.

Returns:
    DataStats: A DataStats object containing statistics over all samples in the corpus.
def _list_files(root):
    """
    Lists all of the files in a directory, taking into account any .gitignore
    file that is present

    :param root:
        A unicode filesystem path

    :return:
        A list of unicode strings, containing paths of all files not ignored
        by .gitignore with root, using relative paths
    """
    dir_patterns, file_patterns = _gitignore(root)
    paths = []
    prefix = os.path.abspath(root) + os.sep
    for base, dirs, files in os.walk(root):
        for d in list(dirs):  # iterate over a copy so removing entries is safe
            for dir_pattern in dir_patterns:
                if fnmatch(d, dir_pattern):
                    dirs.remove(d)
                    break
        for f in files:
            skip = False
            for file_pattern in file_patterns:
                if fnmatch(f, file_pattern):
                    skip = True
                    break
            if skip:
                continue
            full_path = os.path.join(base, f)
            if full_path[:len(prefix)] == prefix:
                full_path = full_path[len(prefix):]
            paths.append(full_path)
    return sorted(paths)
Lists all of the files in a directory, taking into account any .gitignore file that is present :param root: A unicode filesystem path :return: A list of unicode strings, containing paths of all files not ignored by .gitignore with root, using relative paths
def main(arguments=None): """ *The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command* """ # setup the command-line util settings su = tools( arguments=arguments, docString=__doc__, logLevel="DEBUG", options_first=False, projectName="picaxe" ) arguments, settings, log, dbConn = su.setup() startTime = times.get_now_sql_datetime() # unpack remaining cl arguments using `exec` to setup the variable names # automatically for arg, val in arguments.iteritems(): if arg[0] == "-": varname = arg.replace("-", "") + "Flag" else: varname = arg.replace("<", "").replace(">", "") if isinstance(val, str) or isinstance(val, unicode): exec(varname + " = '%s'" % (val,)) else: exec(varname + " = %s" % (val,)) if arg == "--dbConn": dbConn = val log.debug('%s = %s' % (varname, val,)) if init: from os.path import expanduser home = expanduser("~") filepath = home + "/.config/picaxe/picaxe.yaml" try: cmd = """open %(filepath)s""" % locals() p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) except: pass try: cmd = """start %(filepath)s""" % locals() p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) except: pass if auth: from picaxe import picaxe client = picaxe( log=log, settings=settings, pathToSettingsFile=pathToSettingsFile ) client.authenticate() if md: from picaxe import picaxe Flickr = picaxe( log=log, settings=settings ) if not width: width = "original" mdLink = Flickr.md( url=urlOrPhotoid, # [75, 100, 150, 240, 320, 500, 640, 800, 1024, 1600, 2048] width=width ) print mdLink if albums: from picaxe import picaxe flickr = picaxe( log=log, settings=settings ) albumList = flickr.list_album_titles() for a in albumList: print a if upload: from picaxe import picaxe flickr = picaxe( log=log, settings=settings ) imageType = "photo" if screenGrabFlag: imageType = "screengrab" elif imageFlag: imageType = "image" album = "inbox" if albumFlag: album = albumFlag photoid = flickr.upload( imagePath=imagePath, title=titleFlag, private=publicFlag, tags=tagsFlag, description=descFlag, imageType=imageType, # image|screengrab|photo album=albumFlag, openInBrowser=openFlag ) print photoid if grab: # for k, v in locals().iteritems(): # print k, v # return try: os.remove("/tmp/screengrab.png") except: pass if delayFlag: time.sleep(int(delayFlag)) from subprocess import Popen, PIPE, STDOUT cmd = """screencapture -i /tmp/screengrab.png""" % locals() p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) stdout, stderr = p.communicate() log.debug('output: %(stdout)s' % locals()) exists = os.path.exists("/tmp/screengrab.png") if exists: from picaxe import picaxe flickr = picaxe( log=log, settings=settings ) if not albumFlag: albumFlag = "screengrabs" photoid = flickr.upload( imagePath="/tmp/screengrab.png", title=titleFlag, private=publicFlag, tags=tagsFlag, description=descFlag, imageType="screengrab", # image|screengrab|photo album=albumFlag, openInBrowser=openFlag ) mdLink = flickr.md( url=photoid, # [75, 100, 150, 240, 320, 500, 640, 800, 1024, 1600, 2048] width="original" ) print mdLink # CALL FUNCTIONS/OBJECTS if "dbConn" in locals() and dbConn: dbConn.commit() dbConn.close() ## FINISH LOGGING ## endTime = times.get_now_sql_datetime() runningTime = times.calculate_time_difference(startTime, endTime) log.info('-- FINISHED ATTEMPT TO RUN THE cl_utils.py AT %s (RUNTIME: %s) --' % (endTime, runningTime, )) return
*The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command*
def _put_bucket_website(self): """Configure static website on S3 bucket.""" if self.s3props['website']['enabled']: website_config = { 'ErrorDocument': { 'Key': self.s3props['website']['error_document'] }, 'IndexDocument': { 'Suffix': self.s3props['website']['index_suffix'] } } _response = self.s3client.put_bucket_website(Bucket=self.bucket, WebsiteConfiguration=website_config) self._put_bucket_cors() self._set_bucket_dns() else: _response = self.s3client.delete_bucket_website(Bucket=self.bucket) self._put_bucket_cors() LOG.debug('Response setting up S3 website: %s', _response) LOG.info('S3 website settings updated')
Configure static website on S3 bucket.
async def seek(self, pos, *, device: Optional[SomeDevice] = None): """Seeks to the given position in the user’s currently playing track. Parameters ---------- pos : int The position in milliseconds to seek to. Must be a positive number. Passing in a position that is greater than the length of the track will cause the player to start playing the next song. device : Optional[:obj:`SomeDevice`] The Device object or id of the device this command is targeting. If not supplied, the user’s currently active device is the target. """ await self._user.http.seek_playback(pos, device_id=str(device))
Seeks to the given position in the user’s currently playing track. Parameters ---------- pos : int The position in milliseconds to seek to. Must be a positive number. Passing in a position that is greater than the length of the track will cause the player to start playing the next song. device : Optional[:obj:`SomeDevice`] The Device object or id of the device this command is targeting. If not supplied, the user’s currently active device is the target.
def get_nas_credentials(self, identifier, **kwargs):
    """Returns the credentials of the given network storage (NAS) account.

    :param integer identifier: the network storage account ID
    :returns: A dictionary containing the account's credential information.
    """
    result = self.network_storage.getObject(id=identifier, **kwargs)
    return result
Returns the credentials of the given network storage (NAS) account.

:param integer identifier: the network storage account ID
:returns: A dictionary containing the account's credential information.
def get_lemma_by_id(self, mongo_id): ''' Builds a Lemma object from the database entry with the given ObjectId. Arguments: - `mongo_id`: a bson.objectid.ObjectId object ''' cache_hit = None if self._lemma_cache is not None: cache_hit = self._lemma_cache.get(mongo_id) if cache_hit is not None: return cache_hit lemma_dict = self._mongo_db.lexunits.find_one({'_id': mongo_id}) if lemma_dict is not None: lemma = Lemma(self, lemma_dict) if self._lemma_cache is not None: self._lemma_cache.put(mongo_id, lemma) return lemma
Builds a Lemma object from the database entry with the given ObjectId. Arguments: - `mongo_id`: a bson.objectid.ObjectId object
def _example_short_number_for_cost(region_code, cost):
    """Gets a valid short number for the specified cost category.

    Arguments:
    region_code -- the region for which an example short number is needed.
    cost -- the cost category of number that is needed.

    Returns a valid short number for the specified region and cost
    category. Returns an empty string when the metadata does not contain
    such information, or the cost is UNKNOWN_COST.
    """
    metadata = PhoneMetadata.short_metadata_for_region(region_code)
    if metadata is None:
        return U_EMPTY_STRING
    desc = None
    if cost == ShortNumberCost.TOLL_FREE:
        desc = metadata.toll_free
    elif cost == ShortNumberCost.STANDARD_RATE:
        desc = metadata.standard_rate
    elif cost == ShortNumberCost.PREMIUM_RATE:
        desc = metadata.premium_rate
    else:
        # ShortNumberCost.UNKNOWN_COST numbers are computed by the process of
        # elimination from the other cost categories.
        pass
    if desc is not None and desc.example_number is not None:
        return desc.example_number
    return U_EMPTY_STRING
Gets a valid short number for the specified cost category. Arguments: region_code -- the region for which an example short number is needed. cost -- the cost category of number that is needed. Returns a valid short number for the specified region and cost category. Returns an empty string when the metadata does not contain such information, or the cost is UNKNOWN_COST.
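A hedged sketch of calling this helper directly; python-phonenumbers exposes the same behaviour publicly, and the exact number returned depends on the bundled metadata:

from phonenumbers.shortnumberinfo import ShortNumberCost

print(_example_short_number_for_cost('FR', ShortNumberCost.TOLL_FREE))     # e.g. a short code such as '3010'
print(_example_short_number_for_cost('FR', ShortNumberCost.UNKNOWN_COST))  # '' -- UNKNOWN_COST has no examples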
def _check_compound_minions(self, expr, delimiter, greedy, pillar_exact=False): # pylint: disable=unused-argument ''' Return the minions found by looking via compound matcher ''' if not isinstance(expr, six.string_types) and not isinstance(expr, (list, tuple)): log.error('Compound target that is neither string, list nor tuple') return {'minions': [], 'missing': []} minions = set(self._pki_minions()) log.debug('minions: %s', minions) nodegroups = self.opts.get('nodegroups', {}) if self.opts.get('minion_data_cache', False): ref = {'G': self._check_grain_minions, 'P': self._check_grain_pcre_minions, 'I': self._check_pillar_minions, 'J': self._check_pillar_pcre_minions, 'L': self._check_list_minions, 'N': None, # nodegroups should already be expanded 'S': self._check_ipcidr_minions, 'E': self._check_pcre_minions, 'R': self._all_minions} if pillar_exact: ref['I'] = self._check_pillar_exact_minions ref['J'] = self._check_pillar_exact_minions results = [] unmatched = [] opers = ['and', 'or', 'not', '(', ')'] missing = [] if isinstance(expr, six.string_types): words = expr.split() else: # we make a shallow copy in order to not affect the passed in arg words = expr[:] while words: word = words.pop(0) target_info = parse_target(word) # Easy check first if word in opers: if results: if results[-1] == '(' and word in ('and', 'or'): log.error('Invalid beginning operator after "(": %s', word) return {'minions': [], 'missing': []} if word == 'not': if not results[-1] in ('&', '|', '('): results.append('&') results.append('(') results.append(six.text_type(set(minions))) results.append('-') unmatched.append('-') elif word == 'and': results.append('&') elif word == 'or': results.append('|') elif word == '(': results.append(word) unmatched.append(word) elif word == ')': if not unmatched or unmatched[-1] != '(': log.error('Invalid compound expr (unexpected ' 'right parenthesis): %s', expr) return {'minions': [], 'missing': []} results.append(word) unmatched.pop() if unmatched and unmatched[-1] == '-': results.append(')') unmatched.pop() else: # Won't get here, unless oper is added log.error('Unhandled oper in compound expr: %s', expr) return {'minions': [], 'missing': []} else: # seq start with oper, fail if word == 'not': results.append('(') results.append(six.text_type(set(minions))) results.append('-') unmatched.append('-') elif word == '(': results.append(word) unmatched.append(word) else: log.error( 'Expression may begin with' ' binary operator: %s', word ) return {'minions': [], 'missing': []} elif target_info and target_info['engine']: if 'N' == target_info['engine']: # if we encounter a node group, just evaluate it in-place decomposed = nodegroup_comp(target_info['pattern'], nodegroups) if decomposed: words = decomposed + words continue engine = ref.get(target_info['engine']) if not engine: # If an unknown engine is called at any time, fail out log.error( 'Unrecognized target engine "%s" for' ' target expression "%s"', target_info['engine'], word, ) return {'minions': [], 'missing': []} engine_args = [target_info['pattern']] if target_info['engine'] in ('G', 'P', 'I', 'J'): engine_args.append(target_info['delimiter'] or ':') engine_args.append(greedy) # ignore missing minions for lists if we exclude them with # a 'not' if 'L' == target_info['engine']: engine_args.append(results and results[-1] == '-') _results = engine(*engine_args) results.append(six.text_type(set(_results['minions']))) missing.extend(_results['missing']) if unmatched and unmatched[-1] == '-': results.append(')') unmatched.pop() 
else: # The match is not explicitly defined, evaluate as a glob _results = self._check_glob_minions(word, True) results.append(six.text_type(set(_results['minions']))) if unmatched and unmatched[-1] == '-': results.append(')') unmatched.pop() # Add a closing ')' for each item left in unmatched results.extend([')' for item in unmatched]) results = ' '.join(results) log.debug('Evaluating final compound matching expr: %s', results) try: minions = list(eval(results)) # pylint: disable=W0123 return {'minions': minions, 'missing': missing} except Exception: log.error('Invalid compound target: %s', expr) return {'minions': [], 'missing': []} return {'minions': list(minions), 'missing': []}
Return the minions found by looking via compound matcher
def reverse(self): """ Reverses the items of this collection "in place" (only two values are retrieved from Redis at a time). """ def reverse_trans(pipe): if self.writeback: self._sync_helper(pipe) n = self.__len__(pipe) for i in range(n // 2): left = pipe.lindex(self.key, i) right = pipe.lindex(self.key, n - i - 1) pipe.lset(self.key, i, right) pipe.lset(self.key, n - i - 1, left) self._transaction(reverse_trans)
Reverses the items of this collection "in place" (only two values are retrieved from Redis at a time).
def mark_read(user, message): """ Mark message instance as read for user. Returns True if the message was `unread` and thus actually marked as `read` or False in case it is already `read` or it does not exist at all. :param user: user instance for the recipient :param message: a Message instance to mark as read """ BackendClass = stored_messages_settings.STORAGE_BACKEND backend = BackendClass() backend.inbox_delete(user, message)
Mark message instance as read for user. Returns True if the message was `unread` and thus actually marked as `read` or False in case it is already `read` or it does not exist at all. :param user: user instance for the recipient :param message: a Message instance to mark as read
def translate(self, text, to_template='{name} ({url})', from_template=None, name_matcher=None, url_matcher=None): """ Translate hyperinks into printable book style for Manning Publishing >>> translator = HyperlinkStyleCorrector() >>> adoc = 'See http://totalgood.com[Total Good] about that.' >>> translator.translate(adoc) 'See Total Good (http://totalgood.com) about that.' """ return self.replace(text, to_template=to_template, from_template=from_template, name_matcher=name_matcher, url_matcher=url_matcher)
Translate hyperinks into printable book style for Manning Publishing >>> translator = HyperlinkStyleCorrector() >>> adoc = 'See http://totalgood.com[Total Good] about that.' >>> translator.translate(adoc) 'See Total Good (http://totalgood.com) about that.'
def set_regs(self, regs_dump): """ Initialize register values within the state :param regs_dump: The output of ``info registers`` in gdb. """ if self.real_stack_top == 0 and self.adjust_stack is True: raise SimStateError("You need to set the stack first, or set" "adjust_stack to False. Beware that in this case, sp and bp won't be updated") data = self._read_data(regs_dump) rdata = re.split(b"\n", data) for r in rdata: if r == b"": continue reg = re.split(b" +", r)[0].decode() val = int(re.split(b" +", r)[1],16) try: self.state.registers.store(reg, claripy.BVV(val, self.state.arch.bits)) # Some registers such as cs, ds, eflags etc. aren't supported in angr except KeyError as e: l.warning("Reg %s was not set", e) self._adjust_regs()
Initialize register values within the state :param regs_dump: The output of ``info registers`` in gdb.
def partition_key(self, value): """ Set the partition key of the event data object. :param value: The partition key to set. :type value: str or bytes """ annotations = dict(self._annotations) annotations[self._partition_key] = value header = MessageHeader() header.durable = True self.message.annotations = annotations self.message.header = header self._annotations = annotations
Set the partition key of the event data object. :param value: The partition key to set. :type value: str or bytes
def _is_output(part): """ Returns whether the given part represents an output variable. """ if part[0].lower() == 'o': return True elif part[0][:2].lower() == 'o:': return True elif part[0][:2].lower() == 'o.': return True else: return False
Returns whether the given part represents an output variable.
def compose_dynamic_tree(src, target_tree_alias=None, parent_tree_item_alias=None, include_trees=None): """Returns a structure describing a dynamic sitetree.utils The structure can be built from various sources, :param str|iterable src: If a string is passed to `src`, it'll be treated as the name of an app, from where one want to import sitetrees definitions. `src` can be an iterable of tree definitions (see `sitetree.toolbox.tree()` and `item()` functions). :param str|unicode target_tree_alias: Static tree alias to attach items from dynamic trees to. :param str|unicode parent_tree_item_alias: Tree item alias from a static tree to attach items from dynamic trees to. :param list include_trees: Sitetree aliases to filter `src`. :rtype: dict """ def result(sitetrees=src): if include_trees is not None: sitetrees = [tree for tree in sitetrees if tree.alias in include_trees] return { 'app': src, 'sitetrees': sitetrees, 'tree': target_tree_alias, 'parent_item': parent_tree_item_alias} if isinstance(src, six.string_types): # Considered to be an application name. try: module = import_app_sitetree_module(src) return None if module is None else result(getattr(module, 'sitetrees', None)) except ImportError as e: if settings.DEBUG: warnings.warn('Unable to register dynamic sitetree(s) for `%s` application: %s. ' % (src, e)) return None return result()
Returns a structure describing a dynamic sitetree.utils The structure can be built from various sources, :param str|iterable src: If a string is passed to `src`, it'll be treated as the name of an app, from where one want to import sitetrees definitions. `src` can be an iterable of tree definitions (see `sitetree.toolbox.tree()` and `item()` functions). :param str|unicode target_tree_alias: Static tree alias to attach items from dynamic trees to. :param str|unicode parent_tree_item_alias: Tree item alias from a static tree to attach items from dynamic trees to. :param list include_trees: Sitetree aliases to filter `src`. :rtype: dict
def merge_with(self, other): """Merge this ``ValuesAggregation`` with another one""" result = ValuesAggregation() result.total = self.total + other.total result.count = self.count + other.count result.min = min(self.min, other.min) result.max = max(self.max, other.max) return result
Merge this ``ValuesAggregation`` with another one
def derived(self, name, relative_coords, formula): """Helper function for derived quantities""" relZ, relN = relative_coords daughter_idx = [(x[0] + relZ, x[1] + relN) for x in self.df.index] values = formula(self.df.values, self.df.loc[daughter_idx].values) return Table(df=pd.Series(values, index=self.df.index, name=name + '(' + self.name + ')'))
Helper function for derived quantities
def xml_report(self, morfs=None, outfile=None, ignore_errors=None, omit=None, include=None): """Generate an XML report of coverage results. The report is compatible with Cobertura reports. Each module in `morfs` is included in the report. `outfile` is the path to write the file to, "-" will write to stdout. See `coverage.report()` for other arguments. Returns a float, the total percentage covered. """ self._harvest_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include, xml_output=outfile, ) file_to_close = None delete_file = False if self.config.xml_output: if self.config.xml_output == '-': outfile = sys.stdout else: outfile = open(self.config.xml_output, "w") file_to_close = outfile try: try: reporter = XmlReporter(self, self.config) return reporter.report(morfs, outfile=outfile) except CoverageException: delete_file = True raise finally: if file_to_close: file_to_close.close() if delete_file: file_be_gone(self.config.xml_output)
Generate an XML report of coverage results. The report is compatible with Cobertura reports. Each module in `morfs` is included in the report. `outfile` is the path to write the file to, "-" will write to stdout. See `coverage.report()` for other arguments. Returns a float, the total percentage covered.
def as_dict(self, join='.'): """ Returns all the errors in this collection as a path to message dictionary. Paths are joined with the ``join`` string. """ result = {} for e in self.errors: result.update(e.as_dict(join)) return result
Returns all the errors in this collection as a path to message dictionary. Paths are joined with the ``join`` string.
def _render_bundle(bundle_name): """ Renders the HTML for a bundle in place - one HTML tag or many depending on settings.USE_BUNDLES """ try: bundle = get_bundles()[bundle_name] except KeyError: raise ImproperlyConfigured("Bundle '%s' is not defined" % bundle_name) if bundle.use_bundle: return _render_file(bundle.bundle_type, bundle.get_url(), attrs=({'media':bundle.media} if bundle.media else {})) # Render files individually bundle_files = [] for bundle_file in bundle.files: if bundle_file.precompile_in_debug: bundle_files.append(_render_file(bundle_file.bundle_type, bundle_file.precompile_url, attrs=({'media':bundle_file.media} if bundle.media else {}))) else: bundle_files.append(_render_file(bundle_file.file_type, bundle_file.file_url, attrs=({'media':bundle_file.media} if bundle.media else {}))) return '\n'.join(bundle_files)
Renders the HTML for a bundle in place - one HTML tag or many depending on settings.USE_BUNDLES
async def check_authorized(self, identity):
    """
    Works like :func:`Security.identify`, but raises an
    :func:`UnauthorizedError` exception when the check fails.

    :param identity: Claim
    :return: Checked claim
    :raise: :func:`UnauthorizedError`
    """
    identify = await self.identify(identity)
    if identify is None:
        raise UnauthorizedError()
    return identify
Works like :func:`Security.identify`, but raises an :func:`UnauthorizedError`
exception when the check fails.

:param identity: Claim
:return: Checked claim
:raise: :func:`UnauthorizedError`
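A hedged sketch of a handler using this method; `security` is assumed to be an instance of the surrounding class and the cookie name is illustrative:

from aiohttp import web

async def handler(request):
    identity = request.cookies.get('identity')         # hypothetical claim source
    claim = await security.check_authorized(identity)  # raises UnauthorizedError on failure
    return web.json_response({'user': claim})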
def process(self, element): """Run the transformation graph on batched input data Args: element: list of csv strings, representing one batch input to the TF graph. Returns: dict containing the transformed data. Results are un-batched. Sparse tensors are converted to lists. """ import apache_beam as beam import six import tensorflow as tf # This function is invoked by a separate sub-process so setting the logging level # does not affect Datalab's kernel process. tf.logging.set_verbosity(tf.logging.ERROR) try: clean_element = [] for line in element: clean_element.append(line.rstrip()) # batch_result is list of numpy arrays with batch_size many rows. batch_result = self._session.run( fetches=self._transformed_features, feed_dict={self._input_placeholder_tensor: clean_element}) # ex batch_result. # Dense tensor: {'col1': array([[batch_1], [batch_2]])} # Sparse tensor: {'col1': tf.SparseTensorValue( # indices=array([[batch_1, 0], [batch_1, 1], ..., # [batch_2, 0], [batch_2, 1], ...]], # values=array[value, value, value, ...])} # Unbatch the results. for i in range(len(clean_element)): transformed_features = {} for name, value in six.iteritems(batch_result): if isinstance(value, tf.SparseTensorValue): batch_i_indices = value.indices[:, 0] == i batch_i_values = value.values[batch_i_indices] transformed_features[name] = batch_i_values.tolist() else: transformed_features[name] = value[i].tolist() yield transformed_features except Exception as e: # pylint: disable=broad-except yield beam.pvalue.TaggedOutput('errors', (str(e), element))
Run the transformation graph on batched input data Args: element: list of csv strings, representing one batch input to the TF graph. Returns: dict containing the transformed data. Results are un-batched. Sparse tensors are converted to lists.
def do_use(self, args): """Use another instance, provided as argument.""" self.instance = args self.prompt = self.instance + '> ' archive = self._client.get_archive(self.instance) self.streams = [s.name for s in archive.list_streams()] self.tables = [t.name for t in archive.list_tables()]
Use another instance, provided as argument.
def base_url(self):
    """A base_url that will be used to construct the final
    URL we're going to query against.

    :returns: A URL of the form: ``proto://host:port/url_path``.
    :rtype: :obj:`string`
    """
    return '{proto}://{host}:{port}{url_path}'.format(
        proto=self.protocol,
        host=self.host,
        port=self.port,
        url_path=self.url_path,
    )
A base_url that will be used to construct the final URL we're going to query against.

:returns: A URL of the form: ``proto://host:port/url_path``.
:rtype: :obj:`string`
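The same substitution worked by hand, with illustrative values:

print('{proto}://{host}:{port}{url_path}'.format(
    proto='https', host='vault.example.com', port=8200, url_path='/v1'))
# https://vault.example.com:8200/v1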
def unit(n, d=None, j=None, tt_instance=True): ''' Generates e_j _vector in tt.vector format --------- Parameters: n - modes (either integer or array) d - dimensionality (integer) j - position of 1 in full-format e_j (integer) tt_instance - if True, returns tt.vector; if False, returns tt cores as a list ''' if isinstance(n, int): if d is None: d = 1 n = n * _np.ones(d, dtype=_np.int32) else: d = len(n) if j is None: j = 0 rv = [] j = _ind2sub(n, j) for k in xrange(d): rv.append(_np.zeros((1, n[k], 1))) rv[-1][0, j[k], 0] = 1 if tt_instance: rv = _vector.vector.from_list(rv) return rv
Generates e_j _vector in tt.vector format --------- Parameters: n - modes (either integer or array) d - dimensionality (integer) j - position of 1 in full-format e_j (integer) tt_instance - if True, returns tt.vector; if False, returns tt cores as a list
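A hedged check of the result, assuming ttpy's `tt.vector.full()` which materialises the dense tensor:

import numpy as np

e = unit([2, 3], j=4)
dense = e.full()    # dense numpy array, shape (2, 3)
print(dense.sum())  # 1.0 -- a single unit entry at multi-index j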
def fixminimized(self, alphabet): """ After pyfst minimization, all unused arcs are removed, and all sink states are removed. However this may break compatibility. Args: alphabet (list): The input alphabet Returns: None """ endstate = len(list(self.states)) for state in self.states: for char in alphabet: found = 0 for arc in state.arcs: if self.isyms.find(arc.ilabel) == char: found = 1 break if found == 0: self.add_arc(state.stateid, endstate, char) self[endstate].final = TropicalWeight(float('inf')) for char in alphabet: self.add_arc(endstate, endstate, char)
After pyfst minimization, all unused arcs are removed, and all sink states are removed. However this may break compatibility. Args: alphabet (list): The input alphabet Returns: None
def citations(val): """ # The CR Tag extracts a list of all the citations in the record, the citations are the [metaknowledge.Citation](../classes/Citation.html#metaknowledge.citation.Citation) class. # Parameters _val_: `list[str]` > The raw data from a WOS file # Returns ` list[metaknowledge.Citation]` > A list of Citations """ retCites = [] for c in val: retCites.append(Citation(c)) return retCites
# The CR Tag extracts a list of all the citations in the record, the citations are the [metaknowledge.Citation](../classes/Citation.html#metaknowledge.citation.Citation) class. # Parameters _val_: `list[str]` > The raw data from a WOS file # Returns ` list[metaknowledge.Citation]` > A list of Citations
def _closure_deletelink(self, oldparentpk): """Remove incorrect links from the closure tree.""" self._closure_model.objects.filter( **{ "parent__%s__child" % self._closure_parentref(): oldparentpk, "child__%s__parent" % self._closure_childref(): self.pk } ).delete()
Remove incorrect links from the closure tree.
def template_filter(self, name=None): """A decorator that is used to register custom template filter. You can specify a name for the filter, otherwise the function name will be used. Example:: @app.template_filter() def reverse(s): return s[::-1] :param name: the optional name of the filter, otherwise the function name will be used. """ def decorator(f): self.add_template_filter(f, name=name) return f return decorator
A decorator that is used to register custom template filter. You can specify a name for the filter, otherwise the function name will be used. Example:: @app.template_filter() def reverse(s): return s[::-1] :param name: the optional name of the filter, otherwise the function name will be used.
def validate_proxy_granting_ticket(pgt, target_service):
    """
    Validate a proxy granting ticket string. Return a ``ProxyTicket``
    created for the target service; raise ``ValidationError`` if ticket
    validation fails.
    """
    logger.debug("Proxy ticket request received for %s using %s" % (target_service, pgt))

    pgt = ProxyGrantingTicket.objects.validate_ticket(pgt, target_service)
    pt = ProxyTicket.objects.create_ticket(service=target_service, user=pgt.user, granted_by_pgt=pgt)
    return pt
Validate a proxy granting ticket string. Return a ``ProxyTicket`` created for
the target service; raise ``ValidationError`` if ticket validation fails.
def list_keyvaults_sub(access_token, subscription_id): '''Lists key vaults belonging to this subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. 200 OK. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.KeyVault/vaults', '?api-version=', KEYVAULT_API]) return do_get_next(endpoint, access_token)
Lists key vaults belonging to this subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. 200 OK.
def remove_env(environment): """ Remove an environment from the configuration. """ if not environment: print("You need to supply an environment name") return parser = read_config() if not parser.remove_section(environment): print("Unknown environment type '%s'" % environment) return write_config(parser) print("Removed environment '%s'" % environment)
Remove an environment from the configuration.