code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def iterlines(s):
    """Yield the lines of *s* one at a time.

    Equivalent to iterating over ``s.split('\\n')`` but without
    materializing the whole list, which keeps memory use flat for
    very large multi-line strings.

    Parameters
    ----------
    s : str
        A multi-line string.

    Yields
    ------
    line : str
        Each newline-delimited segment (the trailing segment after the
        last newline is yielded too, even if empty).
    """
    start = 0
    while True:
        end = s.find('\n', start)
        if end == -1:
            # No more newlines: emit the final (possibly empty) segment.
            yield s[start:]
            return
        yield s[start:end]
        start = end + 1
A generator form of s.split('\n') for reducing memory overhead. Parameters ---------- s : str A multi-line string. Yields ------ line : str A string.
def byte_href_anchors(self, chars=False):
    '''
    simple, regex-based extractor of anchor tags, so we can compute
    BYTE offsets for anchor texts and associate them with their href.

    Generates tuple(href_string, first_byte, byte_length, anchor_text)

    :param chars: if True, offsets are character offsets instead of
        byte offsets (the buffer is decoded from UTF-8 first) and the
        yielded href/anchor values are re-encoded back to UTF-8 bytes.
    '''
    # NOTE(review): assumes self.clean_html is a bytes buffer when
    # chars=True (it is decoded below) -- confirm against callers.
    input_buffer = self.clean_html
    if chars:
        input_buffer = input_buffer.decode('utf8')
    idx = 0
    ## split doc up into pieces that end on an anchor tag
    parts = input_buffer.split('</a>')
    # Sanity check: re-joining must reproduce the original buffer length.
    assert len('</a>'.join(parts) ) == len(input_buffer)
    for part in parts:
        ## try to get an A tag out:
        m = anchors_re.match(part)
        if not m:
            # No anchor in this piece; advance past it plus the '</a>'
            # delimiter (4 bytes) that split() removed.
            idx += len(part) + 4
            continue
        before = m.group('before')
        ahref = m.group('ahref')
        ## increment the index to get line number for the anchor
        idx += len(before) + len(ahref)
        first = idx
        length = len(m.group('anchor'))
        ## update the index for the next loop
        # include anchor plus the </a>
        idx += length + 4
        # NOTE(review): assumes anchors_re's 'before'/'ahref'/'anchor'
        # groups fully partition each part, otherwise offsets drift --
        # the final assert below would catch the drift.
        if chars:
            yield m.group('href').encode('utf8'), first, length, m.group('anchor').encode('utf8')
        else:
            yield m.group('href'), first, length, m.group('anchor')
    # Every part added its length plus 4, so idx overshoots by exactly
    # one trailing '</a>' worth of bytes.
    assert idx - 4 == len(input_buffer)
simple, regex-based extractor of anchor tags, so we can compute BYTE offsets for anchor texts and associate them with their href. Generates tuple(href_string, first_byte, byte_length, anchor_text)
def dens_in_meanmatterdens(vo, ro, H=70., Om=0.3):
    """Convert density to units of the mean matter density.

    Parameters:
        vo: velocity unit in km/s
        ro: length unit in kpc
        H: Hubble constant in km/s/Mpc (default: 70)
        Om: Omega matter (default: 0.3)

    Returns:
        Conversion factor from units where vo=1. at ro=1. to units of
        the mean matter density (critical density times Omega matter).

    History:
        2014-01-28 - Written - Bovy (IAS)
    """
    # Mean matter density = Om * critical density, so rescale the
    # critical-density conversion by 1/Om.
    critical = dens_in_criticaldens(vo, ro, H=H)
    return critical / Om
NAME: dens_in_meanmatterdens PURPOSE: convert density to units of the mean matter density INPUT: vo - velocity unit in km/s ro - length unit in kpc H= (default: 70) Hubble constant in km/s/Mpc Om= (default: 0.3) Omega matter OUTPUT: conversion from units where vo=1. at ro=1. to units of the mean matter density HISTORY: 2014-01-28 - Written - Bovy (IAS)
def update_file_ext(filename, ext='txt', sep='.'):
    r"""Return *filename* with its extension replaced by *ext*.

    The separator *sep* (a dot by default) delimits the extension; a
    leading separator on *ext* is tolerated and stripped.

    >>> from __future__ import unicode_literals
    >>> update_file_ext('/home/hobs/extremofile', 'bac')
    '/home/hobs/extremofile.bac'
    >>> update_file_ext('/home/hobs/piano.file/', 'music')
    '/home/hobs/piano.file/.music'
    >>> update_file_ext('/home/ninja.hobs/Anglofile', '.uk')
    '/home/ninja.hobs/Anglofile.uk'
    >>> update_file_ext('/home/ninja-corsi/audio', 'file', sep='-')
    '/home/ninja-corsi/audio-file'
    """
    dirpath, basename = os.path.split(filename)
    # Tolerate an extension given with its leading separator ('.uk').
    if ext and ext[0] == sep:
        ext = ext[1:]
    pieces = basename.split(sep)
    # With two or more separators, drop the last piece (the old
    # extension); with one or none, keep only the stem.
    keep = pieces[:-1] if basename.count(sep) > 1 else pieces[:1]
    return os.path.join(dirpath, sep.join(keep + [ext]))
r"""Force the file or path str to end with the indicated extension Note: a dot (".") is assumed to delimit the extension >>> from __future__ import unicode_literals >>> update_file_ext('/home/hobs/extremofile', 'bac') '/home/hobs/extremofile.bac' >>> update_file_ext('/home/hobs/piano.file/', 'music') '/home/hobs/piano.file/.music' >>> update_file_ext('/home/ninja.hobs/Anglofile', '.uk') '/home/ninja.hobs/Anglofile.uk' >>> update_file_ext('/home/ninja-corsi/audio', 'file', sep='-') '/home/ninja-corsi/audio-file'
def set_bind(self):
    """
    Sets key bindings -- we need this more than once
    """
    # Install the base IntegerEntry bindings first, then add paging keys.
    IntegerEntry.set_bind(self)
    # Page-Down jumps to the minimum, Page-Up to the maximum; imin/imax
    # are read lazily when the event fires, so later changes take effect.
    self.bind('<Next>', lambda e: self.set(self.imin))
    self.bind('<Prior>', lambda e: self.set(self.imax))
Sets key bindings -- we need this more than once
def close_available(self):
    """Volume available to close (可平仓数量).

    Returns:
        dict: ``volume_long`` and ``volume_short``, each the held
        volume minus the corresponding frozen volume.
    """
    available_long = self.volume_long - self.volume_long_frozen
    available_short = self.volume_short - self.volume_short_frozen
    return {
        'volume_long': available_long,
        'volume_short': available_short,
    }
可平仓数量 Returns: [type] -- [description]
def _create(self, tree):
    """ Run a CREATE statement """
    # (docstring previously said SELECT -- this method builds a table.)
    tablename = tree.table
    indexes = []
    global_indexes = []
    hash_key = None
    range_key = None
    attrs = {}
    # Each attr declaration is (name, type) optionally followed by an
    # index specification parse node.
    for declaration in tree.attrs:
        name, type_ = declaration[:2]
        if len(declaration) > 2:
            index = declaration[2]
        else:
            index = None
        if index is not None:
            if index[0] == "HASH":
                field = hash_key = DynamoKey(name, data_type=TYPES[type_])
            elif index[0] == "RANGE":
                field = range_key = DynamoKey(name, data_type=TYPES[type_])
            else:
                # Local secondary index; index_type is itself a parse
                # node, hence the index_type[0] lookups below.
                # NOTE(review): assumes the grammar guarantees one of
                # ALL/INDEX/KEYS/INCLUDE, else 'factory' is unbound.
                index_type = index[0]
                kwargs = {}
                if index_type[0] in ("ALL", "INDEX"):
                    factory = LocalIndex.all
                elif index_type[0] == "KEYS":
                    factory = LocalIndex.keys
                elif index_type[0] == "INCLUDE":
                    factory = LocalIndex.include
                    kwargs["includes"] = [resolve(v) for v in index.include_vars]
                index_name = resolve(index[1])
                field = DynamoKey(name, data_type=TYPES[type_])
                idx = factory(index_name, field, **kwargs)
                indexes.append(idx)
        else:
            field = DynamoKey(name, data_type=TYPES[type_])
        attrs[field.name] = field
    for gindex in tree.global_indexes:
        global_indexes.append(self._parse_global_index(gindex, attrs))
    throughput = None
    if tree.throughput:
        throughput = Throughput(*map(resolve, tree.throughput))
    try:
        ret = self.connection.create_table(
            tablename,
            hash_key,
            range_key,
            indexes=indexes,
            global_indexes=global_indexes,
            throughput=throughput,
        )
    except DynamoDBError as e:
        # Creating an already-existing table is not an error when the
        # statement allows it (IF NOT EXISTS).
        if e.kwargs["Code"] == "ResourceInUseException" or tree.not_exists:
            return False
        raise
    return True
Run a CREATE statement
def orth_gs(order, dist, normed=False, sort="GR", cross_truncation=1., **kws):
    """
    Gram-Schmidt process for generating orthogonal polynomials.

    Args:
        order (int, Poly):
            The upper polynomial order. Alternative a custom polynomial basis
            can be used.
        dist (Dist):
            Weighting distribution(s) defining orthogonality.
        normed (bool):
            If True orthonormal polynomials will be used instead of monic.
        sort (str):
            Ordering argument passed to poly.basis. If custom basis is used,
            argument is ignored.
        cross_truncation (float):
            Use hyperbolic cross truncation scheme to reduce the number of
            terms in expansion.

    Returns:
        (Poly): The orthogonal polynomial expansion.

    Examples:
        >>> Z = chaospy.J(chaospy.Normal(), chaospy.Normal())
        >>> print(chaospy.around(chaospy.orth_gs(2, Z), 4))
        [1.0, q1, q0, q1^2-1.0, q0q1, q0^2-1.0]
    """
    logger = logging.getLogger(__name__)
    dim = len(dist)

    if isinstance(order, int):
        if order == 0:
            return chaospy.poly.Poly(1, dim=dim)
        basis = chaospy.poly.basis(
            0, order, dim, sort, cross_truncation=cross_truncation)
    else:
        # Caller supplied a custom basis; 'sort' is ignored in this case.
        basis = order
    basis = list(basis)

    polynomials = [basis[0]]

    if normed:
        for idx in range(1, len(basis)):
            # orthogonalize polynomial:
            for idy in range(idx):
                orth = chaospy.descriptives.E(
                    basis[idx]*polynomials[idy], dist, **kws)
                basis[idx] = basis[idx] - polynomials[idy]*orth

            # normalize:
            # NOTE(review): the norm is computed from polynomials[-1]
            # (the PREVIOUS polynomial) before basis[idx] is appended --
            # looks suspicious; confirm against upstream chaospy.
            norms = chaospy.descriptives.E(polynomials[-1]**2, dist, **kws)
            if norms <= 0:
                logger.warning("Warning: Polynomial cutoff at term %d", idx)
                break
            basis[idx] = basis[idx] / numpy.sqrt(norms)

            polynomials.append(basis[idx])

    else:
        norms = [1.]
        for idx in range(1, len(basis)):
            # orthogonalize polynomial:
            for idy in range(idx):
                orth = chaospy.descriptives.E(
                    basis[idx]*polynomials[idy], dist, **kws)
                basis[idx] = basis[idx] - polynomials[idy] * orth / norms[idy]

            # NOTE(review): same polynomials[-1] pattern as above --
            # norms[idx] is the norm of the previously accepted term.
            norms.append(
                chaospy.descriptives.E(polynomials[-1]**2, dist, **kws))
            if norms[-1] <= 0:
                logger.warning("Warning: Polynomial cutoff at term %d", idx)
                break

            polynomials.append(basis[idx])

    return chaospy.poly.Poly(polynomials, dim=dim, shape=(len(polynomials),))
Gram-Schmidt process for generating orthogonal polynomials. Args: order (int, Poly): The upper polynomial order. Alternative a custom polynomial basis can be used. dist (Dist): Weighting distribution(s) defining orthogonality. normed (bool): If True orthonormal polynomials will be used instead of monic. sort (str): Ordering argument passed to poly.basis. If custom basis is used, argument is ignored. cross_truncation (float): Use hyperbolic cross truncation scheme to reduce the number of terms in expansion. Returns: (Poly): The orthogonal polynomial expansion. Examples: >>> Z = chaospy.J(chaospy.Normal(), chaospy.Normal()) >>> print(chaospy.around(chaospy.orth_gs(2, Z), 4)) [1.0, q1, q0, q1^2-1.0, q0q1, q0^2-1.0]
def air_gap(self, volume=None, height=None):
    """
    Pull air into the :any:`Pipette` current tip

    Parameters
    ----------
    volume : number
        The amount in uL to aspirate air into the tube.
        (Default will use all remaining volume in tip)

    height : number
        The number of millimeters to move above the current Placeable
        to perform the air-gap aspirate
        (Default will be 5mm above the current Placeable)

    Returns
    -------

    This instance of :class:`Pipette`.

    Examples
    --------
    ..
    >>> from opentrons import instruments, robot # doctest: +SKIP
    >>> robot.reset() # doctest: +SKIP
    >>> p300 = instruments.P300_Single(mount='left') # doctest: +SKIP
    >>> p300.aspirate(50, plate[0]) # doctest: +SKIP
    >>> p300.air_gap(50) # doctest: +SKIP
    """
    # Warn but do not abort when no tip is attached.
    if not self.tip_attached:
        log.warning("Cannot perform air_gap without a tip attached.")
    if height is None:
        height = 5
    do_publish(self.broker, commands.air_gap, self.air_gap, 'before',
               self, None, self, volume, height)
    # if volumes is specified as 0uL, do nothing
    # (note: volume=None is NOT 0, so the default aspirates remaining volume)
    if volume != 0:
        location = self.previous_placeable.top(height)
        # "move_to" separate from aspirate command
        # so "_position_for_aspirate" isn't executed
        self.move_to(location)
        self.aspirate(volume)
    do_publish(self.broker, commands.air_gap, self.air_gap, 'after',
               self, None, self, volume, height)
    return self
Pull air into the :any:`Pipette` current tip Parameters ---------- volume : number The amount in uL to aspirate air into the tube. (Default will use all remaining volume in tip) height : number The number of millimeters to move above the current Placeable to perform the air-gap aspirate (Default will be 5mm above the current Placeable) Returns ------- This instance of :class:`Pipette`. Examples -------- .. >>> from opentrons import instruments, robot # doctest: +SKIP >>> robot.reset() # doctest: +SKIP >>> p300 = instruments.P300_Single(mount='left') # doctest: +SKIP >>> p300.aspirate(50, plate[0]) # doctest: +SKIP >>> p300.air_gap(50) # doctest: +SKIP
def _get_question_map(self, question_id):
    """Return the stored question map matching *question_id*.

    Handles both Section-assigned ids (matched against the Mongo
    ``_id``) and normal Question/Item ids (matched against the
    ``questionId`` string).

    Raises:
        errors.NotFound: when no stored question matches.
    """
    if question_id.get_authority() == ASSESSMENT_AUTHORITY:
        # Section-assigned id: compare against the raw ObjectId.
        lookup_key = '_id'
        target = ObjectId(question_id.get_identifier())
    else:
        lookup_key = 'questionId'
        target = str(question_id)
    for candidate in self._my_map['questions']:
        if candidate[lookup_key] == target:
            return candidate
    raise errors.NotFound()
get question map from questions matching question_id This can make sense of both Section assigned Ids or normal Question/Item Ids
def writeProxy(self, obj):
    """
    Encodes a proxied object to the stream.

    @since: 0.6
    """
    # Look up (or create) the proxy for this object, then serialize it
    # with the proxy flag set.
    self.writeObject(self.context.getProxyForObject(obj), is_proxy=True)
Encodes a proxied object to the stream. @since: 0.6
def match_range(self, field, start=None, stop=None, inclusive=True,
                required=True, new_group=False):
    """Add a ``field:[some range]`` term to the query.

    Matches will have a ``value`` in the range in the ``field``.

    Arguments:
        field (str): The field to check for the value, namespaced with
                Elasticsearch dot syntax (e.g. ``"mdf.source_name"``).
        start (str or int): Lower bound, or ``None`` for unbounded.
                **Default:** ``None``.
        stop (str or int): Upper bound, or ``None`` for unbounded.
                **Default:** ``None``.
        inclusive (bool): Include the endpoints in the search?
                **Default:** ``True``.
        required (bool): ``AND`` the term if ``True``, ``OR`` otherwise.
                **Default:** ``True``.
        new_group (bool): Start a new parenthetical group?
                **Default:** ``False``.

    Returns:
        SearchHelper: Self
    """
    # None means "unbounded" on either side.
    lower = "*" if start is None else start
    upper = "*" if stop is None else stop
    # A fully unbounded range is just an existence check.
    if lower == "*" and upper == "*":
        return self.match_exists(field, required=required,
                                 new_group=new_group)
    open_b, close_b = ("[", "]") if inclusive else ("{", "}")
    value = open_b + str(lower) + " TO " + str(upper) + close_b
    return self.match_field(field, value, required=required,
                            new_group=new_group)
Add a ``field:[some range]`` term to the query. Matches will have a ``value`` in the range in the ``field``. Arguments: field (str): The field to check for the value. The field must be namespaced according to Elasticsearch rules using the dot syntax. For example, ``"mdf.source_name"`` is the ``source_name`` field of the ``mdf`` dictionary. start (str or int): The starting value, or ``None`` for no lower bound. **Default:** ``None``. stop (str or int): The ending value, or ``None`` for no upper bound. **Default:** ``None``. inclusive (bool): If ``True``, the ``start`` and ``stop`` values will be included in the search. If ``False``, the start and stop values will not be included in the search. **Default:** ``True``. required (bool): If ``True``, will add term with ``AND``. If ``False``, will use ``OR``. **Default:** ``True``. new_group (bool): If ``True``, will separate the term into a new parenthetical group. If ``False``, will not. **Default:** ``False``. Returns: SearchHelper: Self
def url_request(target_url, output_file):
    """
    Use urllib to download the requested file from the target URL.
    Use the click progress bar to print download progress

    :param target_url: URL from which the file is to be downloaded
    :param output_file: Name and path of local copy of file
    """
    # Open the response as a context manager so the connection is
    # always closed, even if the download fails part-way through.
    with urllib.request.urlopen(target_url) as request:
        # Open the destination file to write
        with open(output_file, 'wb') as targets:
            # Total file size drives the progress bar. Servers may omit
            # the Content-Length header (the original int(None) crashed
            # here); fall back to 0 in that case.
            total_length = int(request.headers.get('content-length') or 0)
            # Create a click progress bar using the total length calculated above
            with click.progressbar(length=total_length,
                                   label='Downloading files') as bar:
                while True:
                    # Break up the download into chunks of 4096 bytes
                    data = request.read(4096)
                    # Break the loop when the download finishes/errors
                    if not data:
                        break
                    # Write the chunk to file
                    targets.write(data)
                    # Update the progress bar
                    bar.update(len(data))
Use urllib to download the requested file from the target URL. Use the click progress bar to print download progress :param target_url: URL from which the file is to be downloaded :param output_file: Name and path of local copy of file
def bitmask(*args):
    """! @brief Returns a mask with specified bit ranges set.

    An integer mask is generated based on the bits and bit ranges specified by
    the arguments. Any number of arguments can be provided. Each argument may
    be either a 2-tuple of integers, a list of integers, or an individual
    integer. The result is the combination of masks produced by the arguments.

    - 2-tuple: The tuple is a bit range with the first element being the MSB
      and the second element the LSB. All bits from LSB up to and included MSB
      are set.
    - list: Each bit position specified by the list elements is set.
    - int: The specified bit position is set.

    @return An integer mask value computed from the logical OR'ing of masks
        generated by each argument.

    Example:
    @code
      >>> hex(bitmask((23,17),1))
      0xfe0002
      >>> hex(bitmask([4,0,2],(31,24)))
      0xff000015
    @endcode
    """
    mask = 0
    for a in args:
        # isinstance instead of 'type(a) is', so subclasses (e.g.
        # namedtuples, bools) are handled instead of silently ignored.
        if isinstance(a, tuple):
            # (MSB, LSB): set every bit in the inclusive range.
            for b in range(a[1], a[0] + 1):
                mask |= 1 << b
        elif isinstance(a, list):
            for b in a:
                mask |= 1 << b
        elif isinstance(a, int):
            mask |= 1 << a
    return mask
! @brief Returns a mask with specified bit ranges set. An integer mask is generated based on the bits and bit ranges specified by the arguments. Any number of arguments can be provided. Each argument may be either a 2-tuple of integers, a list of integers, or an individual integer. The result is the combination of masks produced by the arguments. - 2-tuple: The tuple is a bit range with the first element being the MSB and the second element the LSB. All bits from LSB up to and included MSB are set. - list: Each bit position specified by the list elements is set. - int: The specified bit position is set. @return An integer mask value computed from the logical OR'ing of masks generated by each argument. Example: @code >>> hex(bitmask((23,17),1)) 0xfe0002 >>> hex(bitmask([4,0,2],(31,24)) 0xff000015 @endcode
def add_product_version_to_build_configuration(id=None, name=None, product_version_id=None):
    """
    Associate an existing ProductVersion with a BuildConfiguration

    :param id: BuildConfiguration id (or use name)
    :param name: BuildConfiguration name (or use id)
    :param product_version_id: id of the ProductVersion to associate
    """
    # BUG FIX: this "add" wrapper previously delegated to the
    # remove_..._raw helper (copy-paste error), so it detached the
    # ProductVersion instead of attaching it.
    data = add_product_version_to_build_configuration_raw(id, name, product_version_id)
    if data:
        return utils.format_json_list(data)
Associate an existing ProductVersion with a BuildConfiguration
def make_published(self, request, queryset):
    """
    Bulk action to mark selected posts as published. If the
    date_published field is empty the current time is saved as
    date_published. queryset must not be empty (ensured by DjangoCMS).
    """
    # Posts never published before: stamp date_published with "now"
    # while flipping the publish flag.
    cnt1 = queryset.filter(
        date_published__isnull=True,
        publish=False,
    ).update(date_published=timezone.now(), publish=True)
    # Posts that already carry a publication date: only flip the flag.
    cnt2 = queryset.filter(
        date_published__isnull=False,
        publish=False,
    ).update(publish=True)
    # Report the combined count with a correctly pluralized message.
    messages.add_message(
        request, messages.INFO,
        __('%(updates)d entry published.',
           '%(updates)d entries published.', cnt1+cnt2) % {
            'updates': cnt1+cnt2,
        })
Bulk action to mark selected posts as published. If the date_published field is empty the current time is saved as date_published. queryset must not be empty (ensured by DjangoCMS).
def css(self, *props, **kwprops):
    """Add CSS properties to this element.

    Accepts either a single mapping as the positional argument or the
    properties as keyword arguments; delegates to ``attr(style=...)``.
    """
    self._stable = False
    if props:
        # Exactly one positional argument is accepted, and it must be a
        # mapping of style properties.
        if len(props) != 1 or not isinstance(props[0], Mapping):
            raise WrongContentError(self, props, "Arguments not valid")
        styles = props[0]
    elif kwprops:
        styles = kwprops
    else:
        raise WrongContentError(self, None, "args OR wkargs are needed")
    return self.attr(style=styles)
Adds css properties to this element.
def list_all_categories(cls, **kwargs):
    """List Categories

    Return a list of Categories.
    By default the HTTP request is synchronous; pass ``async=True`` to
    get the request thread back instead.
    >>> thread = api.list_all_categories(async=True)
    >>> result = thread.get()

    :param async bool
    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[Category]
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Asynchronous: hand the request thread straight back.
        return cls._list_all_categories_with_http_info(**kwargs)
    # Synchronous: block for the data and return it.
    data = cls._list_all_categories_with_http_info(**kwargs)
    return data
List Categories Return a list of Categories This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_categories(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[Category] If the method is called asynchronously, returns the request thread.
def _scroll(clicks, x=None, y=None):
    """Send a vertical mouse-wheel event to Windows via mouse_event().

    Args:
      clicks (int): Amount of scrolling; positive scrolls the wheel
        forward (up), negative scrolls backward (down).
      x (int): x position for the event; defaults to the current
        cursor position, and is clamped to the screen otherwise.
      y (int): y position for the event; same defaulting/clamping.

    Returns:
      None
    """
    current_x, current_y = _position()
    screen_w, screen_h = _size()
    # Default to the cursor position; otherwise clamp onto the screen.
    if x is None:
        x = current_x
    else:
        x = max(0, min(x, screen_w - 1))
    if y is None:
        y = current_y
    else:
        y = max(0, min(y, screen_h - 1))
    try:
        _sendMouseEvent(MOUSEEVENTF_WHEEL, x, y, dwData=clicks)
    except (PermissionError, OSError):
        # TODO: We need to figure out how to prevent these errors,
        # see https://github.com/asweigart/pyautogui/issues/60
        pass
Send the mouse vertical scroll event to Windows by calling the mouse_event() win32 function. Args: clicks (int): The amount of scrolling to do. A positive value is the mouse wheel moving forward (scrolling up), a negative value is backwards (down). x (int): The x position of the mouse event. y (int): The y position of the mouse event. Returns: None
def active_url(context, urls, css=None):
    """
    Highlight menu item based on url tag.

    Returns a css class if the current request's full path matches one
    of the reversed ``urls``.

    :param urls: Space-separated Django url names to be reversed.
    :param css: Css class to be returned for highlighting. Return active if none set.
    """
    request = context['request']
    # BUG FIX: get_full_path is a method; the original compared the
    # bound-method object itself to the reversed urls, so the condition
    # was always False and the tag never highlighted anything.
    if request.get_full_path() in (reverse(url) for url in urls.split()):
        return css if css else 'active'
    return ''
Highlight menu item based on url tag. Returns a css class if ``request.path`` is in given ``url``. :param url: Django url to be reversed. :param css: Css class to be returned for highlighting. Return active if none set.
def i4_sobol_generate(dim_num, n, skip=1):
    """Generate a Sobol dataset of ``n`` points in ``dim_num`` dimensions.

    Parameters:
      Input, integer dim_num, the spatial dimension.
      Input, integer N, the number of points to generate.
      Input, integer SKIP, the number of initial points to skip.
      Output, real R(N, DIM_NUM), the points.
    """
    points = np.full((n, dim_num), np.nan)
    for row in range(n):
        # i4_sobol returns (sample, next_seed); the seed is derived
        # directly from the row index, so next_seed is unused here.
        sample, _ = i4_sobol(dim_num, row + skip)
        points[row, 0:dim_num] = sample
    return points
i4_sobol_generate generates a Sobol dataset. Parameters: Input, integer dim_num, the spatial dimension. Input, integer N, the number of points to generate. Input, integer SKIP, the number of initial points to skip. Output, real R(M,N), the points.
def subsets_of_fileinfo_from_txt(filename): """Returns a dictionary with subsets of FileInfo instances from a TXT file. Each subset of files must be preceded by a line: @ <number> <label> where <number> indicates the number of files in that subset, and <label> is a label for that subset. Any additional text beyond <label> in the same line is ignored. Note that blank lines or lines starting by the hash symbol are also ignored. The name of the files comprising each subset will be obtained from the first contiguous character string in every line (thus, the rest of the line will be discarded). Parameters ---------- filename : string Name of a TXT file containing a list of FITS files grouped in different subsets by the @ symbol. Returns ------- dict_of_subsets_of_fileinfo : dictionary Dictionary containing as many entries as different subsets of files available. Each value of the dictionary is a dictionary with a label (sequential number starting at zero) and the list of FileInfo instances within subset. 
""" # check for input file if not os.path.isfile(filename): raise ValueError("File " + filename + " not found!") # read input file with open(filename) as f: file_content = f.read().splitlines() # obtain the different subsets of files dict_of_subsets_of_fileinfo = {} label = None sublist_of_fileinfo = [] idict = 0 ifiles = 0 nfiles = 0 sublist_finished = True for line in file_content: if len(line) > 0: if line[0] != '#': if label is None: if line[0] == "@": nfiles = int(line[1:].split()[0]) label = line[1:].split()[1] sublist_of_fileinfo = [] ifiles = 0 sublist_finished = False else: raise ValueError("Expected @ symbol not found!") else: if line[0] == "@": raise ValueError("Unexpected @ symbol found!") tmplist = line.split() tmpfile = tmplist[0] if len(tmplist) > 1: tmpinfo = tmplist[1:] else: tmpinfo = None if not os.path.isfile(tmpfile): raise ValueError("File " + tmpfile + " not found!") sublist_of_fileinfo.append(FileInfo(tmpfile, tmpinfo)) ifiles += 1 if ifiles == nfiles: dict_of_subsets_of_fileinfo[idict] = {} tmpdict = dict_of_subsets_of_fileinfo[idict] tmpdict['label'] = label tmpdict['list_of_fileinfo'] = sublist_of_fileinfo idict += 1 label = None sublist_of_fileinfo = [] ifiles = 0 sublist_finished = True if not sublist_finished: raise ValueError("Unexpected end of sublist of files.") return dict_of_subsets_of_fileinfo
Returns a dictionary with subsets of FileInfo instances from a TXT file. Each subset of files must be preceded by a line: @ <number> <label> where <number> indicates the number of files in that subset, and <label> is a label for that subset. Any additional text beyond <label> in the same line is ignored. Note that blank lines or lines starting by the hash symbol are also ignored. The name of the files comprising each subset will be obtained from the first contiguous character string in every line (thus, the rest of the line will be discarded). Parameters ---------- filename : string Name of a TXT file containing a list of FITS files grouped in different subsets by the @ symbol. Returns ------- dict_of_subsets_of_fileinfo : dictionary Dictionary containing as many entries as different subsets of files available. Each value of the dictionary is a dictionary with a label (sequential number starting at zero) and the list of FileInfo instances within subset.
def build(self, message):
    """buffer all the streaming messages based on the message id.
    Reconstruct all fragments together.

    :param message: incoming message
    :return: next complete message or None if streaming is not done
    """
    context = None
    if message.message_type in [Types.CALL_REQ, Types.CALL_RES]:
        self.verify_message(message)
        context = self.build_context(message)
        # streaming message
        if message.flags == common.FlagsType.fragment:
            # more fragments follow: remember the context by message id
            self.message_buffer[message.id] = context

        # find the incompleted stream
        num = 0
        for i, arg in enumerate(context.argstreams):
            if arg.state != StreamState.completed:
                num = i
                break
        self.close_argstream(context, num)
        return context
    elif message.message_type in [Types.CALL_REQ_CONTINUE,
                                  Types.CALL_RES_CONTINUE]:
        context = self.message_buffer.get(message.id)
        if context is None:
            # missing call msg before continue msg
            raise FatalProtocolError(
                "missing call message after receiving continue message",
                message.id,
            )

        # find the incompleted stream
        dst = 0
        for i, arg in enumerate(context.argstreams):
            if arg.state != StreamState.completed:
                dst = i
                break

        try:
            self.verify_message(message)
        except InvalidChecksumError as e:
            # propagate the checksum failure into the open arg stream
            context.argstreams[dst].set_exception(e)
            raise

        # append this continue frame's args onto the open streams
        src = 0
        while src < len(message.args):
            context.argstreams[dst].write(message.args[src])
            dst += 1
            src += 1

        if message.flags != FlagsType.fragment:
            # get last fragment. mark it as completed
            assert (len(context.argstreams) ==
                    CallContinueMessage.max_args_num)
            self.message_buffer.pop(message.id, None)
            context.flags = FlagsType.none

        # NOTE(review): continue frames always return None here -- the
        # original call message already handed the context to the
        # caller; confirm against the upstream tchannel source.
        self.close_argstream(context, dst - 1)
        return None
    elif message.message_type == Types.ERROR:
        context = self.message_buffer.pop(message.id, None)
        if context is None:
            # error for a message we never buffered
            log.info('Unconsumed error %s', message)
            return None
        else:
            error = TChannelError.from_code(
                message.code,
                description=message.description,
                tracing=context.tracing,
            )
            context.set_exception(error)
            return error
    else:
        # non-streaming message types pass through untouched
        return message
buffer all the streaming messages based on the message id. Reconstruct all fragments together. :param message: incoming message :return: next complete message or None if streaming is not done
def tree_match(cls, field, string):
    '''
    Given a tree index, retrieves the ids atached to the given prefix,
    think of if as a mechanism for pattern suscription, where two models
    attached to the `a`, `a:b` respectively are found by the `a:b`
    string, because both model's subscription key matches the string.
    '''
    # Empty query matches nothing.
    if not string:
        return set()
    redis = cls.get_redis()
    prefix = '{}:tree_{}'.format(cls.cls_key(), field)
    pieces = string.split(':')
    # One redis key per prefix of the query string: a, a:b, a:b:c, ...
    keys = [prefix + ':' + ':'.join(pieces[:i + 1])
            for i in range(len(pieces))]
    members = redis.sunion(keys)
    models = [cls.get(debyte_string(member)) for member in members]
    return sorted(models, key=lambda model: model.id)
Given a tree index, retrieves the ids atached to the given prefix, think of if as a mechanism for pattern suscription, where two models attached to the `a`, `a:b` respectively are found by the `a:b` string, because both model's subscription key matches the string.
def to_json(self):
    """Exports the object to a JSON friendly dict

    Returns:
        Dict representation of the object
    """
    payload = dict(
        userId=self.user_id,
        username=self.username,
        roles=self.roles,
        authSystem=self.auth_system,
    )
    return payload
Exports the object to a JSON friendly dict Returns: Dict representation of the object
def reserve_items(self, parent_item, *items):
    """Reserve a set of items until a parent item is returned.

    Prevent ``check_out_item()`` from returning any of ``items`` until
    ``parent_item`` is completed or times out.  For each item, if it
    is not already checked out or reserved by some other parent item,
    it is associated with ``parent_item``, and the reservation will be
    released when ``parent_item`` completes or times out.

    Returns a list that is a subset of ``items`` for which we could
    get the reservation.  Raises ``LostLease`` if this queue instance
    no longer owns ``parent_item``.  If any of the items do not exist,
    they are silently ignored.
    """
    conn = redis.StrictRedis(connection_pool=self.pool)
    # Expire stale workers/leases first so the ownership check is fresh.
    self._run_expiration(conn)
    # Lua script runs atomically on the server:
    #   KEYS[1] available items zset, KEYS[2] workers hash,
    #   KEYS[3] reservations set for parent_item;
    #   ARGV[1] parent_item, ARGV[2] our worker id, ARGV[3..] items.
    script = conn.register_script("""
    -- expired?
    if redis.call("hget", KEYS[2], "i" .. ARGV[1]) ~= "w" .. ARGV[2] then
        return -1
    end
    -- loop through each item
    local result = {}
    for i = 3, #ARGV do
        local item = ARGV[i]
        -- item must be available to reserve
        if redis.call("zscore", KEYS[1], item) then
            redis.call("zrem", KEYS[1], item)
            redis.call("sadd", KEYS[3], item)
            result[#result + 1] = item
        end
    end
    return result
    """)
    result = script(keys=[self._key_available(),
                          self._key_workers(),
                          self._key_reservations(parent_item)],
                    args=([parent_item, self._get_worker_id(conn)] +
                          list(items)))
    # -1 is the script's sentinel for "we no longer own parent_item".
    if result == -1:
        raise LostLease(parent_item)
    return result
Reserve a set of items until a parent item is returned. Prevent ``check_out_item()`` from returning any of ``items`` until ``parent_item`` is completed or times out. For each item, if it is not already checked out or reserved by some other parent item, it is associated with ``parent_item``, and the reservation will be released when ``parent_item`` completes or times out. Returns a list that is a subset of ``items`` for which we could get the reservation. Raises ``LostLease`` if this queue instance no longer owns ``parent_item``. If any of the items do not exist, they are silently ignored.
def to_json(self):
    """
    Prepare data for the initial state of the admin-on-rest
    """
    serialized = []
    for endpoint in self.endpoints:
        entry = endpoint.to_dict()
        # Attach per-field type info derived from the endpoint's
        # resource type and backing table.
        entry['fields'] = endpoint.Meta.resource_type.get_type_of_fields(
            endpoint.fields,
            endpoint.Meta.table,
        )
        serialized.append(entry)
    serialized.sort(key=lambda item: item['name'])
    return json.dumps({
        'title': self.title,
        'endpoints': serialized,
    })
Prepare data for the initial state of the admin-on-rest
def skip(cb, msg, attributes):
    """
    Skips applying transforms if data is not geographic.
    """
    # Missing any required attribute in the message: skip.
    if any(attr not in msg for attr in attributes):
        return True
    plot = get_cb_plot(cb)
    is_geographic = getattr(plot, 'geographic', False)
    has_crs = hasattr(plot.current_frame, 'crs')
    return not is_geographic or not has_crs
Skips applying transforms if data is not geographic.
def logged_exception(self, e):
    """Record the exception, but don't log it; it's already been logged

    :param e: Exception to log.
    """
    message = str(e)
    # Only remember each distinct message once.
    if message not in self._errors:
        self._errors.append(message)

    self.set_error_state()
    state = self.buildstate.state
    state.exception_type = str(e.__class__.__name__)
    state.exception = message
Record the exception, but don't log it; it's already been logged :param e: Exception to log.
def project_update(object_id, input_params={}, always_retry=True, **kwargs):
    """
    Invokes the /project-xxxx/update API method.

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject-xxxx%2Fupdate
    """
    resource = '/%s/update' % object_id
    return DXHTTPRequest(resource, input_params,
                         always_retry=always_retry, **kwargs)
Invokes the /project-xxxx/update API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject-xxxx%2Fupdate
def guest_create_network_interface(self, userid, os_version,
                                   guest_networks, active=False):
    """ Create network interface(s) for the guest Linux system. It will
        create the nic for the guest, add NICDEF record into the user
        direct. It will also construct network interface configuration
        files and punch the files to the guest. These files will take
        effect when initializing and configure guest.

        :param str userid: the user id of the guest
        :param str os_version: operating system version of the guest
        :param list guest_networks: a list of network info for the guest.
               It has one dictionary that contain some of the below keys for
               each network, the format is:
               {'ip_addr': (str) IP address or None,
               'dns_addr': (list) dns addresses or None,
               'gateway_addr': (str) gateway address or None,
               'cidr': (str) cidr format,
               'nic_vdev': (str) nic VDEV, 1- to 4- hexadecimal digits or None,
               'nic_id': (str) nic identifier or None,
               'mac_addr': (str) mac address or None, it is only be used when
               changing the guest's user direct. Format should be
               xx:xx:xx:xx:xx:xx, and x is a hexadecimal digit
               'osa_device': (str) OSA address or None}

               Example for guest_networks:
               [{'ip_addr': '192.168.95.10',
               'dns_addr': ['9.0.2.1', '9.0.3.1'],
               'gateway_addr': '192.168.95.1',
               'cidr': "192.168.95.0/24",
               'nic_vdev': '1000',
               'mac_addr': '02:00:00:12:34:56'},
               {'ip_addr': '192.168.96.10',
               'dns_addr': ['9.0.2.1', '9.0.3.1'],
               'gateway_addr': '192.168.96.1',
               'cidr': "192.168.96.0/24",
               'nic_vdev': '1003'}]
        :param bool active: whether add a nic on active guest system
        :returns: guest_networks list, including nic_vdev for each network
        :rtype: list
    """
    if len(guest_networks) == 0:
        errmsg = ("API guest_create_network_interface: "
                  "Network information is required but not provided")
        raise exception.SDKInvalidInputFormat(msg=errmsg)

    # Validate and create the NIC for each requested network.
    for network in guest_networks:
        vdev = nic_id = mac_addr = ip_addr = OSA = None
        if 'nic_vdev' in network.keys():
            vdev = network['nic_vdev']
        if 'osa_device' in network.keys():
            OSA = network['osa_device']
        if 'nic_id' in network.keys():
            nic_id = network['nic_id']

        if (('mac_addr' in network.keys()) and
            (network['mac_addr'] is not None)):
            mac_addr = network['mac_addr']
            if not zvmutils.valid_mac_addr(mac_addr):
                errmsg = ("API guest_create_network_interface: "
                          "Invalid mac address, format should be "
                          "xx:xx:xx:xx:xx:xx, and x is a hexadecimal "
                          "digit")
                raise exception.SDKInvalidInputFormat(msg=errmsg)

        if (('ip_addr' in network.keys()) and
            (network['ip_addr'] is not None)):
            ip_addr = network['ip_addr']
            if not netaddr.valid_ipv4(ip_addr):
                errmsg = ("API guest_create_network_interface: "
                          "Invalid management IP address, it should be "
                          "the value between 0.0.0.0 and 255.255.255.255")
                raise exception.SDKInvalidInputFormat(msg=errmsg)

        if (('dns_addr' in network.keys()) and
            (network['dns_addr'] is not None)):
            if not isinstance(network['dns_addr'], list):
                raise exception.SDKInvalidInputTypes(
                    'guest_config_network',
                    str(list), str(type(network['dns_addr'])))
            for dns in network['dns_addr']:
                if not netaddr.valid_ipv4(dns):
                    errmsg = ("API guest_create_network_interface: "
                              "Invalid dns IP address, it should be the "
                              "value between 0.0.0.0 and 255.255.255.255")
                    raise exception.SDKInvalidInputFormat(msg=errmsg)

        if (('gateway_addr' in network.keys()) and
            (network['gateway_addr'] is not None)):
            if not netaddr.valid_ipv4(
                network['gateway_addr']):
                errmsg = ("API guest_create_network_interface: "
                          "Invalid gateway IP address, it should be "
                          "the value between 0.0.0.0 and 255.255.255.255")
                raise exception.SDKInvalidInputFormat(msg=errmsg)
        if (('cidr' in network.keys()) and
            (network['cidr'] is not None)):
            if not zvmutils.valid_cidr(network['cidr']):
                errmsg = ("API guest_create_network_interface: "
                          "Invalid CIDR, format should be a.b.c.d/n, and "
                          "a.b.c.d is IP address, n is the value "
                          "between 0-32")
                raise exception.SDKInvalidInputFormat(msg=errmsg)

        try:
            # An OSA device is dedicated to the guest; otherwise a plain
            # NIC is created.
            if OSA is None:
                used_vdev = self._networkops.create_nic(userid, vdev=vdev,
                                                        nic_id=nic_id,
                                                        mac_addr=mac_addr,
                                                        active=active)
            else:
                used_vdev = self._networkops.dedicate_OSA(userid, OSA,
                                                          vdev=vdev,
                                                          active=active)
            # Report back the vdev that was actually used.
            network['nic_vdev'] = used_vdev
        except exception.SDKBaseException:
            LOG.error(('Failed to create nic on vm %s') % userid)
            raise

    try:
        # Punch the network configuration files to the guest in one go.
        self._networkops.network_configuration(userid, os_version,
                                               guest_networks,
                                               active=active)
    except exception.SDKBaseException:
        LOG.error(('Failed to set network configuration file on vm %s')
                  % userid)
        raise
    return guest_networks
Create network interface(s) for the guest Linux system. It will create the nic for the guest, add NICDEF record into the user direct. It will also construct network interface configuration files and punch the files to the guest. These files will take effect when initializing and configure guest. :param str userid: the user id of the guest :param str os_version: operating system version of the guest :param list guest_networks: a list of network info for the guest. It has one dictionary that contain some of the below keys for each network, the format is: {'ip_addr': (str) IP address or None, 'dns_addr': (list) dns addresses or None, 'gateway_addr': (str) gateway address or None, 'cidr': (str) cidr format, 'nic_vdev': (str)nic VDEV, 1- to 4- hexadecimal digits or None, 'nic_id': (str) nic identifier or None, 'mac_addr': (str) mac address or None, it is only be used when changing the guest's user direct. Format should be xx:xx:xx:xx:xx:xx, and x is a hexadecimal digit 'osa_device': (str) OSA address or None} Example for guest_networks: [{'ip_addr': '192.168.95.10', 'dns_addr': ['9.0.2.1', '9.0.3.1'], 'gateway_addr': '192.168.95.1', 'cidr': "192.168.95.0/24", 'nic_vdev': '1000', 'mac_addr': '02:00:00:12:34:56'}, {'ip_addr': '192.168.96.10', 'dns_addr': ['9.0.2.1', '9.0.3.1'], 'gateway_addr': '192.168.96.1', 'cidr': "192.168.96.0/24", 'nic_vdev': '1003'}] :param bool active: whether add a nic on active guest system :returns: guest_networks list, including nic_vdev for each network :rtype: list
def extract(self, msg):
    """Return an ordered dictionary of normalized values from ``msg``.

    A message is accepted when every key/value constraint in
    ``self.accept`` matches and none in ``self.reject`` does (an empty
    constraint mapping matches everything).  The keys kept are
    ``self.keys_by_type[msg['type']]`` or, when ``self.keys_by_type`` is
    None, every key of ``msg`` not listed in ``self.omit``.  Values are
    passed through the per-key callables in ``self.normalizers``.

    Returns an empty OrderedDict for rejected messages and for message
    types that have no configured key list.
    """
    def normal(key):
        # Normalize a single value; None passes through untouched.
        v = msg.get(key)
        if v is None:
            return v
        normalizer = self.normalizers.get(key, lambda x: x)
        return normalizer(v)

    def odict(keys):
        return collections.OrderedDict((k, normal(k)) for k in keys)

    def match(m):
        # An empty mapping yields an empty generator, so all()/any()
        # fall back to their identity values (True/False respectively).
        return (msg.get(k) in v for k, v in m.items()) if m else ()

    accept = all(match(self.accept))
    reject = any(match(self.reject))
    if reject or not accept:
        keys = ()
    elif self.keys_by_type is None:
        keys = [k for k in msg.keys() if k not in self.omit]
    else:
        # BUG FIX: .get() returns None for an unknown message type, which
        # previously crashed odict(None); fall back to an empty key list.
        keys = self.keys_by_type.get(msg.get('type')) or ()
    return odict(keys)
Return an ordered dictionary if msg['type'] is in keys_by_type.
def get_collection(self, lang=None, task=None):
    """
    Return the collection that represents a specific language or task.

    Args:
        lang (string): Language code.
        task (string): Task name.
    """
    # Build the collection identifier from whichever selector was given.
    if lang:
        collection_id = "{}{}".format(Downloader.LANG_PREFIX, lang)
    elif task:
        collection_id = "{}{}".format(Downloader.TASK_PREFIX, task)
    else:
        raise ValueError("You should pass either the task or the lang")

    try:
        return self.info(collection_id)
    except ValueError:
        if lang:
            raise LanguageNotSupported(
                "Language {} is not supported".format(collection_id))
        if task:
            raise TaskNotSupported(
                "Task {} is not supported".format(collection_id))
Return the collection that represents a specific language or task. Args: lang (string): Language code. task (string): Task name.
def delete_variable(self, key):
    """Deletes a global variable

    :param key: the key of the global variable to be deleted
    :raises exceptions.RuntimeError: if the global variable is locked
    :raises exceptions.AttributeError: if the global variable does not exist
    """
    key = str(key)
    if self.is_locked(key):
        raise RuntimeError("Global variable is locked")

    with self.__global_lock:
        if key in self.__global_variable_dictionary:
            # Acquire the variable's own lock before removing it, so no
            # concurrent access is in flight while it is deleted.
            access_key = self.lock_variable(key, block=True)
            del self.__global_variable_dictionary[key]
            self.unlock_variable(key, access_key)
            # Drop the bookkeeping entries for this variable as well.
            del self.__variable_locks[key]
            del self.__variable_references[key]
        else:
            raise AttributeError("Global variable %s does not exist!" % str(key))

    logger.debug("Global variable %s was deleted!" % str(key))
Deletes a global variable :param key: the key of the global variable to be deleted :raises exceptions.AttributeError: if the global variable does not exist
def _cmp_by_router_id(local_asn, path1, path2):
    """Select the route received from the peer with the lowest BGP router ID.

    If both paths are eBGP paths, then we do not do any tie breaking, i.e
    we do not pick best-path based on this criteria.
    RFC: http://tools.ietf.org/html/rfc5004
    We pick best path between two iBGP paths as usual.

    Returns the preferred path, or None when this criterion cannot
    decide (both paths local, both eBGP, or equal router IDs).
    Raises ValueError when asked to compare an eBGP with an iBGP path.
    """
    def get_asn(path_source):
        # A None source means the path was originated locally (NC).
        if path_source is None:
            return local_asn
        else:
            return path_source.remote_as

    def get_router_id(path, local_bgp_id):
        path_source = path.source
        if path_source is None:
            return local_bgp_id
        else:
            # ORIGINATOR_ID, when present, takes precedence over the
            # peer's own BGP identifier.
            originator_id = path.get_pattr(BGP_ATTR_TYPE_ORIGINATOR_ID)
            if originator_id:
                return originator_id.value
            return path_source.protocol.recv_open_msg.bgp_identifier

    path_source1 = path1.source
    path_source2 = path2.source

    # If both paths are from NC we have same router Id, hence cannot compare.
    if path_source1 is None and path_source2 is None:
        return None

    asn1 = get_asn(path_source1)
    asn2 = get_asn(path_source2)

    is_ebgp1 = asn1 != local_asn
    is_ebgp2 = asn2 != local_asn
    # If both paths are from eBGP peers, then according to RFC we need
    # not tie break using router id.
    if is_ebgp1 and is_ebgp2:
        return None

    if ((is_ebgp1 is True and is_ebgp2 is False) or
            (is_ebgp1 is False and is_ebgp2 is True)):
        raise ValueError('This method does not support comparing ebgp with'
                         ' ibgp path')

    # At least one path is not coming from NC, so we get local bgp id.
    if path_source1 is not None:
        local_bgp_id = path_source1.protocol.sent_open_msg.bgp_identifier
    else:
        local_bgp_id = path_source2.protocol.sent_open_msg.bgp_identifier

    # Get router ids.
    router_id1 = get_router_id(path1, local_bgp_id)
    router_id2 = get_router_id(path2, local_bgp_id)

    # If both router ids are same/equal we cannot decide.
    # This case is possible since router ids are arbitrary.
    if router_id1 == router_id2:
        return None

    # Select the path with lowest router Id.
    # NOTE(review): local import — presumably deferred to avoid a
    # circular import; confirm before moving it to module level.
    from ryu.services.protocols.bgp.utils.bgp import from_inet_ptoi
    if from_inet_ptoi(router_id1) < from_inet_ptoi(router_id2):
        return path1
    else:
        return path2
Select the route received from the peer with the lowest BGP router ID. If both paths are eBGP paths, then we do not do any tie breaking, i.e we do not pick best-path based on this criteria. RFC: http://tools.ietf.org/html/rfc5004 We pick best path between two iBGP paths as usual.
def discord_to_users(self, memberlist):
    """
    expects a list of discord.py user objects

    returns a list of TrainerDex.py user objects
    """
    member_ids = (member.id for member in memberlist)
    matched = self.get_discord_user(member_ids)
    # Deduplicate owners before returning them as a list.
    owners = {record.owner() for record in matched}
    return list(owners)
expects a list of discord.py user objects returns a list of TrainerDex.py user objects
def get_iam_policy(self, client=None):
    """Retrieve the IAM policy for the bucket.

    See
    https://cloud.google.com/storage/docs/json_api/v1/buckets/getIamPolicy

    If :attr:`user_project` is set, bills the API request to that project.

    :type client: :class:`~google.cloud.storage.client.Client` or
                  ``NoneType``
    :param client: Optional. The client to use.  If not passed, falls back
                   to the ``client`` stored on the current bucket.

    :rtype: :class:`google.api_core.iam.Policy`
    :returns: the policy instance, based on the resource returned from
              the ``getIamPolicy`` API request.
    """
    client = self._require_client(client)

    params = {}
    # Bill the request to the configured project, when set.
    if self.user_project is not None:
        params["userProject"] = self.user_project

    resource = client._connection.api_request(
        method="GET",
        path="%s/iam" % (self.path,),
        query_params=params,
        _target_object=None,
    )
    return Policy.from_api_repr(resource)
Retrieve the IAM policy for the bucket. See https://cloud.google.com/storage/docs/json_api/v1/buckets/getIamPolicy If :attr:`user_project` is set, bills the API request to that project. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the current bucket. :rtype: :class:`google.api_core.iam.Policy` :returns: the policy instance, based on the resource returned from the ``getIamPolicy`` API request.
def write(filepath, data, **kwargs):
    """
    Write a file.

    Supported formats:

    * CSV
    * JSON, JSONL
    * pickle

    Parameters
    ----------
    filepath : str
        Path to the file that should be read. This methods action depends
        mainly on the file extension.
    data : dict or list
        Content that should be written
    kwargs : dict
        Any keywords for the specific file format.

    Returns
    -------
    data : str or bytes

    Raises
    ------
    NotImplementedError
        For YAML, HDF5 and unknown file extensions.
    """
    # Lowercase once; every branch only needs the case-folded path.
    lower = filepath.lower()
    if lower.endswith('.csv'):
        return _write_csv(filepath, data, kwargs)
    elif lower.endswith('.json'):
        return _write_json(filepath, data, kwargs)
    elif lower.endswith('.jsonl'):
        return _write_jsonl(filepath, data, kwargs)
    elif lower.endswith('.pickle'):
        return _write_pickle(filepath, data, kwargs)
    elif lower.endswith(('.yml', '.yaml')):
        raise NotImplementedError('YAML is not supported, because you need '
                                  'PyYAML in Python3. '
                                  'See '
                                  'https://stackoverflow.com/a/42054860/562769'
                                  ' as a guide how to use it.')
    elif lower.endswith(('.h5', '.hdf5')):
        # BUG FIX: this branch previously raised "YAML is not supported"
        # (copy-paste error); it concerns HDF5.
        raise NotImplementedError('HDF5 is not supported. See '
                                  'https://stackoverflow.com/a/41586571/562769'
                                  ' as a guide how to use it.')
    else:
        raise NotImplementedError('File \'{}\' is not known.'.format(filepath))
Write a file. Supported formats: * CSV * JSON, JSONL * pickle Parameters ---------- filepath : str Path to the file that should be read. This methods action depends mainly on the file extension. data : dict or list Content that should be written kwargs : dict Any keywords for the specific file format. Returns ------- data : str or bytes
def xmatch_kdtree(kdtree, extra, extdecl,
                  xmatchdistdeg,
                  closestonly=True):
    '''This cross-matches between `kdtree` and (`extra`, `extdecl`) arrays.

    Returns the indices of the kdtree and the indices of extra, extdecl that
    xmatch successfully.

    Parameters
    ----------

    kdtree : scipy.spatial.CKDTree
        This is a kdtree object generated by the `make_kdtree` function.

    extra,extdecl : array-like
        These are np.arrays of 'external' coordinates in decimal degrees that
        will be cross-matched against the objects in `kdtree`.

    xmatchdistdeg : float
        The match radius to use for the cross-match in decimal degrees.

    closestonly : bool
        If closestonly is True, then this function returns only the closest
        matching indices in (extra, extdecl) for each object in kdtree if
        there are any matches. Otherwise, it returns a list of indices in
        (extra, extdecl) for all matches within xmatchdistdeg between kdtree
        and (extra, extdecl).

    Returns
    -------

    tuple of lists
        Returns a tuple of the form::

            (list of `kdtree` indices matching to external objects,
             list of all `extra`/`extdecl` indices that match to each
             element in `kdtree` within the specified cross-match distance)

    '''

    ext_cosdecl = np.cos(np.radians(extdecl))
    ext_sindecl = np.sin(np.radians(extdecl))
    ext_cosra = np.cos(np.radians(extra))
    ext_sinra = np.sin(np.radians(extra))

    ext_xyz = np.column_stack((ext_cosra*ext_cosdecl,
                               ext_sinra*ext_cosdecl,
                               ext_sindecl))
    # Convert the angular match radius to its chord length on the unit
    # sphere, which is the metric the kdtree works in.
    ext_xyzdist = 2.0 * np.sin(np.radians(xmatchdistdeg)/2.0)

    # get our kdtree
    our_kdt = kdtree

    # get the external kdtree
    ext_kdt = sps.cKDTree(ext_xyz)

    # do a query_ball_tree
    extkd_matchinds = our_kdt.query_ball_tree(ext_kdt, ext_xyzdist)

    ext_matchinds = []
    kdt_matchinds = []

    our_xyz = np.asarray(our_kdt.data)

    for extind, mind in enumerate(extkd_matchinds):

        if len(mind) > 0:

            # our object indices
            kdt_matchinds.append(extind)

            # external object indices
            if closestonly:
                # BUG FIX: query_ball_tree does not order its matches by
                # distance, so mind[0] was not necessarily the closest
                # object — explicitly select the nearest external match.
                diffs = ext_xyz[mind] - our_xyz[extind]
                nearest = int(np.argmin((diffs * diffs).sum(axis=1)))
                ext_matchinds.append(mind[nearest])
            else:
                ext_matchinds.append(mind)

    return kdt_matchinds, ext_matchinds
This cross-matches between `kdtree` and (`extra`, `extdecl`) arrays. Returns the indices of the kdtree and the indices of extra, extdecl that xmatch successfully. Parameters ---------- kdtree : scipy.spatial.CKDTree This is a kdtree object generated by the `make_kdtree` function. extra,extdecl : array-like These are np.arrays of 'external' coordinates in decimal degrees that will be cross-matched against the objects in `kdtree`. xmatchdistdeg : float The match radius to use for the cross-match in decimal degrees. closestonly : bool If closestonly is True, then this function returns only the closest matching indices in (extra, extdecl) for each object in kdtree if there are any matches. Otherwise, it returns a list of indices in (extra, extdecl) for all matches within xmatchdistdeg between kdtree and (extra, extdecl). Returns ------- tuple of lists Returns a tuple of the form:: (list of `kdtree` indices matching to external objects, list of all `extra`/`extdecl` indices that match to each element in `kdtree` within the specified cross-match distance)
def redirect_n_times(n):
    """302 Redirects n times.
    ---
    tags:
      - Redirects
    parameters:
      - in: path
        name: n
        type: int
    produces:
      - text/html
    responses:
      302:
        description: A redirection.
    """
    assert n > 0

    absolute = request.args.get("absolute", "false").lower() == "true"

    # The final hop lands on the plain GET view.
    if n == 1:
        return redirect(url_for("view_get", _external=absolute))

    kind = "absolute" if absolute else "relative"
    return _redirect(kind, n, absolute)
302 Redirects n times. --- tags: - Redirects parameters: - in: path name: n type: int produces: - text/html responses: 302: description: A redirection.
def validate_windows_cred_winexe(host,
                                 username='Administrator',
                                 password=None,
                                 retries=10,
                                 retry_delay=1):
    '''
    Check if the windows credentials are valid.

    Runs ``hostname`` on the remote host via winexe, retrying up to
    ``retries`` times with ``retry_delay`` seconds between attempts.
    BUG FIX: the original loop returned after the first attempt, so
    ``retries`` and ``retry_delay`` were never honored.

    :returns: True as soon as one attempt succeeds, False otherwise.
    '''
    import time  # local import keeps this fix self-contained

    # SECURITY NOTE: the password is interpolated into a shell command
    # string; callers must ensure it cannot contain shell metacharacters.
    cmd = "winexe -U '{0}%{1}' //{2} \"hostname\"".format(
        username,
        password,
        host
    )
    # Keep the password out of the logs.
    logging_cmd = "winexe -U '{0}%XXX-REDACTED-XXX' //{1} \"hostname\"".format(
        username,
        host
    )
    for attempt in range(retries):
        ret_code = win_cmd(
            cmd,
            logging_command=logging_cmd
        )
        if ret_code == 0:
            return True
        # Wait before the next attempt, but not after the last one.
        if attempt < retries - 1:
            time.sleep(retry_delay)
    return False
Check if the windows credentials are valid
def add_bookmark(self, time):
    """Run this function when user adds a new bookmark.

    Parameters
    ----------
    time : tuple of float
        start and end of the new bookmark, in s
    """
    if self.annot is None:  # remove if buttons are disabled
        msg = 'No score file loaded'
        lg.debug(msg)
        error_dialog = QErrorMessage()
        error_dialog.setWindowTitle('Error adding bookmark')
        error_dialog.showMessage(msg)
        error_dialog.exec()
        return

    answer = QInputDialog.getText(self, 'New Bookmark',
                                  'Enter bookmark\'s name')
    # answer is (text, ok_pressed); only act when the dialog was accepted.
    if answer[1]:
        name = answer[0]
        self.annot.add_bookmark(name, time)
        # BUG FIX: log message previously read "...<name>at ..." (missing
        # space before "at").
        lg.info('Added Bookmark ' + name + ' at ' + str(time))
        self.update_annotations()
Run this function when user adds a new bookmark. Parameters ---------- time : tuple of float start and end of the new bookmark, in s
def _set_show_mpls_ldp_path(self, v, load=False):
    """
    Setter method for show_mpls_ldp_path, mapped from YANG variable /brocade_mpls_rpc/show_mpls_ldp_path (rpc)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_show_mpls_ldp_path is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_show_mpls_ldp_path() directly.
    """
    # NOTE: this setter appears to be auto-generated (pyangbind-style)
    # from the brocade-mpls YANG model — prefer regenerating over
    # hand-editing; confirm the generator before changing.
    if hasattr(v, "_utype"):
        # Coerce a typed value back through its underlying type first.
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=show_mpls_ldp_path.show_mpls_ldp_path, is_leaf=True, yang_name="show-mpls-ldp-path", rest_name="show-mpls-ldp-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'full', u'actionpoint': u'showMplsLdpPath'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a descriptive payload telling callers which YANG
        # type the value must be compatible with.
        raise ValueError({
            'error-string': """show_mpls_ldp_path must be of a type compatible with rpc""",
            'defined-type': "rpc",
            'generated-type': """YANGDynClass(base=show_mpls_ldp_path.show_mpls_ldp_path, is_leaf=True, yang_name="show-mpls-ldp-path", rest_name="show-mpls-ldp-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'full', u'actionpoint': u'showMplsLdpPath'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)""",
        })

    self.__show_mpls_ldp_path = t
    if hasattr(self, '_set'):
        self._set()
Setter method for show_mpls_ldp_path, mapped from YANG variable /brocade_mpls_rpc/show_mpls_ldp_path (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_show_mpls_ldp_path is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_show_mpls_ldp_path() directly.
def make_orthographic_map(central_longitude=0, central_latitude=0,
                          figsize=(8, 8),
                          add_land=True, land_color='tan',
                          add_ocean=False, ocean_color='lightblue',
                          grid_lines=True,
                          lat_grid=None,
                          lon_grid=None):
    '''
    Function creates and returns an orthographic map projection using cartopy

    Example
    -------
    >>> map_axis = make_orthographic_map(central_longitude=200,central_latitude=30)

    Optional Parameters
    -----------
    central_longitude : central longitude of projection (default is 0)
    central_latitude : central latitude of projection (default is 0)
    figsize : size of the figure (default is 8x8)
    add_land : chose whether land is plotted on map (default is true)
    land_color : specify land color (default is 'tan')
    add_ocean : chose whether land is plotted on map (default is False,
        change to True to plot)
    ocean_color : specify ocean color (default is 'lightblue')
    grid_lines : chose whether gird lines are plotted on map (default is true)
    lat_grid : specify the latitude grid (default is 30 degree spacing)
    lon_grid : specify the longitude grid (default is 30 degree spacing)
    '''
    if not has_cartopy:
        print('-W- cartopy must be installed to run ipmag.make_orthographic_map')
        return

    # BUG FIX: the grids were mutable list defaults; use None sentinels
    # and build the default grids per call instead.
    if lat_grid is None:
        lat_grid = [-80., -60., -30., 0., 30., 60., 80.]
    if lon_grid is None:
        lon_grid = [-180., -150., -120., -90., -60., -30., 0.,
                    30., 60., 90., 120., 150., 180.]

    plt.figure(figsize=figsize)
    map_projection = ccrs.Orthographic(
        central_longitude=central_longitude,
        central_latitude=central_latitude)
    ax = plt.axes(projection=map_projection)
    ax.set_global()
    if add_ocean:
        ax.add_feature(cartopy.feature.OCEAN, zorder=0,
                       facecolor=ocean_color)
    if add_land:
        ax.add_feature(cartopy.feature.LAND, zorder=0,
                       facecolor=land_color, edgecolor='black')
    if grid_lines:
        ax.gridlines(xlocs=lon_grid, ylocs=lat_grid,
                     linewidth=1, color='black', linestyle='dotted')
    return ax
Function creates and returns an orthographic map projection using cartopy Example ------- >>> map_axis = make_orthographic_map(central_longitude=200,central_latitude=30) Optional Parameters ----------- central_longitude : central longitude of projection (default is 0) central_latitude : central latitude of projection (default is 0) figsize : size of the figure (default is 8x8) add_land : chose whether land is plotted on map (default is true) land_color : specify land color (default is 'tan') add_ocean : chose whether land is plotted on map (default is False, change to True to plot) ocean_color : specify ocean color (default is 'lightblue') grid_lines : chose whether gird lines are plotted on map (default is true) lat_grid : specify the latitude grid (default is 30 degree spacing) lon_grid : specify the longitude grid (default is 30 degree spacing)
def process_track(self, track, frame_size=400, hop_size=160, sr=None,
                  start=0, end=float('inf'), utterance=None, corpus=None):
    """
    Process the track in **offline** mode, in one go.

    Args:
        track (Track): The track to process.
        frame_size (int): The number of samples per frame.
        hop_size (int): The number of samples between two frames.
        sr (int): Use the given sampling rate. If ``None``,
                  uses the native sampling rate from the underlying data.
        start (float): The point within the track in seconds,
                       to start processing from.
        end (float): The point within the track in seconds,
                     to end processing.
        utterance (Utterance): The utterance that is associated with
                               this track, if available.
        corpus (Corpus): The corpus this track is part of, if available.

    Returns:
        np.ndarray: The processed features.

    Raises:
        ValueError: If the selected region contains no samples.
    """
    frame_settings = units.FrameSettings(frame_size, hop_size)

    # Only pass a duration when an explicit end was given; otherwise
    # read through to the end of the track.
    if end != float('inf'):
        samples = track.read_samples(sr=sr, offset=start,
                                     duration=end-start)
    else:
        samples = track.read_samples(sr=sr, offset=start)

    if sr is None:
        sr = track.sampling_rate

    if samples.size <= 0:
        raise ValueError('Track {} has no samples'.format(track.idx))

    # Pad with zeros to match frames
    # (this keeps the trailing partial frame instead of dropping it).
    num_frames = frame_settings.num_frames(samples.size)
    num_pad_samples = (num_frames - 1) * hop_size + frame_size

    if num_pad_samples > samples.size:
        samples = np.pad(samples, (0, num_pad_samples - samples.size),
                         mode='constant', constant_values=0)

    # Get sampling-rate if not given
    # NOTE(review): sr was already defaulted to track.sampling_rate
    # above, so the utterance fallback only triggers when that rate is
    # falsy — confirm whether this is intentional.
    sampling_rate = sr or utterance.sampling_rate

    frames = librosa.util.frame(samples, frame_length=frame_size,
                                hop_length=hop_size).T
    return self.process_frames(frames, sampling_rate, 0, last=True,
                               utterance=utterance, corpus=corpus)
Process the track in **offline** mode, in one go. Args: track (Track): The track to process. frame_size (int): The number of samples per frame. hop_size (int): The number of samples between two frames. sr (int): Use the given sampling rate. If ``None``, uses the native sampling rate from the underlying data. start (float): The point within the track in seconds, to start processing from. end (float): The point within the track in seconds, to end processing. utterance (Utterance): The utterance that is associated with this track, if available. corpus (Corpus): The corpus this track is part of, if available. Returns: np.ndarray: The processed features.
def delete_file(f):
    """Delete the given file

    :param f: the file to delete
    :type f: :class:`JB_File`
    :returns: None
    :rtype: None
    :raises: :class:`OSError`
    """
    path = f.get_fullpath()
    log.info("Deleting file %s", path)
    os.remove(path)
Delete the given file :param f: the file to delete :type f: :class:`JB_File` :returns: None :rtype: None :raises: :class:`OSError`
def get_names_in_namespace_page(namespace_id, offset, count, proxy=None, hostport=None):
    """
    Get a page of names in a namespace
    Returns the list of names on success
    Returns {'error': ...} on error
    """
    assert proxy or hostport, 'Need proxy or hostport'
    if proxy is None:
        proxy = connect_hostport(hostport)

    # Page size is capped server-side; reject over-large requests early.
    assert count <= 100, 'Page too big: {}'.format(count)

    # Expected shape of a successful node response: an object carrying a
    # 'names' array of unique strings.
    names_schema = {
        'type': 'object',
        'properties': {
            'names': {
                'type': 'array',
                'items': {
                    'type': 'string',
                    'uniqueItems': True
                },
            },
        },
        'required': [
            'names',
        ],
    }

    schema = json_response_schema( names_schema )
    resp = {}
    try:
        resp = proxy.get_names_in_namespace(namespace_id, offset, count)
        resp = json_validate(schema, resp)
        if json_is_error(resp):
            return resp

        # must be valid names
        valid_names = []
        for n in resp['names']:
            if not is_name_valid(str(n)):
                # Drop (but log) any malformed name rather than failing
                # the whole page.
                log.error('Invalid name "{}"'.format(str(n)))
            else:
                valid_names.append(n)
        return valid_names
    except ValidationError as e:
        # The node replied, but with a payload we do not understand.
        if BLOCKSTACK_DEBUG:
            log.exception(e)

        resp = {'error': 'Server response did not match expected schema. You are likely communicating with an out-of-date Blockstack node.', 'http_status': 502}
        return resp
    except socket.timeout:
        log.error("Connection timed out")
        resp = {'error': 'Connection to remote host timed out.', 'http_status': 503}
        return resp
    except socket.error as se:
        log.error("Connection error {}".format(se.errno))
        resp = {'error': 'Connection to remote host failed.', 'http_status': 502}
        return resp
    except Exception as ee:
        # Catch-all boundary: report any unexpected failure as an error
        # dict instead of propagating.
        if BLOCKSTACK_DEBUG:
            log.exception(ee)

        log.error("Caught exception while connecting to Blockstack node: {}".format(ee))
        resp = {'error': 'Failed to contact Blockstack node. Try again with `--debug`.', 'http_status': 500}
        return resp
Get a page of names in a namespace Returns the list of names on success Returns {'error': ...} on error
def _from_binary_stdinfo(cls, binary_stream):
    """See base class.

    Unpacks a STANDARD_INFORMATION attribute from ``binary_stream``.
    The stream length decides the layout: the full layout carries four
    extra trailing fields (owner id, security id, quota charged, USN,
    labeled NTFS 3+ below); for the short layout those fields are set
    to None so the resulting object always has the same shape.
    """
    ''' TIMESTAMPS(32)
            Creation time - 8
            File altered time - 8
            MFT/Metadata altered time - 8
            Accessed time - 8
        Flags - 4 (FileInfoFlags)
        Maximum number of versions - 4
        Version number - 4
        Class id - 4
        Owner id - 4 (NTFS 3+)
        Security id - 4 (NTFS 3+)
        Quota charged - 8 (NTFS 3+)
        Update Sequence Number (USN) - 8 (NTFS 3+)
    '''
    if len(binary_stream) == cls._REPR.size: #check if it is v3 by size of the stream
        t_created, t_changed, t_mft_changed, t_accessed, flags, m_ver, ver, \
            c_id, o_id, s_id, quota_charged, usn = cls._REPR.unpack(binary_stream)

        nw_obj = cls(
            ( Timestamps((convert_filetime(t_created),
                          convert_filetime(t_changed),
                          convert_filetime(t_mft_changed),
                          convert_filetime(t_accessed))
                        ),
              FileInfoFlags(flags), m_ver, ver, c_id, o_id, s_id,
              quota_charged, usn))
    else: #if the content is not using v3 extension, added the missing stuff for consistency
        t_created, t_changed, t_mft_changed, t_accessed, flags, m_ver, ver, \
            c_id = cls._REPR_NO_NFTS_3_EXTENSION.unpack(binary_stream)

        nw_obj = cls(
            ( Timestamps((convert_filetime(t_created),
                          convert_filetime(t_changed),
                          convert_filetime(t_mft_changed),
                          convert_filetime(t_accessed))
                        ),
              FileInfoFlags(flags), m_ver, ver, c_id,
              None, None, None, None))

    _MOD_LOGGER.debug("Attempted to unpack STANDARD_INFORMATION from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)

    return nw_obj
See base class.
def getJob(self, jobID):
    """Return the results or status of a single job.

    :param jobID: identifier of the job to look up
    :returns: a :class:`GPJob` handle for the job resource
    """
    job_url = "%s/jobs/%s" % (self._url, jobID)
    return GPJob(url=job_url,
                 securityHandler=self._securityHandler,
                 proxy_port=self._proxy_port,
                 proxy_url=self._proxy_url)
returns the results or status of a job
def find_n75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N75 for each strain. N75 is defined as the largest contig
    such that at least 3/4 of the total genome size is contained in contigs
    equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted
    list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n75_dict: dictionary of strain name: N75
    """
    n75_dict = {}
    for strain, lengths in contig_lengths_dict.items():
        threshold = genome_length_dict[strain] * 0.75
        running_total = 0
        # Walk the reverse-sorted lengths until 3/4 of the genome is
        # covered; that contig's length is the N75.
        for length in lengths:
            running_total += length
            if running_total >= threshold:
                n75_dict[strain] = length
                break
    return n75_dict
Calculate the N75 for each strain. N75 is defined as the largest contig such that at least 3/4 of the total genome size is contained in contigs equal to or larger than this contig :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :param genome_length_dict: dictionary of strain name: total genome length :return: n75_dict: dictionary of strain name: N75
def http_error_default(self, url, fp, errcode, errmsg, headers):
    """Default error handling -- don't raise an exception."""
    full_url = "http:" + url
    return addinfourl(fp, headers, full_url, errcode)
Default error handling -- don't raise an exception.
def _query_init(k, oracle, query, method='all'):
    """A helper function for query-matching function initialization.

    Returns a ``(state, distance)`` pair where ``distance`` is the *squared*
    Euclidean distance between ``query`` and the chosen oracle feature
    vector (the sqrt is skipped on purpose -- only the ordering matters).
    """
    if method == 'all':
        # Compare the query against every frame in the latent state of k's
        # symbol and keep the closest one.
        a = np.subtract(query, [oracle.f_array[t] for t in oracle.latent[oracle.data[k]]])
        dvec = (a * a).sum(axis=1)  # squared distances; sqrt deliberately omitted
        _d = dvec.argmin()
        return oracle.latent[oracle.data[k]][_d], dvec[_d]
    else:
        a = np.subtract(query, oracle.f_array[k])
        dvec = (a * a).sum()  # squared distance; sqrt deliberately omitted
        return k, dvec
A helper function for query-matching function initialization.
def remove_api_key_from_groups(self, api_key, body, **kwargs):  # noqa: E501
    """Remove API key from groups.  # noqa: E501

    An endpoint for removing API key from groups.   **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/api-keys/{apikey-id}/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'`   # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True
    >>> thread = api.remove_api_key_from_groups(api_key, body, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str api_key: The ID of the API key to be removed from the group. (required)
    :param list[str] body: A list of IDs of the groups to be updated. (required)
    :return: UpdatedResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always unwrap the HTTP response and return only the deserialized body.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        # Asynchronous: hand back the request thread; caller uses .get().
        return self.remove_api_key_from_groups_with_http_info(api_key, body, **kwargs)  # noqa: E501
    else:
        (data) = self.remove_api_key_from_groups_with_http_info(api_key, body, **kwargs)  # noqa: E501
        return data
Remove API key from groups. # noqa: E501 An endpoint for removing API key from groups. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/api-keys/{apikey-id}/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.remove_api_key_from_groups(api_key, body, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str api_key: The ID of the API key to be removed from the group. (required) :param list[str] body: A list of IDs of the groups to be updated. (required) :return: UpdatedResponse If the method is called asynchronously, returns the request thread.
def log(**data):
    """RPC method for logging events.

    Extracts the whitelisted event fields from the request body and
    persists them through the history store. Returns None.
    """
    params = data["params"]
    # Only these four fields are persisted; anything else in the request
    # body is ignored.
    entry = {field: params[field]
             for field in ("module", "event", "timestamp", "arguments")}
    # Write the entry to the database.
    history.create(entry)
RPC method for logging events Makes entry with new account creating Return None
def layers(self):
    '''Construct Keras input layers for the given transformer

    Returns
    -------
    layers : {field: keras.layers.Input}
        A dictionary of keras input layers, keyed by the corresponding
        field keys.
    '''
    from keras.layers import Input

    # One Input per field, shaped and typed from the field descriptor.
    return {key: Input(name=key,
                       shape=self.fields[key].shape,
                       dtype=self.fields[key].dtype)
            for key in self.fields}
Construct Keras input layers for the given transformer Returns ------- layers : {field: keras.layers.Input} A dictionary of keras input layers, keyed by the corresponding field keys.
def _init_log(level=logging.DEBUG): """Initialise the logging object. Args: level (int): Logging level. Returns: Logger: Python logging object. """ log = logging.getLogger(__file__) log.setLevel(level) handler = logging.StreamHandler(sys.stdout) handler.setLevel(level) formatter = logging.Formatter('%(asctime)s: %(message)s', '%Y/%m/%d-%H:%M:%S') handler.setFormatter(formatter) log.addHandler(handler) return log
Initialise the logging object. Args: level (int): Logging level. Returns: Logger: Python logging object.
def remove_feature_flag_accounts(self, feature, account_id):
    """
    Remove feature flag.

    Remove feature flag for a given Account, Course, or User.  (Note that the flag must
    be defined on the Account, Course, or User directly.)  The object will then inherit
    the feature flags from a higher account, if any exist.  If this flag was 'on' or 'off',
    then lower-level account flags that were masked by this one will apply again.

    :param feature: name/ID of the feature flag to remove
    :param account_id: ID of the account the flag is defined on
    :return: the API response for the deleted flag (single item)
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - account_id
    """ID"""
    path["account_id"] = account_id

    # REQUIRED - PATH - feature
    """ID"""
    path["feature"] = feature

    self.logger.debug("DELETE /api/v1/accounts/{account_id}/features/flags/{feature} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    # Issue the DELETE against the Canvas feature-flags endpoint.
    return self.generic_request("DELETE", "/api/v1/accounts/{account_id}/features/flags/{feature}".format(**path), data=data, params=params, single_item=True)
Remove feature flag. Remove feature flag for a given Account, Course, or User. (Note that the flag must be defined on the Account, Course, or User directly.) The object will then inherit the feature flags from a higher account, if any exist. If this flag was 'on' or 'off', then lower-level account flags that were masked by this one will apply again.
def parse(input, identifier: str = None, use_cache=False, clear_cache=True, pattern="*.qface", profile=EProfile.FULL):
    """Input can be either a file or directory or a list of files or directory.
    A directory will be parsed recursively. The function returns the resulting
    system. Stores the result of the run in the domain cache named after the
    identifier.
    :param input: file, directory, or list of files/directories to parse
    :param identifier: identifies the parse run. Used to name the cache
        entry (defaults to 'system')
    :param use_cache: read/write the shelve-backed domain cache
    :param clear_cache: clears the domain cache (defaults to true)
    :param pattern: glob pattern used when walking a directory
    :param profile: parser feature profile
    """
    inputs = input if isinstance(input, (list, tuple)) else [input]
    logger.debug('parse input={0}'.format(inputs))
    identifier = 'system' if not identifier else identifier
    system = System()
    cache = None
    if use_cache:
        cache = shelve.open('qface.cache')
        if identifier in cache and clear_cache:
            del cache[identifier]
        if identifier in cache:
            # use the cached domain model
            system = cache[identifier]
    # if domain model not cached generate it
    # NOTE(review): the inputs are parsed even when a cached system was
    # loaded above -- confirm whether that re-parse is intentional.
    for input in inputs:
        path = Path.getcwd() / str(input)
        if path.isfile():
            FileSystem.parse_document(path, system)
        else:
            for document in path.walkfiles(pattern):
                FileSystem.parse_document(document, system)
    if use_cache:
        cache[identifier] = system
    return system
Input can be either a file or directory or a list of files or directory. A directory will be parsed recursively. The function returns the resulting system. Stores the result of the run in the domain cache named after the identifier. :param path: directory to parse :param identifier: identifies the parse run. Used to name the cache :param clear_cache: clears the domain cache (defaults to true)
def is_draft_version(self):
    """Return True if this version is the draft version of a layer.

    A version is the draft when it is the latest version and that latest
    version has not been published. Always returns a real bool (the
    original could return None or a truthy attribute value, which leaked
    implementation detail to callers doing identity checks).
    """
    pub_ver = getattr(self, 'published_version', None)
    latest_ver = getattr(self, 'latest_version', None)
    this_ver = getattr(self, 'this_version', None)
    return bool(this_ver and latest_ver
                and this_ver == latest_ver
                and latest_ver != pub_ver)
Return if this version is the draft version of a layer
def display_image(self, reset=1):
    """Utility routine used to display an updated frame from a framebuffer.

    Fetches (or lazily creates) the framebuffer for ``self.frame``,
    derives a missing height from the buffer size, and asks the
    controller to (re)display the frame.

    Note: ``reset`` is not used in the body; it is kept for interface
    compatibility with existing callers.
    """
    try:
        fb = self.server.controller.get_frame(self.frame)
    except KeyError:
        # the selected frame does not exist, create it
        fb = self.server.controller.init_frame(self.frame)

    # Bug fix: width/height were previously assigned only inside the
    # `if not fb.height` branch, raising NameError whenever the height
    # was already known. Bind them unconditionally.
    width = fb.width
    if not fb.height:
        # Derive the height from the buffer size and known width.
        fb.height = int(len(fb.buffer) / width)
    height = fb.height

    # display the image (True => buffer holds real data; False => clear)
    if (len(fb.buffer) > 0) and (height > 0):
        self.server.controller.display(self.frame, width, height, True)
    else:
        self.server.controller.display(self.frame, fb.width, fb.height, False)
Utility routine used to display an updated frame from a framebuffer.
def get_gain(data, attr, class_attr,
    method=DEFAULT_DISCRETE_METRIC,
    only_sub=0,
    prefer_fewer_values=False,
    entropy_func=None):
    """
    Calculates the information gain (reduction in entropy) that would
    result by splitting the data on the chosen attribute (attr).

    Parameters:

        prefer_fewer_values := Weights the gain by the count of the
            attribute's unique values. If multiple attributes have the same
            gain, but one has slightly fewer attributes, this will cause
            the one with fewer attributes to be preferred.
    """
    entropy_func = entropy_func or entropy
    val_freq = defaultdict(float)
    subset_entropy = 0.0

    # Calculate the frequency of each of the values in the target attribute.
    for record in data:
        val_freq[record.get(attr)] += 1.0

    # Total record count, hoisted out of the loop below. Previously
    # `sum(val_freq.values())` was recomputed for every distinct value,
    # making the weighting loop quadratic in the number of values.
    total_freq = sum(val_freq.values())

    # Calculate the sum of the entropy for each subset of records weighted
    # by their probability of occuring in the training set.
    for val in val_freq.keys():
        val_prob = val_freq[val] / total_freq
        data_subset = [record for record in data if record.get(attr) == val]
        e = entropy_func(data_subset, class_attr, method=method)
        subset_entropy += val_prob * e

    if only_sub:
        return subset_entropy

    # Subtract the entropy of the chosen attribute from the entropy of the
    # whole data set with respect to the target attribute (and return it)
    main_entropy = entropy_func(data, class_attr, method=method)

    # Prefer gains on attributes with fewer values: pair the gain with
    # 1/num_values so that ties break toward fewer distinct values.
    if prefer_fewer_values:
        return ((main_entropy - subset_entropy), 1. / len(val_freq))
    else:
        return (main_entropy - subset_entropy)
Calculates the information gain (reduction in entropy) that would result by splitting the data on the chosen attribute (attr). Parameters: prefer_fewer_values := Weights the gain by the count of the attribute's unique values. If multiple attributes have the same gain, but one has slightly fewer attributes, this will cause the one with fewer attributes to be preferred.
def check_available(self):
    """
    Check for availability of a layer and provide run metrics.

    Returns a ``(success, message)`` tuple and persists a ``Check`` row
    recording the outcome and response time. (Python 2 code: uses the
    old ``except Type, name`` syntax.)
    """
    success = True
    start_time = datetime.datetime.utcnow()
    message = ''
    LOGGER.debug('Checking layer id %s' % self.id)

    # Detach the post_save handler so saving during the check does not
    # recursively trigger another check.
    signals.post_save.disconnect(layer_post_save, sender=Layer)

    try:
        self.update_thumbnail()
    except ValueError, err:
        # caused by update_thumbnail()
        # self.href is empty in arcserver.ExportMap
        if str(err).startswith("unknown url type:"):
            LOGGER.debug('Thumbnail can not be updated: %s' % str(err))
    except Exception, err:
        # Any other failure marks the layer unavailable; the message is
        # stored on the Check record below.
        message = str(err)
        success = False

    # Re-attach the handler we detached above.
    signals.post_save.connect(layer_post_save, sender=Layer)

    end_time = datetime.datetime.utcnow()
    delta = end_time - start_time
    response_time = '%s.%s' % (delta.seconds, delta.microseconds)

    check = Check(
        content_object=self,
        success=success,
        response_time=response_time,
        message=message
    )
    check.save()
    LOGGER.debug('Layer checked in %s seconds, status is %s' % (response_time, success))
    return success, message
Check for availability of a layer and provide run metrics.
def _handle_rundebug_from_shell(cmd_line):
    """
    Handles all commands that take a filename and 0 or more extra arguments.
    Passes the command to backend.

    (Debugger plugin may also use this method)
    """
    command, args = parse_shell_command(cmd_line)

    if len(args) >= 1:
        # Persist any unsaved editors before handing control to the backend.
        get_workbench().get_editor_notebook().save_all_named_editors()

        # Ev3Remote* commands are aliases for Run/Debug with a remote
        # environment injected below.
        origcommand=command
        if command == "Ev3RemoteRun":
            command="Run"
        if command == "Ev3RemoteDebug":
            command="Debug"

        cmd = ToplevelCommand(command=command,
                              filename=args[0],
                              args=args[1:])

        if origcommand == "Ev3RemoteRun" or origcommand == "Ev3RemoteDebug":
            # Tell the backend to target the remote EV3 brick.
            cmd.environment={ "EV3MODE" : "remote", "EV3IP": get_workbench().get_option("ev3.ip") }

        # Resolve the filename relative to the backend's working directory.
        if os.path.isabs(cmd.filename):
            cmd.full_filename = cmd.filename
        else:
            runner=get_runner()
            cmd.full_filename = os.path.join(runner.get_cwd(), cmd.filename)

        if command in ["Run", "run", "Debug", "debug"]:
            # tokenize.open honours the PEP 263 encoding declaration.
            with tokenize.open(cmd.full_filename) as fp:
                cmd.source = fp.read()

        get_runner().send_command(cmd)
    else:
        print_error_in_backend("Command '{}' takes at least one argument".format(command))
Handles all commands that take a filename and 0 or more extra arguments. Passes the command to backend. (Debugger plugin may also use this method)
def delete(self):
    """
    Deletes the current instance. This assumes that we know what
    we're doing, and have a primary key in our data already.
    If this is a new instance, then we'll let the user know with an
    Exception

    Returns:
        bool: True on successful deletion.

    Raises:
        Exception: if this is a new (never-saved) instance.
    """
    if self._new:
        raise Exception("This is a new object, %s not in data, \
            indicating this entry isn't stored." % self.primaryKey)
    # Delete by primary key, honouring the model's configured write
    # durability on the active connection.
    r.table(self.table).get(self._data[self.primaryKey]) \
        .delete(durability=self.durability).run(self._conn)
    return True
Deletes the current instance. This assumes that we know what we're doing, and have a primary key in our data already. If this is a new instance, then we'll let the user know with an Exception
def load_metascenario(self, scenario_list):
    """Load one or more scenarios from a list.

    Each entry in scenario_list should be a dict containing at least a
    name key and an optional tile key and args key.  If tile is present
    and its value is not None, the scenario specified will be loaded into
    the given tile only.  Otherwise it will be loaded into the entire
    device.

    If the args key is specified is will be passed as keyword arguments
    to load_scenario.

    Args:
        scenario_list (list): A list of dicts for each scenario that should
            be loaded.

    Raises:
        DataError: if an entry is missing its ``name`` or names a tile
            address that does not exist on this device.
    """
    for scenario in scenario_list:
        name = scenario.get('name')
        if name is None:
            raise DataError("Scenario in scenario list is missing a name parameter", scenario=scenario)

        tile_address = scenario.get('tile')
        args = scenario.get('args', {})

        # Default target is the whole device; a tile address narrows it.
        dest = self
        if tile_address is not None:
            dest = self._tiles.get(tile_address)

            if dest is None:
                raise DataError("Attempted to load a scenario into a tile address that does not exist", address=tile_address, valid_addresses=list(self._tiles))

        dest.load_scenario(name, **args)
Load one or more scenarios from a list. Each entry in scenario_list should be a dict containing at least a name key and an optional tile key and args key. If tile is present and its value is not None, the scenario specified will be loaded into the given tile only. Otherwise it will be loaded into the entire device. If the args key is specified is will be passed as keyword arguments to load_scenario. Args: scenario_list (list): A list of dicts for each scenario that should be loaded.
def _set_redist_bgp(self, v, load=False):
    """
    Setter method for redist_bgp, mapped from YANG variable /isis_state/router_isis_config/is_address_family_v4/redist_bgp (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_redist_bgp is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_redist_bgp() directly.

    YANG Description: IS-IS redistribution config for BGP routes
    """
    # Unwrap helper-typed values before validation (pyangbind convention).
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/coerce v into the generated container type.
        t = YANGDynClass(v,base=redist_bgp.redist_bgp, is_container='container', presence=False, yang_name="redist-bgp", rest_name="redist-bgp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-bgp-redistribution', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        # Re-raise with the structured error payload expected by callers.
        raise ValueError({
            'error-string': """redist_bgp must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=redist_bgp.redist_bgp, is_container='container', presence=False, yang_name="redist-bgp", rest_name="redist-bgp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-bgp-redistribution', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)""",
        })

    self.__redist_bgp = t
    # Notify the parent container, if it supports change notification.
    if hasattr(self, '_set'):
        self._set()
Setter method for redist_bgp, mapped from YANG variable /isis_state/router_isis_config/is_address_family_v4/redist_bgp (container) If this variable is read-only (config: false) in the source YANG file, then _set_redist_bgp is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_redist_bgp() directly. YANG Description: IS-IS redistribution config for BGP routes
def unorm(v1):
    """
    Normalize a double precision 3-vector and return its magnitude.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/unorm_c.html

    :param v1: Vector to be normalized.
    :type v1: 3-Element Array of floats
    :return: Unit vector of v1, Magnitude of v1.
    :rtype: tuple
    """
    # Marshal the input into ctypes arrays and call into the CSPICE shared
    # library; outputs are written into vout/vmag by reference.
    v1 = stypes.toDoubleVector(v1)
    vout = stypes.emptyDoubleVector(3)
    vmag = ctypes.c_double()
    libspice.unorm_c(v1, vout, ctypes.byref(vmag))
    return stypes.cVectorToPython(vout), vmag.value
Normalize a double precision 3-vector and return its magnitude. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/unorm_c.html :param v1: Vector to be normalized. :type v1: 3-Element Array of floats :return: Unit vector of v1, Magnitude of v1. :rtype: tuple
def record_move_fields(rec, tag, field_positions_local,
                       field_position_local=None):
    """
    Move some fields to the position specified by 'field_position_local'.

    :param rec: a record structure as returned by create_record()
    :param tag: the tag of the fields to be moved
    :param field_positions_local: the positions of the fields to move
    :param field_position_local: insert the field before that
                                 field_position_local. If unspecified, appends
                                 the fields
    :return: the field_position_local is the operation was successful
    """
    # Deleting returns the removed fields, which are then re-inserted at
    # the requested position (or appended when no position is given).
    removed_fields = record_delete_fields(
        rec, tag, field_positions_local=field_positions_local)
    return record_add_fields(
        rec, tag, removed_fields,
        field_position_local=field_position_local)
Move some fields to the position specified by 'field_position_local'. :param rec: a record structure as returned by create_record() :param tag: the tag of the fields to be moved :param field_positions_local: the positions of the fields to move :param field_position_local: insert the field before that field_position_local. If unspecified, appends the fields :return: the field_position_local is the operation was successful
def nonparabolicity(self, **kwargs):
    '''
    Returns the Kane band nonparabolicity parameter for the Gamma-valley.

    Keyword arguments are forwarded to `Eg_Gamma` and `meff_e_Gamma`;
    ``T`` (temperature, default 300.) is also read directly.
    '''
    Eg = self.Eg_Gamma(**kwargs)
    meff = self.meff_e_Gamma(**kwargs)
    T = kwargs.get('T', 300.)
    # NOTE(review): computes kT/Eg * (1 - meff)^2. This assumes `meff` is
    # expressed in units of the free-electron mass and that `k` (module
    # constant) carries units consistent with Eg -- confirm against the
    # module's definition of `k`.
    return k*T/Eg * (1 - meff)**2
Returns the Kane band nonparabolicity parameter for the Gamma-valley.
def insert_instance_template(self, body, request_id=None, project_id=None):
    """
    Inserts instance template using body specified
    Must be called with keyword arguments rather than positional.

    :param body: Instance template representation as object according to
        https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates
    :type body: dict
    :param request_id: Optional, unique request_id that you might add to achieve
        full idempotence (for example when client call times out repeating the request
        with the same request id will not create a new instance template again)
        It should be in UUID format as defined in RFC 4122
    :type request_id: str
    :param project_id: Optional, Google Cloud Platform project ID where the
        Compute Engine Instance exists. If set to None or missing,
        the default project_id from the GCP connection is used.
    :type project_id: str
    :return: None
    """
    # NOTE(review): the project_id fallback described above is presumably
    # applied by a decorator on this method not visible here -- confirm.
    response = self.get_conn().instanceTemplates().insert(
        project=project_id,
        body=body,
        requestId=request_id
    ).execute(num_retries=self.num_retries)
    try:
        # The insert call is asynchronous; its operation name is polled below.
        operation_name = response["name"]
    except KeyError:
        raise AirflowException(
            "Wrong response '{}' returned - it should contain "
            "'name' field".format(response))
    self._wait_for_operation_to_complete(project_id=project_id,
                                         operation_name=operation_name)
Inserts instance template using body specified Must be called with keyword arguments rather than positional. :param body: Instance template representation as object according to https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates :type body: dict :param request_id: Optional, unique request_id that you might add to achieve full idempotence (for example when client call times out repeating the request with the same request id will not create a new instance template again) It should be in UUID format as defined in RFC 4122 :type request_id: str :param project_id: Optional, Google Cloud Platform project ID where the Compute Engine Instance exists. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: None
def skullstrip_template(dset,template,prefix=None,suffix=None,dilate=0):
    '''Takes the raw anatomy ``dset``, aligns it to a template brain, and applies a templated skullstrip. Should produce fairly reliable skullstrips as long as there is a decent amount of normal brain and the overall shape of the brain is normal-ish

    :param dset: raw anatomical dataset to strip
    :param template: template brain to align against
    :param prefix: output filename (default: ``dset`` + ``suffix``)
    :param suffix: suffix used to build the default prefix
    :param dilate: currently unused
    '''
    # TODO(review): use `is None` instead of `==None`; `dilate` is never read.
    if suffix==None:
        suffix = '_sstemplate'
    if prefix==None:
        prefix = nl.suffix(dset,suffix)
    # Skip the (expensive) pipeline when the output already exists.
    if not os.path.exists(prefix):
        with nl.notify('Running template-based skull-strip on %s' % dset):
            dset = os.path.abspath(dset)
            template = os.path.abspath(template)
            # All intermediate products live in a scratch directory.
            # NOTE(review): tmp_dir is not removed if any step raises -- a
            # try/finally around the pipeline would avoid litter.
            tmp_dir = tempfile.mkdtemp()
            cwd = os.getcwd()
            with nl.run_in(tmp_dir):
                # Affine-align the template to the subject, then non-linearly
                # warp it, and use the warped template as a brain mask.
                nl.affine_align(template,dset,skull_strip=None,cost='mi',opts=['-nmatch','100%'])
                nl.run(['3dQwarp','-minpatch','20','-penfac','10','-noweight','-source',nl.suffix(template,'_aff'),'-base',dset,'-prefix',nl.suffix(template,'_qwarp')],products=nl.suffix(template,'_qwarp'))
                info = nl.dset_info(nl.suffix(template,'_qwarp'))
                max_value = info.subbricks[0]['max']
                # Keep voxels where the warped template exceeds 5% of its max.
                nl.calc([dset,nl.suffix(template,'_qwarp')],'a*step(b-%f*0.05)'%max_value,prefix)
                shutil.move(prefix,cwd)
            shutil.rmtree(tmp_dir)
Takes the raw anatomy ``dset``, aligns it to a template brain, and applies a templated skullstrip. Should produce fairly reliable skullstrips as long as there is a decent amount of normal brain and the overall shape of the brain is normal-ish
def is_cell_separator(self, cursor=None, block=None):
    """Return True if cursor (or text block) is on a block separator.

    Exactly one of ``cursor`` or ``block`` must be provided; ``cursor``
    takes precedence when both are given.
    """
    assert cursor is not None or block is not None
    if cursor is not None:
        # Work on a copy so the caller's cursor selection is untouched.
        cursor0 = QTextCursor(cursor)
        cursor0.select(QTextCursor.BlockUnderCursor)
        text = to_text_string(cursor0.selectedText())
    else:
        text = to_text_string(block.text())

    if self.cell_separators is None:
        return False
    else:
        # A line is a separator when it starts (ignoring leading
        # whitespace) with one of the configured separator strings.
        return text.lstrip().startswith(self.cell_separators)
Return True if cursor (or text block) is on a block separator
def exec_workflow(self, model, record_id, signal):
    """Execute the workflow `signal` on
    the instance having the ID `record_id` of `model`.

    *Python 2:*

    :raise: :class:`odoorpc.error.RPCError`
    :raise: :class:`odoorpc.error.InternalError` (if not logged)
    :raise: `urllib2.URLError` (connection error)

    *Python 3:*

    :raise: :class:`odoorpc.error.RPCError`
    :raise: :class:`odoorpc.error.InternalError` (if not logged)
    :raise: `urllib.error.URLError` (connection error)
    """
    # Workflows were dropped from Odoo 11; raising (not warning) makes the
    # unsupported call impossible to miss.
    if tools.v(self.version)[0] >= 11:
        raise DeprecationWarning(
            u"Workflows have been removed in Odoo >= 11.0")
    self._check_logged_user()
    # Execute the workflow query
    args_to_send = [self.env.db, self.env.uid, self._password,
                    model, signal, record_id]
    data = self.json(
        '/jsonrpc',
        {'service': 'object',
         'method': 'exec_workflow',
         'args': args_to_send})
    return data.get('result')
Execute the workflow `signal` on the instance having the ID `record_id` of `model`. *Python 2:* :raise: :class:`odoorpc.error.RPCError` :raise: :class:`odoorpc.error.InternalError` (if not logged) :raise: `urllib2.URLError` (connection error) *Python 3:* :raise: :class:`odoorpc.error.RPCError` :raise: :class:`odoorpc.error.InternalError` (if not logged) :raise: `urllib.error.URLError` (connection error)
def set_row_min_height(self, y: int, min_height: int):
    """Record *min_height* as the minimum height for blocks in row *y*.

    Raises IndexError when *y* is negative.
    """
    if not y >= 0:
        raise IndexError('y < 0')
    self._min_heights[y] = min_height
Sets a minimum height for blocks in the row with coordinate y.
def loadTopicPageFromFile(self, fname):
    """
    Load the topic page from an existing JSON file.

    :param fname: path to an existing UTF-8 encoded JSON file
    :raises FileNotFoundError: if *fname* does not exist

    The parsed content is stored on ``self.topicPage``.
    """
    # Explicit check instead of `assert` (asserts are stripped under -O);
    # FileNotFoundError matches what open() itself would raise.
    if not os.path.exists(fname):
        raise FileNotFoundError(fname)
    # `with` ensures the handle is closed -- the original leaked it.
    with open(fname, "r", encoding="utf-8") as f:
        self.topicPage = json.load(f)
load topic page from an existing file
def to_xy_arrays(self, dtype=np.float32):
    """
    Convert this object to an iterable of ``(M,2)`` arrays of points.

    This is the inverse of
    :func:`imgaug.augmentables.lines.LineStringsOnImage.from_xy_array`.

    Parameters
    ----------
    dtype : numpy.dtype, optional
        Desired output datatype of the ndarray.

    Returns
    -------
    list of ndarray
        The arrays of point coordinates, each given as ``(M,2)``.
    """
    # Local import, presumably to avoid a circular import at module load
    # time -- confirm before hoisting to the top of the file.
    from .. import dtypes as iadt
    # Copy first so converting the dtype never mutates the line strings'
    # own coordinate arrays.
    return [iadt.restore_dtypes_(np.copy(ls.coords), dtype)
            for ls in self.line_strings]
Convert this object to an iterable of ``(M,2)`` arrays of points. This is the inverse of :func:`imgaug.augmentables.lines.LineStringsOnImage.from_xy_array`. Parameters ---------- dtype : numpy.dtype, optional Desired output datatype of the ndarray. Returns ------- list of ndarray The arrays of point coordinates, each given as ``(M,2)``.
def create_audit_event(self, code='AUDIT'):
    """Creates a generic auditing Event logging the changes between saves
    and the initial data in creates.

    Kwargs:
        code (str): The code to set the new Event to.

    Returns:
        Event: A new event with relevant info inserted into it
    """
    event = self._meta.event_model(
        code=code,
        model=self.__class__.__name__,
    )

    # Use the logged in User, if possible
    if current_user:
        event.created_by = current_user.get_id()

    # Mirror this model's relations and changed fields onto the event.
    self.copy_foreign_keys(event)
    self.populate_audit_fields(event)

    return event
Creates a generic auditing Event logging the changes between saves and the initial data in creates. Kwargs: code (str): The code to set the new Event to. Returns: Event: A new event with relevant info inserted into it
def convert_bytes(n):
    """
    Render a byte count using a binary-unit suffix ('K', 'M', etc.).

    Values below 1024 are returned as '<n>B'; larger values are scaled to
    the biggest fitting unit and shown with one decimal place.
    """
    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    # (symbol, factor) pairs: K=2**10, M=2**20, ...
    scale = [(s, 1 << ((i + 1) * 10)) for i, s in enumerate(symbols)]
    # Try the largest unit first so the biggest fitting one wins.
    for symbol, factor in reversed(scale):
        if n >= factor:
            return '%.1f%s' % (n / float(factor), symbol)
    return "%sB" % n
Convert a size number to a human-readable string with a binary-unit suffix ('K', 'M', etc.)
def dimension_range(lower, upper, hard_range, soft_range, padding=None, log=False):
    """
    Computes the range along a dimension by combining the data range
    with the Dimension soft_range and range.

    Precedence: padded data range < soft_range (may only widen) <
    hard_range (always wins when finite).
    """
    # Pad the raw data range first (log-aware when log=True).
    lower, upper = range_pad(lower, upper, padding, log)
    # soft_range can only extend the range, never shrink it.
    lower = max_range([(lower, None), (soft_range[0], None)])[0]
    upper = max_range([(None, upper), (None, soft_range[1])])[1]
    dmin, dmax = hard_range
    # Finite hard bounds override everything else.
    lower = lower if dmin is None or not isfinite(dmin) else dmin
    upper = upper if dmax is None or not isfinite(dmax) else dmax
    return lower, upper
Computes the range along a dimension by combining the data range with the Dimension soft_range and range.
def delete_files(self, selections) -> None:
    """Delete the network files corresponding to the given selections
    (e.g. a |list| of |str| objects or a |Selections| object)."""
    try:
        currentpath = self.currentpath
        for selection in selections:
            name = str(selection)
            # The pseudo-selection `complete` has no file of its own.
            if name == 'complete':
                continue
            if not name.endswith('.py'):
                name += '.py'
            path = os.path.join(currentpath, name)
            os.remove(path)
    except BaseException:
        # Deliberately broad: any failure is re-raised with an enriched
        # message by the project's exception helper.
        objecttools.augment_excmessage(
            f'While trying to remove the network files of '
            f'selections `{selections}`')
Delete the network files corresponding to the given selections (e.g. a |list| of |str| objects or a |Selections| object).
def find_proxy_plugin(component, plugin_name):
    """ Attempt to find a proxy plugin provided by a specific component

    Args:
        component (string): The name of the component that provides the plugin
        plugin_name (string): The name of the plugin to load

    Returns:
        TileBusProxyPlugin: The plugin class, if found

    Raises:
        DataError: if no matching plugin is registered for the component.
    """
    reg = ComponentRegistry()

    # Search only proxy-plugin extensions registered by this component.
    plugins = reg.load_extensions('iotile.proxy_plugin', comp_filter=component, class_filter=TileBusProxyPlugin, product_name='proxy_plugin')

    for _name, plugin in plugins:
        # Match on the plugin class's own name, not its registration name.
        if plugin.__name__ == plugin_name:
            return plugin

    raise DataError("Could not find proxy plugin module in registered components or installed distributions", component=component, name=plugin_name)
Attempt to find a proxy plugin provided by a specific component Args: component (string): The name of the component that provides the plugin plugin_name (string): The name of the plugin to load Returns: TileBusProxyPlugin: The plugin, if found, otherwise raises DataError
def makemigrations(application, merge=False, dry_run=False, empty=False, extra_applications=None):
    """
    Generate migrations

    Runs Django's ``makemigrations`` for ``application`` and, optionally,
    for each app in ``extra_applications`` (a single app label or a list
    of labels; ``text_type`` keeps this Python 2/3 compatible).
    """
    from django.core.management import call_command

    apps = [application]
    if extra_applications:
        if isinstance(extra_applications, text_type):
            apps += [extra_applications]
        elif isinstance(extra_applications, list):
            apps += extra_applications

    for app in apps:
        call_command('makemigrations', *(app,), merge=merge, dry_run=dry_run, empty=empty)
Generate migrations
def get_prev_sibling_tags(mention):
    """Return the HTML tag of the Mention's previous siblings.

    Previous siblings are Mentions which are at the same level in the HTML
    tree as the given mention, but are declared before the given mention.
    If a candidate is passed in, only the previous siblings of its first
    Mention are considered in the calculation.

    The tags are returned in document order (earliest sibling first).

    :param mention: The Mention to evaluate
    :rtype: list of strings
    """
    span = _to_span(mention)
    prev_sibling_tags = []
    node = _get_node(span.sentence)
    # Walk backwards through the siblings, appending as we go; a single
    # reverse() at the end restores document order. The original inserted
    # at index 0 inside the loop, which is O(n^2) in the sibling count.
    while node.getprevious() is not None:
        node = node.getprevious()
        prev_sibling_tags.append(str(node.tag))
    prev_sibling_tags.reverse()
    return prev_sibling_tags
Return the HTML tag of the Mention's previous siblings. Previous siblings are Mentions which are at the same level in the HTML tree as the given mention, but are declared before the given mention. If a candidate is passed in, only the previous siblings of its first Mention are considered in the calculation. :param mention: The Mention to evaluate :rtype: list of strings
def setup_icons(self, ):
    """Set all icons on buttons

    Currently only the release button receives the floppy-disk save icon.

    :returns: None
    :rtype: None
    :raises: None
    """
    floppy_icon = get_icon('glyphicons_446_floppy_save.png', asicon=True)
    self.release_pb.setIcon(floppy_icon)
Set all icons on buttons :returns: None :rtype: None :raises: None
def stop(self):
    """Stop Modis and log it out of Discord."""

    self.button_toggle_text.set("Start Modis")
    self.state = "off"

    logger.info("Stopping Discord Modis")

    from ._client import client
    # The client runs on its own event loop/thread, so the logout
    # coroutine must be scheduled onto that loop rather than awaited here.
    asyncio.run_coroutine_threadsafe(client.logout(), client.loop)

    # Reset the GUI status indicator to "stopped".
    self.status_bar.set_status(0)
Stop Modis and log it out of Discord.
def check_settings_for_differences(old, new, as_bool=False, as_tri=False):
    """
    Returns a subset of the env dictionary keys that differ,
    either being added, deleted or changed between old and new.

    With ``as_bool=True`` returns just whether anything differs (short-
    circuiting as early as possible). With ``as_tri=True`` returns the
    ``(added, changed, deleted)`` key sets separately. The two flags are
    mutually exclusive. (Python 2 code: uses ``iterkeys``.)
    """
    assert not as_bool or not as_tri
    old = old or {}
    new = new or {}
    # Keys present in both but whose values differ.
    changes = set(k for k in set(new.iterkeys()).intersection(old.iterkeys()) if new[k] != old[k])
    if changes and as_bool:
        return True
    # Keys only in new.
    added_keys = set(new.iterkeys()).difference(old.iterkeys())
    if added_keys and as_bool:
        return True
    if not as_tri:
        changes.update(added_keys)
    # Keys only in old.
    deled_keys = set(old.iterkeys()).difference(new.iterkeys())
    if deled_keys and as_bool:
        return True
    if as_bool:
        return False
    if not as_tri:
        changes.update(deled_keys)
    if as_tri:
        return added_keys, changes, deled_keys
    return changes
Returns a subset of the env dictionary keys that differ, either being added, deleted or changed between old and new.
def register_iq_response_future(self, from_, id_, fut):
        """
        Register a future `fut` for an IQ stanza with type ``result`` or
        ``error`` from the :class:`~aioxmpp.JID` `from_` with the id `id_`.

        If an IQ stanza of type ``result`` matches, it is set as the result
        of the future. If an IQ stanza of type ``error`` matches, the
        stanza's error field is converted to an exception and set as the
        exception of the future.

        The future might also receive different exceptions:

        * :class:`.errors.ErroneousStanza`, if the response stanza received
          could not be parsed.

          Note that this exception is not emitted if the ``from`` address of
          the stanza is unset, because the code cannot determine whether a
          sender deliberately used an erroneous address to make parsing fail
          or no sender address was used. In the former case, an attacker
          could use that to inject a stanza which would be taken as a stanza
          from the peer server. Thus, the future will never be fulfilled in
          these cases.

          Also note that this exception does not derive from
          :class:`.errors.XMPPError`, as it cannot provide the same
          attributes. Instead, it derives from :class:`.errors.StanzaError`,
          from which :class:`.errors.XMPPError` also derives; to catch all
          possible stanza errors, catching :class:`.errors.StanzaError` is
          sufficient and future-proof.

        * :class:`ConnectionError` if the stream is :meth:`stop`\\ -ped
          (only if SM is not enabled) or :meth:`close`\\ -ed.

        * Any :class:`Exception` which may be raised from
          :meth:`~.protocol.XMLStream.send_xso`, which are generally also
          :class:`ConnectionError` or at least :class:`OSError` subclasses.
        """
        # Wrap the future so stanza-level errors are converted into
        # exceptions before the future is resolved.
        listener = StanzaErrorAwareListener(
            callbacks.FutureListener(fut)
        )
        self._iq_response_map.add_listener((from_, id_), listener)
        self._logger.debug("iq response future registered: from=%r, id=%r",
                           from_, id_)
Register a future `fut` for an IQ stanza with type ``result`` or ``error`` from the :class:`~aioxmpp.JID` `from_` with the id `id_`. If the type of the IQ stanza is ``result``, the stanza is set as result to the future. If the type of the IQ stanza is ``error``, the stanzas error field is converted to an exception and set as the exception of the future. The future might also receive different exceptions: * :class:`.errors.ErroneousStanza`, if the response stanza received could not be parsed. Note that this exception is not emitted if the ``from`` address of the stanza is unset, because the code cannot determine whether a sender deliberately used an erroneous address to make parsing fail or no sender address was used. In the former case, an attacker could use that to inject a stanza which would be taken as a stanza from the peer server. Thus, the future will never be fulfilled in these cases. Also note that this exception does not derive from :class:`.errors.XMPPError`, as it cannot provide the same attributes. Instead, it dervies from :class:`.errors.StanzaError`, from which :class:`.errors.XMPPError` also derives; to catch all possible stanza errors, catching :class:`.errors.StanzaError` is sufficient and future-proof. * :class:`ConnectionError` if the stream is :meth:`stop`\\ -ped (only if SM is not enabled) or :meth:`close`\\ -ed. * Any :class:`Exception` which may be raised from :meth:`~.protocol.XMLStream.send_xso`, which are generally also :class:`ConnectionError` or at least :class:`OSError` subclasses.
def share(self, plotters, keys=None, draw=None, auto_update=False):
        """
        Share the formatoptions of this plotter with others

        This method shares the formatoptions of this :class:`Plotter`
        instance with others to make sure that, if the formatoption of this
        changes, those of the others change as well

        Parameters
        ----------
        plotters: list of :class:`Plotter` instances or a :class:`Plotter`
            The plotters to share the formatoptions with
        keys: string or iterable of strings
            The formatoptions to share, or group names of formatoptions to
            share all formatoptions of that group (see the
            :attr:`fmt_groups` property). If None, all formatoptions of
            this plotter are unshared.
        %(InteractiveBase.start_update.parameters.draw)s
        %(InteractiveBase.update.parameters.auto_update)s

        See Also
        --------
        unshare, unshare_me"""
        # Updates below run immediately unless auto-updating is disabled
        # both via the argument and via this plotter's own setting.
        auto_update = auto_update or not self.no_auto_update
        if isinstance(plotters, Plotter):
            plotters = [plotters]
        # Expand group names etc. into the concrete set of formatoption keys.
        keys = self._set_sharing_keys(keys)
        for plotter in plotters:
            for key in keys:
                # Prefer an already-shared formatoption for this key,
                # falling back to our own.
                fmto = self._shared.get(key, getattr(self, key))
                if not getattr(plotter, key) == fmto:
                    # Record on the other plotter that this key is now
                    # driven by our formatoption, and remember the other
                    # plotter's fmto so updates propagate to it.
                    plotter._shared[key] = getattr(self, key)
                    fmto.shared.add(getattr(plotter, key))
        # update immediately if this plotter has already been initialized
        if self._initialized:
            self.update(force=keys, auto_update=auto_update, draw=draw)
        for plotter in plotters:
            if not plotter._initialized:
                continue
            # Temporarily stash and clear the other plotter's pending
            # updates so the forced update does not mix with them; restore
            # them afterwards no matter what happened.
            old_registered = plotter._registered_updates.copy()
            plotter._registered_updates.clear()
            try:
                plotter.update(force=keys, auto_update=auto_update, draw=draw)
            except:
                raise
            finally:
                plotter._registered_updates.clear()
                plotter._registered_updates.update(old_registered)
        if draw is None:
            draw = rcParams['auto_draw']
        if draw:
            self.draw()
            if rcParams['auto_show']:
                self.show()
Share the formatoptions of this plotter with others This method shares the formatoptions of this :class:`Plotter` instance with others to make sure that, if the formatoption of this changes, those of the others change as well Parameters ---------- plotters: list of :class:`Plotter` instances or a :class:`Plotter` The plotters to share the formatoptions with keys: string or iterable of strings The formatoptions to share, or group names of formatoptions to share all formatoptions of that group (see the :attr:`fmt_groups` property). If None, all formatoptions of this plotter are unshared. %(InteractiveBase.start_update.parameters.draw)s %(InteractiveBase.update.parameters.auto_update)s See Also -------- unshare, unshare_me
def _internal_declare_key_flags(flag_names, flag_values=FLAGS, key_flag_values=None):
  """Declares a flag as key for the calling module.

  Internal function.  User code should call DECLARE_key_flag or
  ADOPT_module_key_flags instead.

  Args:
    flag_names: A list of strings that are names of already-registered
      Flag objects.
    flag_values: A FlagValues object that the flags listed in flag_names
      have registered with (the value of the flag_values argument from
      the DEFINE_* calls that defined those flags).  This should almost
      never need to be overridden.
    key_flag_values: A FlagValues object that (among possibly many other
      things) keeps track of the key flags for each module.  Default None
      means "same as flag_values".  This should almost never need to be
      overridden.

  Raises:
    UnrecognizedFlagError: when we refer to a flag that was not defined
      yet.
  """
  registry = key_flag_values if key_flag_values else flag_values
  caller_module = _helpers.GetCallingModule()
  for name in flag_names:
    # GetFlag raises UnrecognizedFlagError for unknown flag names.
    flag = flag_values.GetFlag(name)
    # TODO(vrusinov): _RegisterKeyFlagForModule should be public.
    registry._RegisterKeyFlagForModule(caller_module, flag)
Declares a flag as key for the calling module. Internal function. User code should call DECLARE_key_flag or ADOPT_module_key_flags instead. Args: flag_names: A list of strings that are names of already-registered Flag objects. flag_values: A FlagValues object that the flags listed in flag_names have registered with (the value of the flag_values argument from the DEFINE_* calls that defined those flags). This should almost never need to be overridden. key_flag_values: A FlagValues object that (among possibly many other things) keeps track of the key flags for each module. Default None means "same as flag_values". This should almost never need to be overridden. Raises: UnrecognizedFlagError: when we refer to a flag that was not defined yet.
def _storage_get_key_names(bucket, pattern):
  """ Get names of all storage keys in a specified bucket that match a pattern. """
  names = []
  for key in _storage_get_keys(bucket, pattern):
    names.append(key.metadata.name)
  return names
Get names of all storage keys in a specified bucket that match a pattern.
def _create_matrix(self, document, dictionary):
        """
        Creates a matrix of shape |unique words|×|sentences| where cells
        contain the number of occurrences of words (rows) in sentences
        (cols).
        """
        sentences = document.sentences
        words_count = len(dictionary)
        sentences_count = len(sentences)

        if words_count < sentences_count:
            message = (
                "Number of words (%d) is lower than number of sentences (%d). "
                "LSA algorithm may not work properly."
            )
            warn(message % (words_count, sentences_count))

        # Term-by-sentence matrix of raw occurrence counts.
        matrix = numpy.zeros((words_count, sentences_count))
        for col, sentence in enumerate(sentences):
            for word in map(self.stem_word, sentence.words):
                # Only words present in the dictionary are counted
                # (stop-words etc. are expected to be absent from it).
                row = dictionary.get(word)
                if row is not None:
                    matrix[row, col] += 1

        return matrix
Creates a matrix of shape |unique words|×|sentences| where cells contain the number of occurrences of words (rows) in sentences (cols).
def prepare_params(self):
        """
        Prepare the parameters passed to the templatetag.

        Resolves the fragment name (optionally via template-variable
        resolution), computes the expiry time, the version (when versioning
        is enabled) and resolves the ``vary_on`` template variables against
        the current context.

        :raises ValueError: if the fragment name is quoted on only one side
        """
        if self.options.resolve_fragment:
            self.fragment_name = self.node.fragment_name.resolve(self.context)
        else:
            self.fragment_name = str(self.node.fragment_name)
            # Remove matching quotes that surround the name; a quote on only
            # one side is treated as a malformed name.
            for char in '\'\"':
                if self.fragment_name.startswith(char) or self.fragment_name.endswith(char):
                    if self.fragment_name.startswith(char) and self.fragment_name.endswith(char):
                        self.fragment_name = self.fragment_name[1:-1]
                        break
                    else:
                        raise ValueError('Number of quotes around the fragment name is incoherent')

        self.expire_time = self.get_expire_time()

        if self.options.versioning:
            self.version = force_bytes(self.get_version())

        self.vary_on = [template.Variable(var).resolve(self.context) for var in self.node.vary_on]
Prepare the parameters passed to the templatetag
def request(self, path, data=None, headers=None, method=None):
        """Performs a HTTP request to the Go server

        Args:
          path (str): The full path on the Go server to request.
            This includes any query string attributes.
          data (str, dict, bool, optional): If any data is present this
            request will become a POST request.
          headers (dict, optional): Headers to set for this particular
            request

        Raises:
          HTTPError: when the HTTP request fails.

        Returns:
          file like object: The response from a :func:`urllib2.urlopen`
            call
        """
        # urlopen needs a bytes payload; encode a textual one up front.
        payload = data.encode('utf-8') if isinstance(data, str) else data
        prepared = self._request(path, data=payload, headers=headers, method=method)
        response = urlopen(prepared)
        # Remember the session cookie so subsequent calls stay authenticated.
        self._set_session_cookie(response)
        return response
Performs a HTTP request to the Go server Args: path (str): The full path on the Go server to request. This includes any query string attributes. data (str, dict, bool, optional): If any data is present this request will become a POST request. headers (dict, optional): Headers to set for this particular request Raises: HTTPError: when the HTTP request fails. Returns: file like object: The response from a :func:`urllib2.urlopen` call
def post_event_discounts(self, id, **data):
        """
        POST /events/:id/discounts/

        Creates a new discount; returns the result as a :format:`discount`
        as the key ``discount``.
        """
        path = "/events/{0}/discounts/".format(id)
        return self.post(path, data=data)
POST /events/:id/discounts/ Creates a new discount; returns the result as a :format:`discount` as the key ``discount``.
def release(self, conn):
        """Release a previously acquired connection.

        The connection is wrapped and put back into the pool and the
        acquired-connection counter is decremented.

        :param conn: the raw connection previously handed out by this pool
        """
        # Use the lock as a context manager so it is released even if
        # ``put`` or the wrapper construction raises; the previous manual
        # acquire()/release() pair leaked the lock on an exception, which
        # would deadlock every later acquire/release.
        with self._pool_lock:
            self._pool.put(ConnectionWrapper(self._pool, conn))
            self._current_acquired -= 1
Release a previously acquired connection. The connection is put back into the pool.
def getenv(option, default=undefined, cast=undefined):
    """
    Return the value for option or default if defined.
    """
    # A membership test rather than a truthiness check: the variable may
    # legitimately be set to an empty string.
    if option not in os.environ:
        if isinstance(default, Undefined):
            raise UndefinedValueError('{} not found. Declare it as envvar or define a default value.'.format(option))
        value = default
    else:
        value = os.environ[option]

    # No cast requested: hand the raw (or default) value back unchanged.
    if isinstance(cast, Undefined):
        return value
    if cast is bool:
        return _cast_boolean(value)
    if cast is list:
        return [x for x in value.split(',') if x]
    return cast(value)
Return the value for option or default if defined.
def render(self, name, value, attrs=None):
        '''Render the widget as HTML inputs for display on a form.

        :param name: form field base name
        :param value: date value
        :param attrs: - unused
        :returns: HTML text with three inputs for year/month/day
        '''
        # Placeholder text shown when no (parseable) value is supplied;
        # expects a value in format YYYY-MM-DD or YYYY-MM or YYYY.
        year, month, day = 'YYYY', 'MM', 'DD'
        if value:
            # If the regular expression does not match, the placeholders
            # above are kept (i.e. the inputs show YYYY/MM/DD again).
            match = W3C_DATE_RE.match(value)
            if match:
                groups = match.groupdict()
                year = groups['year']
                month = groups['month']
                day = groups['day']

        # Display widget fields in YYYY-MM-DD order to match the W3C date
        # format, putting the required field(s) on the left.
        pieces = [
            self.create_textinput(
                name, self.year_field, year, size=4, title='4-digit year',
                onClick='javascript:if(this.value == "YYYY") { this.value = "" };'),
            self.create_textinput(
                name, self.month_field, month, size=2, title='2-digit month',
                onClick='javascript:if(this.value == "MM") { this.value = "" };'),
            self.create_textinput(
                name, self.day_field, day, size=2, title='2-digit day',
                onClick='javascript:if(this.value == "DD") { this.value = "" };'),
        ]
        return mark_safe(u' / \n'.join(pieces))
Render the widget as HTML inputs for display on a form. :param name: form field base name :param value: date value :param attrs: - unused :returns: HTML text with three inputs for year/month/day
def get_stderr(self, tail=None):
        """
        Returns output written to standard error so far.

        :param tail: Return only this number of most-recent lines.
        :return: copy of stderr stream, joined with newlines
        """
        # Drain any output still pending in the reader thread's queue first.
        if self.finished():
            self.join_threads()
        while not self.stderr_q.empty():
            self.stderr_l.append(self.stderr_q.get_nowait())
        if tail is None:
            lines = self.stderr_l
        else:
            # Take the *last* ``tail`` lines as documented; the previous
            # ``[:tail]`` slice returned the oldest lines instead.
            # ``[-0:]`` would yield the whole list, so a non-positive tail
            # means "no lines".
            lines = self.stderr_l[-tail:] if tail > 0 else []
        return _py2_and_3_joiner('\n', lines)
Returns current total output written to standard error. :param tail: Return this number of most-recent lines. :return: copy of stderr stream
def vprintf(self, alevel, format, *args):
        '''
        A verbosity-aware printf: writes ``format % args`` to stdout only
        when the configured verbosity is truthy and at least `alevel`.
        '''
        level = self._verbosity
        # A falsy verbosity (None or 0) silences all output.
        if not level:
            return
        if level >= alevel:
            sys.stdout.write(format % args)
A verbosity-aware printf.
def fit_transform(self, X, y=None):
        """Fit OneHotEncoder to X, then transform X.

        Equivalent to self.fit(X).transform(X), but more convenient and more
        efficient. See fit for the parameters, transform for the return value.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Dense array or sparse matrix.
        y: array-like {n_samples,} (Optional, ignored)
            Feature labels
        """
        # Resolve "auto" once and persist the selection on the estimator so
        # later transform calls use the same columns.
        features = self.categorical_features
        if features == "auto":
            features = auto_select_categorical_features(X, threshold=self.threshold)
            self.categorical_features = features

        return _transform_selected(X, self._fit_transform, features, copy=True)
Fit OneHotEncoder to X, then transform X. Equivalent to self.fit(X).transform(X), but more convenient and more efficient. See fit for the parameters, transform for the return value. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Dense array or sparse matrix. y: array-like {n_samples,} (Optional, ignored) Feature labels