def non_blocking(func):
    """Decorator to run a function in a different thread.

    It can be used to execute a command in a non-blocking way like this::

        @non_blocking
        def add_one(n):
            print('starting')
            import time
            time.sleep(2)
            print('ending')
            return n + 1

        thread = add_one(5)      # Starts the function
        result = thread.join()   # Waits for it to complete
        print(result)
    """
    from functools import wraps

    @wraps(func)
    def non_blocking_version(*args, **kwargs):
        # ReturnThread is a Thread subclass, defined elsewhere in the
        # module, whose join() returns the target's return value.
        t = ReturnThread(target=func, args=args, kwargs=kwargs)
        t.start()
        return t
    return non_blocking_version
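ReturnThread is referenced but not defined in this snippet. A minimal sketch of what such a class might look like, assuming join() is meant to hand back the target's return value as the docstring example implies:

import threading

class ReturnThread(threading.Thread):
    """Thread whose join() returns the target's return value (sketch)."""
    def __init__(self, target=None, args=(), kwargs=None):
        super().__init__()
        self._func = target
        self._func_args = args
        self._func_kwargs = kwargs or {}
        self._result = None

    def run(self):
        # Capture the return value so join() can report it.
        self._result = self._func(*self._func_args, **self._func_kwargs)

    def join(self, timeout=None):
        super().join(timeout)
        return self._result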
def verify_rsa_sha1(request, rsa_public_key):
    """Verify a RSASSA-PKCS #1 v1.5 base64 encoded signature.

    Per `section 3.4.3`_ of the spec.

    Note this method requires the jwt and cryptography libraries.

    .. _`section 3.4.3`: https://tools.ietf.org/html/rfc5849#section-3.4.3

    To satisfy `RFC2616 section 5.2`_ item 1, the request argument's uri
    attribute MUST be an absolute URI whose netloc part identifies the
    origin server or gateway on which the resource resides. Any Host
    item of the request argument's headers dict attribute will be
    ignored.

    .. _`RFC2616 section 5.2`: https://tools.ietf.org/html/rfc2616#section-5.2
    """
    norm_params = normalize_parameters(request.params)
    bs_uri = base_string_uri(request.uri)
    sig_base_str = signature_base_string(
        request.http_method, bs_uri, norm_params).encode('utf-8')
    sig = binascii.a2b_base64(request.signature.encode('utf-8'))

    alg = _jwt_rs1_signing_algorithm()
    key = _prepare_key_plus(alg, rsa_public_key)

    verify_ok = alg.verify(sig_base_str, key, sig)
    if not verify_ok:
        log.debug('Verify RSA-SHA1 failed: signature base string: %s',
                  sig_base_str)
    return verify_ok
def parse_message(message, nodata=False):
    """Parse df message from bytearray.

    @message - message data
    @nodata  - do not load data
    @return  - [binary header, metadata, binary data]
    """
    header = read_machine_header(message)
    h_len = __get_machine_header_length(header)
    meta_raw = message[h_len:h_len + header['meta_len']]
    meta = __parse_meta(meta_raw, header)
    data_start = h_len + header['meta_len']
    data = b''
    if not nodata:
        data = __decompress(
            meta,
            message[data_start:data_start + header['data_len']]
        )
    return header, meta, data
def _get_missing_trees(self, path, root_tree):
    """
    Creates missing ``Tree`` objects for the given path.

    :param path: path given as a string. It may be a path to a file node
      (i.e. ``foo/bar/baz.txt``) or directory path - in that case it must
      end with slash (i.e. ``foo/bar/``).
    :param root_tree: ``dulwich.objects.Tree`` object from which we start
      traversing (should be commit's root tree)
    """
    dirpath = posixpath.split(path)[0]
    dirs = dirpath.split('/')
    if not dirs or dirs == ['']:
        return []

    def get_tree_for_dir(tree, dirname):
        for name, mode, id in tree.iteritems():
            if name == dirname:
                obj = self.repository._repo[id]
                if isinstance(obj, objects.Tree):
                    return obj
                else:
                    raise RepositoryError("Cannot create directory %s "
                                          "at tree %s as path is occupied and is not a "
                                          "Tree" % (dirname, tree))
        return None

    trees = []
    parent = root_tree
    for dirname in dirs:
        tree = get_tree_for_dir(parent, dirname)
        if tree is None:
            tree = objects.Tree()
            dirmode = 0o40000  # directory mode; 040000 was Python 2 octal syntax
            parent.add(dirmode, dirname, tree.id)
            parent = tree
        # Always append tree
        trees.append(tree)
    return trees
def compile_datetime(rule):
    """
    Compiler helper method: attempt to compile constant into object
    representing datetime object to enable relations and thus simple
    comparisons using Python operators.
    """
    if isinstance(rule.value, datetime.datetime):
        return rule
    try:
        # Try numeric type
        return DatetimeRule(datetime.datetime.fromtimestamp(float(rule.value)))
    except (TypeError, ValueError):
        pass
    # Try RFC3339 timestamp string
    res = TIMESTAMP_RE.match(str(rule.value))
    if res is not None:
        year, month, day, hour, minute, second = (
            int(n or 0) for n in res.group(*range(1, 7)))
        us_str = (res.group(7) or "0")[:6].ljust(6, "0")
        us_int = int(us_str)
        zonestr = res.group(8)
        zonespl = (0, 0) if zonestr in ['z', 'Z'] else [int(i) for i in zonestr.split(":")]
        zonediff = datetime.timedelta(minutes=zonespl[0] * 60 + zonespl[1])
        return DatetimeRule(datetime.datetime(
            year, month, day, hour, minute, second, us_int) - zonediff)
    raise ValueError("Wrong datetime format '{}'".format(rule.value))
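TIMESTAMP_RE and DatetimeRule are module internals not shown here. A self-contained sketch of the RFC3339 branch, with an assumed pattern standing in for TIMESTAMP_RE, illustrates the timezone arithmetic:

import re
import datetime

# Assumed stand-in for the module's TIMESTAMP_RE; the real pattern may differ.
RFC3339_RE = re.compile(
    r"(\d{4})-(\d{2})-(\d{2})[Tt ](\d{2}):(\d{2}):(\d{2})"
    r"(?:\.(\d+))?([Zz]|[+-]\d{2}:\d{2})")

def parse_rfc3339(text):
    res = RFC3339_RE.match(text)
    if res is None:
        raise ValueError("Wrong datetime format '{}'".format(text))
    year, month, day, hour, minute, second = (
        int(n) for n in res.group(*range(1, 7)))
    us = int((res.group(7) or "0")[:6].ljust(6, "0"))
    zonestr = res.group(8)
    zonespl = (0, 0) if zonestr in ('z', 'Z') else [int(i) for i in zonestr.split(":")]
    zonediff = datetime.timedelta(minutes=zonespl[0] * 60 + zonespl[1])
    # Subtracting the offset normalizes the naive datetime to UTC.
    return datetime.datetime(year, month, day, hour, minute, second, us) - zonediff

print(parse_rfc3339("2019-03-01T12:00:00+02:00"))  # 2019-03-01 10:00:00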
def get_stp_mst_detail_output_msti_port_link_type(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_stp_mst_detail = ET.Element("get_stp_mst_detail")
    config = get_stp_mst_detail
    output = ET.SubElement(get_stp_mst_detail, "output")
    msti = ET.SubElement(output, "msti")
    instance_id_key = ET.SubElement(msti, "instance-id")
    instance_id_key.text = kwargs.pop('instance_id')
    port = ET.SubElement(msti, "port")
    link_type = ET.SubElement(port, "link-type")
    link_type.text = kwargs.pop('link_type')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def pixel_array_to_image(self, width, height, channels,
                         undefined_on_failure=True, allow_rounding=False):
    """
    Create a new SArray with all the values cast to
    :py:class:`turicreate.image.Image` of uniform size.

    Parameters
    ----------
    width: int
        The width of the new images.

    height: int
        The height of the new images.

    channels: int
        Number of channels of the new images.

    undefined_on_failure: bool, optional, default True
        If True, return None type instead of Image type in failure instances.
        If False, raises error upon failure.

    allow_rounding: bool, optional, default False
        If True, rounds non-integer values when converting to Image type.
        If False, raises error upon rounding.

    Returns
    -------
    out : SArray[turicreate.Image]
        The SArray converted to the type 'turicreate.Image'.

    See Also
    --------
    astype, str_to_datetime, datetime_to_str

    Examples
    --------
    The MNIST data is scaled from 0 to 1, but our image type only loads
    integer pixel values from 0 to 255. If we just convert without
    scaling, all values below one would be cast to 0.

    >>> mnist_array = turicreate.SArray('https://static.turi.com/datasets/mnist/mnist_vec_sarray')
    >>> scaled_mnist_array = mnist_array * 255
    >>> mnist_img_sarray = tc.SArray.pixel_array_to_image(scaled_mnist_array, 28, 28, 1, allow_rounding=True)
    """
    if self.dtype != array.array:
        raise TypeError("array_to_img expects SArray of arrays as input SArray")

    num_to_test = 10
    num_test = min(len(self), num_to_test)

    mod_values = [val % 1 for x in range(num_test) for val in self[x]]
    out_of_range_values = [(val > 255 or val < 0)
                           for x in range(num_test) for val in self[x]]

    if sum(mod_values) != 0.0 and not allow_rounding:
        raise ValueError("There are non-integer values in the array data. "
                         "Images only support integer data values between 0 and 255. "
                         "To permit rounding, set the 'allow_rounding' parameter to True.")

    if sum(out_of_range_values) != 0:
        raise ValueError("There are values outside the range of 0 to 255. "
                         "Images only support integer data values between 0 and 255.")

    from .. import extensions
    return extensions.vector_sarray_to_image_sarray(
        self, width, height, channels, undefined_on_failure)
def pytype_to_ctype(t):
    """ Python -> pythonic type binding. """
    if isinstance(t, List):
        return 'pythonic::types::list<{0}>'.format(
            pytype_to_ctype(t.__args__[0])
        )
    elif isinstance(t, Set):
        return 'pythonic::types::set<{0}>'.format(
            pytype_to_ctype(t.__args__[0])
        )
    elif isinstance(t, Dict):
        tkey, tvalue = t.__args__
        return 'pythonic::types::dict<{0},{1}>'.format(pytype_to_ctype(tkey),
                                                       pytype_to_ctype(tvalue))
    elif isinstance(t, Tuple):
        return 'decltype(pythonic::types::make_tuple({0}))'.format(
            ", ".join('std::declval<{}>()'.format(pytype_to_ctype(p))
                      for p in t.__args__)
        )
    elif isinstance(t, NDArray):
        dtype = pytype_to_ctype(t.__args__[0])
        ndim = len(t.__args__) - 1
        shapes = ','.join(('long' if s.stop == -1 or s.stop is None
                           else 'std::integral_constant<long, {}>'.format(s.stop))
                          for s in t.__args__[1:])
        pshape = 'pythonic::types::pshape<{0}>'.format(shapes)
        arr = 'pythonic::types::ndarray<{0},{1}>'.format(dtype, pshape)
        if t.__args__[1].start == -1:
            return 'pythonic::types::numpy_texpr<{0}>'.format(arr)
        elif any(s.step is not None and s.step < 0 for s in t.__args__[1:]):
            slices = ", ".join(['pythonic::types::normalized_slice'] * ndim)
            return 'pythonic::types::numpy_gexpr<{0},{1}>'.format(arr, slices)
        else:
            return arr
    elif isinstance(t, Pointer):
        return 'pythonic::types::pointer<{0}>'.format(
            pytype_to_ctype(t.__args__[0])
        )
    elif isinstance(t, Fun):
        return 'pythonic::types::cfun<{0}({1})>'.format(
            pytype_to_ctype(t.__args__[-1]),
            ", ".join(pytype_to_ctype(arg) for arg in t.__args__[:-1]),
        )
    elif t in PYTYPE_TO_CTYPE_TABLE:
        return PYTYPE_TO_CTYPE_TABLE[t]
    else:
        raise NotImplementedError("{0}:{1}".format(type(t), t))
def load_XAML(file_obj, *args, **kwargs):
    """
    Load a 3D XAML file.

    Parameters
    ----------
    file_obj : file object
        Open, containing XAML file

    Returns
    ----------
    result : dict
        kwargs for a trimesh constructor, including:
        vertices:       (n,3) np.float64, points in space
        faces:          (m,3) np.int64, indices of vertices
        face_colors:    (m,4) np.uint8, RGBA colors
        vertex_normals: (n,3) np.float64, vertex normals
    """
    def element_to_color(element):
        """
        Turn an XML element into a (4,) np.uint8 RGBA color
        """
        if element is None:
            return visual.DEFAULT_COLOR
        hexcolor = int(element.attrib['Color'].replace('#', ''), 16)
        opacity = float(element.attrib['Opacity'])
        rgba = [(hexcolor >> 16) & 0xFF,
                (hexcolor >> 8) & 0xFF,
                (hexcolor & 0xFF),
                opacity * 0xFF]
        rgba = np.array(rgba, dtype=np.uint8)
        return rgba

    def element_to_transform(element):
        """
        Turn an XML element into a (4,4) np.float64
        transformation matrix.
        """
        try:
            matrix = next(element.iter(
                tag=ns + 'MatrixTransform3D')).attrib['Matrix']
            matrix = np.array(matrix.split(),
                              dtype=np.float64).reshape((4, 4)).T
            return matrix
        except StopIteration:
            # this will be raised if the MatrixTransform3D isn't in the
            # passed elements tree
            return np.eye(4)

    # read the file and parse XML
    file_data = file_obj.read()
    root = etree.XML(file_data)

    # the XML namespace
    ns = root.tag.split('}')[0] + '}'

    # the linked lists our results are going in
    vertices = collections.deque()
    faces = collections.deque()
    colors = collections.deque()
    normals = collections.deque()

    # iterate through the element tree
    # the GeometryModel3D tag contains a material and geometry
    for geometry in root.iter(tag=ns + 'GeometryModel3D'):
        # get the diffuse and specular colors specified in the material
        color_search = './/{ns}{color}Material/*/{ns}SolidColorBrush'
        diffuse = geometry.find(color_search.format(ns=ns, color='Diffuse'))
        specular = geometry.find(color_search.format(ns=ns, color='Specular'))

        # convert the element into a (4,) np.uint8 RGBA color
        diffuse = element_to_color(diffuse)
        specular = element_to_color(specular)

        # to get the final transform of a component we'll have to traverse
        # all the way back to the root node and save transforms we find
        current = geometry
        transforms = collections.deque()
        # when the root node is reached its parent will be None and we stop
        while current is not None:
            # element.find will only return elements that are direct children
            # of the current element as opposed to element.iter,
            # which will return any depth of child
            transform_element = current.find(ns + 'ModelVisual3D.Transform')
            if transform_element is not None:
                # we are traversing the tree backwards, so append new
                # transforms to the left of the deque
                transforms.appendleft(element_to_transform(transform_element))
            # we are going from the lowest level of the tree to the highest
            # this avoids having to traverse any branches that don't have
            # geometry
            current = current.getparent()

        if len(transforms) == 0:
            # no transforms in the tree mean an identity matrix
            transform = np.eye(4)
        elif len(transforms) == 1:
            # one transform in the tree we can just use
            transform = transforms.pop()
        else:
            # multiple transforms we apply all of them in order
            transform = util.multi_dot(transforms)

        # iterate through the contained mesh geometry elements
        for g in geometry.iter(tag=ns + 'MeshGeometry3D'):
            c_normals = np.array(g.attrib['Normals'].replace(
                ',', ' ').split(), dtype=np.float64).reshape((-1, 3))
            c_vertices = np.array(g.attrib['Positions'].replace(
                ',', ' ').split(), dtype=np.float64).reshape((-1, 3))
            # bake in the transform as we're saving
            c_vertices = transformations.transform_points(
                c_vertices, transform)
            c_faces = np.array(g.attrib['TriangleIndices'].replace(
                ',', ' ').split(), dtype=np.int64).reshape((-1, 3))

            # save data to a sequence
            vertices.append(c_vertices)
            faces.append(c_faces)
            colors.append(np.tile(diffuse, (len(c_faces), 1)))
            normals.append(c_normals)

    # compile the results into clean numpy arrays
    result = dict()
    result['vertices'], result['faces'] = util.append_faces(vertices, faces)
    result['face_colors'] = np.vstack(colors)
    result['vertex_normals'] = np.vstack(normals)
    return result
def _do_perform_delete_on_model(self):
    """
    Perform the actual delete query on this model instance.
    """
    if self._force_deleting:
        return self.with_trashed().where(
            self.get_key_name(), self.get_key()).force_delete()
    return self._run_soft_delete()
def get_trace(self, project_id, trace_id):
    """
    Gets a single trace by its ID.

    Args:
        project_id (str): Required. ID of the Cloud project where the
            trace data is stored.
        trace_id (str): ID of the trace to return.

    Returns:
        A Trace dict.
    """
    trace_pb = self._gapic_api.get_trace(project_id, trace_id)
    trace_mapping = _parse_trace_pb(trace_pb)
    return trace_mapping
def applyKeyMapping(self, mapping):
    """
    Used as the second half of the key reassignment algorithm. Loops over
    each row in the table, replacing references to old row keys with the
    new values from the mapping.
    """
    for coltype, colname in zip(self.columntypes, self.columnnames):
        if coltype in ligolwtypes.IDTypes and \
                (self.next_id is None or colname != self.next_id.column_name):
            column = self.getColumnByName(colname)
            for i, old in enumerate(column):
                try:
                    column[i] = mapping[old]
                except KeyError:
                    pass
def thanksgiving(year, country='usa'):
    '''USA: last Thurs. of November, Canada: 2nd Mon. of October'''
    if country == 'usa':
        # "Franksgiving": FDR moved the holiday up a week in 1939-1941
        if year in [1940, 1941]:
            return nth_day_of_month(3, THU, NOV, year)
        elif year == 1939:
            return nth_day_of_month(4, THU, NOV, year)
        else:
            return nth_day_of_month(0, THU, NOV, year)

    if country == 'canada':
        return nth_day_of_month(2, MON, OCT, year)
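A quick cross-check of the usual case with the standard library, assuming nth_day_of_month(0, THU, NOV, year) selects the last Thursday as the docstring's "last Thurs." implies:

import calendar

def last_thursday_november(year):
    # monthcalendar pads missing days with 0, so filter those out.
    weeks = calendar.monthcalendar(year, 11)
    thursdays = [w[calendar.THURSDAY] for w in weeks if w[calendar.THURSDAY]]
    return thursdays[-1]

print(last_thursday_november(2024))  # 28 -> Thanksgiving 2024 was Nov 28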
def download(client, target_dir):
    """Download inappproducts from play store."""
    print('')
    print("download inappproducts")
    print('---------------------')
    products = client.list_inappproducts()
    for product in products:
        path = os.path.join(target_dir, 'products')
        del product['packageName']
        mkdir_p(path)
        with open(os.path.join(path, product['sku'] + '.json'), 'w') as outfile:
            print("save product for {0}".format(product['sku']))
            json.dump(product, outfile, sort_keys=True, indent=4,
                      separators=(',', ': '))
def supported(cls, stream=sys.stdout):
    """
    A class method that returns True if the current platform supports
    coloring terminal output using this method. Returns False otherwise.
    """
    if not stream.isatty():
        return False  # auto color only on TTYs
    try:
        import curses
    except ImportError:
        return False
    else:
        try:
            try:
                return curses.tigetnum("colors") > 2
            except curses.error:
                curses.setupterm()
                return curses.tigetnum("colors") > 2
        except Exception:
            # guess false in case of error
            return False
def get_tournament(self, tag: crtag, **params: keys):
    """Get information about a tournament

    Parameters
    ----------
    tag: str
        A valid tournament tag. Minimum length: 3
        Valid characters: 0289PYLQGRJCUV
    \*\*keys: Optional[list] = None
        Filter which keys should be included in the response
    \*\*exclude: Optional[list] = None
        Filter which keys should be excluded from the response
    \*\*timeout: Optional[int] = None
        Custom timeout that overwrites Client.timeout
    """
    url = self.api.TOURNAMENT + '/' + tag
    return self._get_model(url, **params)
# The docstring implies this generator is used as a context manager; the
# contextlib.contextmanager decorator was presumably stripped during
# extraction.
@contextmanager
def loading(self):
    """Context manager for when you need to instantiate entities upon
    unpacking"""
    if getattr(self, '_initialized', False):
        raise ValueError("Already loading")
    self._initialized = False
    yield
    self._initialized = True
def ast_to_code(ast, indent=0):
    # type: (Any, int) -> str
    """
    Converts an ast into a python code representation of the AST.
    """
    code = []

    def append(line):
        # type: (str) -> None
        code.append((" " * indent) + line)

    if isinstance(ast, Node):
        append("ast.{}(".format(ast.__class__.__name__))
        indent += 1
        for i, k in enumerate(ast._fields, 1):
            v = getattr(ast, k)
            append("{}={},".format(k, ast_to_code(v, indent)))
        if ast.loc:
            append("loc={}".format(ast_to_code(ast.loc, indent)))
        indent -= 1
        append(")")
    elif isinstance(ast, Loc):
        append("loc({}, {})".format(ast.start, ast.end))
    elif isinstance(ast, list):
        if ast:
            append("[")
            indent += 1
            for i, it in enumerate(ast, 1):
                is_last = i == len(ast)
                append(ast_to_code(it, indent) + ("," if not is_last else ""))
            indent -= 1
            append("]")
        else:
            append("[]")
    else:
        append(repr(ast))
    return "\n".join(code).strip()
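Since the fallback branch uses repr() and lists are handled directly, a quick smoke test needs no Node or Loc instances (assuming the module's Node and Loc names are importable so the isinstance checks can run):

print(ast_to_code([1, "two", None]))
# [
#  1,
#  'two',
#  None
# ]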
def list_all(self):
    """Return all equipment in the database.

    :return: Dictionary with the following structure:

    ::

        {'equipaments': {'name' :< name_equipament >},
         {... other equipment ...} }

    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    url = 'equipamento/list/'
    code, xml = self.submit(None, 'GET', url)
    return self.response(code, xml)
def calc_time_step(self):
    """
    Set the time step during time domain simulations.

    Uses the following instance attributes:

    convergence : bool
        truth value of the convergence of the last step
    niter : int
        current iteration count
    t : float
        current simulation time

    The computed step size is stored in ``self.h`` (and in
    ``config.deltat``); nothing is returned.
    """
    system = self.system
    config = self.config
    convergence = self.convergence
    niter = self.niter
    t = self.t

    if t == 0:
        self._calc_time_step_first()
        return

    if convergence:
        if niter >= 15:
            config.deltat = max(config.deltat * 0.5, config.deltatmin)
        elif niter <= 6:
            config.deltat = min(config.deltat * 1.1, config.deltatmax)
        else:
            config.deltat = max(config.deltat * 0.95, config.deltatmin)

        # adjust fixed time step if niter is high
        if config.fixt:
            config.deltat = min(config.tstep, config.deltat)
    else:
        config.deltat *= 0.9
        if config.deltat < config.deltatmin:
            config.deltat = 0

    if system.Fault.is_time(t) or system.Breaker.is_time(t):
        config.deltat = min(config.deltat, 0.002778)
    elif system.check_event(t):
        config.deltat = min(config.deltat, 0.002778)

    if config.method == 'fwdeuler':
        config.deltat = min(config.deltat, config.tstep)

    # last step size
    if self.t + config.deltat > config.tf:
        config.deltat = config.tf - self.t

    # reduce time step for fixed_times events
    for fixed_t in self.fixed_times:
        if (fixed_t > self.t) and (fixed_t <= self.t + config.deltat):
            config.deltat = fixed_t - self.t
            self.switch = True
            break

    self.h = config.deltat
def get_script_args(dist, executable=sys_executable, wininst=False):
    """Yield write_script() argument tuples for a distribution's entrypoints"""
    spec = str(dist.as_requirement())
    header = get_script_header("", executable, wininst)
    for group in 'console_scripts', 'gui_scripts':
        for name, ep in dist.get_entry_map(group).items():
            script_text = (
                "# EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r\n"
                "__requires__ = %(spec)r\n"
                "import sys\n"
                "from pkg_resources import load_entry_point\n"
                "\n"
                "if __name__ == '__main__':"
                "\n"
                "    sys.exit(\n"
                "        load_entry_point(%(spec)r, %(group)r, %(name)r)()\n"
                "    )\n"
            ) % locals()
            if sys.platform == 'win32' or wininst:
                # On Windows/wininst, add a .py extension and an .exe launcher
                if group == 'gui_scripts':
                    ext, launcher = '-script.pyw', 'gui.exe'
                    old = ['.pyw']
                    new_header = re.sub('(?i)python.exe', 'pythonw.exe', header)
                else:
                    ext, launcher = '-script.py', 'cli.exe'
                    old = ['.py', '.pyc', '.pyo']
                    new_header = re.sub('(?i)pythonw.exe', 'python.exe', header)
                if is_64bit():
                    launcher = launcher.replace(".", "-64.")
                else:
                    launcher = launcher.replace(".", "-32.")
                if os.path.exists(new_header[2:-1]) or sys.platform != 'win32':
                    hdr = new_header
                else:
                    hdr = header
                yield (name + ext, hdr + script_text, 't',
                       [name + x for x in old])
                yield (
                    name + '.exe', resource_string('setuptools', launcher),
                    'b'  # write in binary mode
                )
            else:
                # On other platforms, we assume the right thing to do is to
                # just write the stub with no extension.
                yield (name, header + script_text)
def coupling_matrix_2j(j1, j2):
    r"""For angular momenta $j_1, j_2$ the unitary transformation from the \
    uncoupled basis into the $j = j_1 \oplus j_2$ coupled basis.

    >>> from sympy import Integer, pprint
    >>> L = 0
    >>> S = 1/Integer(2)
    >>> pprint(coupling_matrix_2j(L, S))
    ⎑1  0⎀
    ⎒    βŽ₯
    ⎣0  1⎦

    >>> L = 1
    >>> S = 1/Integer(2)
    >>> pprint(coupling_matrix_2j(L, S))
    ⎑   -√6   √3            ⎀
    ⎒0  ────  ──   0    0  0βŽ₯
    ⎒    3    3             βŽ₯
    ⎒                       βŽ₯
    ⎒             -√3   √6  βŽ₯
    ⎒0   0    0   ────  ──  0βŽ₯
    ⎒              3    3   βŽ₯
    ⎒                       βŽ₯
    ⎒1   0    0    0    0  0βŽ₯
    ⎒                       βŽ₯
    ⎒   √3   √6             βŽ₯
    ⎒0  ──   ──    0    0  0βŽ₯
    ⎒   3    3              βŽ₯
    ⎒                       βŽ₯
    ⎒             √6   √3   βŽ₯
    ⎒0   0    0   ──   ──  0βŽ₯
    ⎒             3    3    βŽ₯
    ⎒                       βŽ₯
    ⎣0   0    0    0    0  1⎦
    """
    # We calculate the quantum numbers for the uncoupled basis.
    M1 = [-j1 + i for i in range(2*j1 + 1)]
    M2 = [-j2 + i for i in range(2*j2 + 1)]
    j1j2nums = [(j1, m1, j2, m2) for m1 in M1 for m2 in M2]
    # We calculate the quantum numbers for the coupled basis.
    Jper = perm_j(j1, j2)
    jmjnums = [(J, MJ - J) for J in Jper for MJ in range(2*J + 1)]
    # We build the transformation matrix.
    U = zeros((2*j1 + 1)*(2*j2 + 1))
    for ii, numj in enumerate(jmjnums):
        j, mj = numj
        for jj, numi in enumerate(j1j2nums):
            j1, m1, j2, m2 = numi
            U[ii, jj] = clebsch_gordan(j1, j2, j, m1, m2, mj)
    return U
ur"""For angular momenta $j_1, j_2$ the unitary transformation from the \ uncoupled basis into the $j = j_1 \oplus j_2$ coupled basis. >>> from sympy import Integer, pprint >>> L = 0 >>> S = 1/Integer(2) >>> pprint(coupling_matrix_2j(L, S)) ⎑1 0⎀ ⎒ βŽ₯ ⎣0 1⎦ >>> L = 1 >>> S = 1/Integer(2) >>> pprint(coupling_matrix_2j(L, S)) ⎑ -√6 √3 ⎀ ⎒0 ──── ── 0 0 0βŽ₯ ⎒ 3 3 βŽ₯ ⎒ βŽ₯ ⎒ -√3 √6 βŽ₯ ⎒0 0 0 ──── ── 0βŽ₯ ⎒ 3 3 βŽ₯ ⎒ βŽ₯ ⎒1 0 0 0 0 0βŽ₯ ⎒ βŽ₯ ⎒ √3 √6 βŽ₯ ⎒0 ── ── 0 0 0βŽ₯ ⎒ 3 3 βŽ₯ ⎒ βŽ₯ ⎒ √6 √3 βŽ₯ ⎒0 0 0 ── ── 0βŽ₯ ⎒ 3 3 βŽ₯ ⎒ βŽ₯ ⎣0 0 0 0 0 1⎦
def upload_file(self, file_or_path, obj_name=None, content_type=None,
                etag=None, return_none=False, content_encoding=None,
                ttl=None, content_length=None, headers=None):
    """
    Uploads the specified file to this container. If no name is supplied,
    the file's name will be used. Either a file path or an open file-like
    object may be supplied. A StorageObject reference to the uploaded file
    will be returned, unless 'return_none' is set to True.

    You may optionally set the `content_type` and `content_encoding`
    parameters; pyrax will create the appropriate headers when the object
    is stored. If the size of the file is known, it can be passed as
    `content_length`.

    If you wish for the object to be temporary, specify the time it should
    be stored in seconds in the `ttl` parameter. If this is specified, the
    object will be deleted after that number of seconds.

    The 'extra_info' parameter is included for backwards compatibility. It
    is no longer used at all, and will not be modified with swiftclient
    info, since swiftclient is not used any more.
    """
    return self.create(file_or_path=file_or_path, obj_name=obj_name,
                       content_type=content_type, etag=etag,
                       content_encoding=content_encoding, headers=headers,
                       content_length=content_length, ttl=ttl,
                       return_none=return_none)
def unit(self):
    """ Returns the unit attribute of the underlying ncdf variable.

        If the unit has a length (e.g. is a list) and has precisely one
        element per field, the unit for this field is returned.
    """
    unit = ncVarUnit(self._ncVar)
    fieldNames = self._ncVar.dtype.names

    # If the unit attribute is a list with the same length as the number
    # of fields, return the unit for the field that equals self.nodeName.
    if hasattr(unit, '__len__') and len(unit) == len(fieldNames):
        idx = fieldNames.index(self.nodeName)
        return unit[idx]
    else:
        return unit
def detect_protocol(cls, message):
    '''Attempt to detect the protocol from the message.'''
    main = cls._message_to_payload(message)

    def protocol_for_payload(payload):
        if not isinstance(payload, dict):
            return JSONRPCLoose   # Will error
        # Obey an explicit "jsonrpc"
        version = payload.get('jsonrpc')
        if version == '2.0':
            return JSONRPCv2
        if version == '1.0':
            return JSONRPCv1

        # Now to decide between JSONRPCLoose and JSONRPCv1 if possible
        if 'result' in payload and 'error' in payload:
            return JSONRPCv1
        return JSONRPCLoose

    if isinstance(main, list):
        parts = set(protocol_for_payload(payload) for payload in main)
        # If all same protocol, return it
        if len(parts) == 1:
            return parts.pop()
        # If strict protocol detected, return it, preferring JSONRPCv2.
        # This means a batch of JSONRPCv1 will fail
        for protocol in (JSONRPCv2, JSONRPCv1):
            if protocol in parts:
                return protocol
        # Will error if no parts
        return JSONRPCLoose

    return protocol_for_payload(main)
def create_archive(archive, filenames, verbosity=0, program=None,
                   interactive=True):
    """Create given archive with given files."""
    util.check_new_filename(archive)
    util.check_archive_filelist(filenames)
    if verbosity >= 0:
        util.log_info("Creating %s ..." % archive)
    res = _create_archive(archive, filenames, verbosity=verbosity,
                          interactive=interactive, program=program)
    if verbosity >= 0:
        util.log_info("... %s created." % archive)
    return res
def is_web_url(string):
    """Check to see if string is a validly-formatted web url."""
    assert isinstance(string, str)  # `basestring` in the original Python 2 code
    parsed_url = urllib.parse.urlparse(string)
    return (
        parsed_url.scheme.lower() in ('http', 'https')
        and parsed_url.netloc
    )
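Example calls against the function above; for a valid URL the return value is the truthy netloc string, so it doubles as a boolean:

print(is_web_url("https://example.com/path"))  # 'example.com' (truthy)
print(is_web_url("ftp://example.com"))         # False (scheme not http/https)
print(is_web_url("not a url"))                 # False (no scheme, no netloc)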
def scan_config_argument(ctx, param, value, config_dir=None):
    """Validate / translate config name/path values for click config arg.

    Wrapper on top of :func:`cli.scan_config`."""
    if callable(config_dir):
        config_dir = config_dir()

    if not value:  # the original tested an undefined name, ``config``
        click.echo("Enter at least one CONFIG")
        click.echo(ctx.get_help(), color=ctx.color)
        ctx.exit()

    if isinstance(value, string_types):
        value = scan_config(value, config_dir=config_dir)
    elif isinstance(value, tuple):
        value = tuple([scan_config(v, config_dir=config_dir) for v in value])

    return value
def compare(jaide, commands):
    """Perform a show | compare with some set commands.

    @param jaide: The jaide connection to the device.
    @type jaide: jaide.Jaide object
    @param commands: The set commands to send to the device to compare with.
    @type commands: str or list

    @returns: The output from the device.
    @rtype: str
    """
    output = color("show | compare:\n", 'yel')
    return output + color_diffs(jaide.compare_config(commands))
def offset(self):
    """int: offset of the key within the Windows Registry file or None."""
    if not self._registry_key and self._registry:
        self._GetKeyFromRegistry()
    if not self._registry_key:
        return None
    return self._registry_key.offset
def collect_results(self) -> Optional[Tuple[int, Dict[str, float]]]:
    """
    Returns the decoded checkpoint and the decoder metrics or None if the
    queue is empty.
    """
    self.wait_to_finish()
    if self.decoder_metric_queue.empty():
        if self._results_pending:
            self._any_process_died = True
        self._results_pending = False
        return None
    decoded_checkpoint, decoder_metrics = self.decoder_metric_queue.get()
    assert self.decoder_metric_queue.empty()
    self._results_pending = False
    logger.info("Decoder-%d finished: %s", decoded_checkpoint, decoder_metrics)
    return decoded_checkpoint, decoder_metrics
def currentDateTime(self):
    """
    Returns the current date time for this widget.

    :return <datetime.datetime>
    """
    view = self.uiGanttVIEW
    scene = view.scene()
    point = view.mapToScene(0, 0)
    return scene.datetimeAt(point.x())
def _create_ret_object(self, status=SUCCESS, data=None, error=False,
                       error_message=None, error_cause=None):
    """
    Create generic response objects.

    :param str status: The SUCCESS or FAILURE of the request
    :param obj data: The data to return
    :param bool error: Set to True to add Error response
    :param str error_message: The generic error message
    :param str error_cause: The cause of the error
    :returns: A dictionary of values
    """
    ret = {}
    if status == self.FAILURE:
        ret['status'] = self.FAILURE
    else:
        ret['status'] = self.SUCCESS
    ret['data'] = data

    if error:
        ret['error'] = {}
        if error_message is not None:
            ret['error']['message'] = error_message
        if error_cause is not None:
            ret['error']['cause'] = error_cause
    else:
        ret['error'] = None

    return ret
def __configure_interior(self, *args):
    """
    Private function to configure the interior Frame.

    :param args: Tkinter event
    """
    # Resize the canvas scrollregion to fit the entire frame
    (size_x, size_y) = (self.interior.winfo_reqwidth(),
                        self.interior.winfo_reqheight())
    self._canvas.config(scrollregion="0 0 {0} {1}".format(size_x, size_y))
    # `!=` rather than `is not`: identity checks on ints are unreliable
    if self.interior.winfo_reqwidth() != self._canvas.winfo_width():
        # If the interior Frame is wider than the canvas, automatically
        # resize the canvas to fit the frame
        self._canvas.config(width=self.interior.winfo_reqwidth())
def get_connection(self):
    """Get a connection to this Database. Connections are retrieved from
    a pool.
    """
    if not self.open:
        raise exc.ResourceClosedError('Database closed.')
    return Connection(self._engine.connect())
def add(self, nb=1, name=None, xid=None):
    """ Create one or many workers. """
    for x in range(nb):  # `xrange` in the original Python 2 code
        self.count_lock.acquire()
        if self.workers >= self.max_workers:
            self.count_lock.release()
            continue
        self.workers += 1
        if xid is None:
            xid = self.workers
        self.count_lock.release()
        self.kill_event.clear()
        w = WorkerThread(xid, self)
        w.setName(self.get_name(xid, name))
        w.start()
def fetch(self, wait=0):
    """
    get the task result objects.

    :param int wait: how many milliseconds to wait for a result
    :return: an unsorted list of task objects
    """
    if self.started:
        return fetch(self.id, wait=wait, cached=self.cached)
def extend(validator, validators=(), version=None, type_checker=None):
    """
    Create a new validator class by extending an existing one.

    Arguments:

        validator (jsonschema.IValidator):
            an existing validator class

        validators (collections.Mapping):
            a mapping of new validator callables to extend with, whose
            structure is as in `create`.

            .. note::

                Any validator callables with the same name as an existing
                one will (silently) replace the old validator callable
                entirely, effectively overriding any validation done in
                the "parent" validator class.

                If you wish to instead extend the behavior of a parent's
                validator callable, delegate and call it directly in the
                new validator function by retrieving it using
                ``OldValidator.VALIDATORS["validator_name"]``.

        version (str):
            a version for the new validator class

        type_checker (jsonschema.TypeChecker):
            a type checker, used when applying the :validator:`type`
            validator.

            If unprovided, the type checker of the extended
            `jsonschema.IValidator` will be carried along.

    Returns:

        a new `jsonschema.IValidator` class extending the one provided

    .. note:: Meta Schemas

        The new validator class will have its parent's meta schema.

        If you wish to change or extend the meta schema in the new
        validator class, modify ``META_SCHEMA`` directly on the returned
        class. Note that no implicit copying is done, so a copy should
        likely be made before modifying it, in order to not affect the
        old validator.
    """
    all_validators = dict(validator.VALIDATORS)
    all_validators.update(validators)

    if type_checker is None:
        type_checker = validator.TYPE_CHECKER
    elif validator._CREATED_WITH_DEFAULT_TYPES:
        raise TypeError(
            "Cannot extend a validator created with default_types "
            "with a type_checker. Update the validator to use a "
            "type_checker when created."
        )
    return create(
        meta_schema=validator.META_SCHEMA,
        validators=all_validators,
        version=version,
        type_checker=type_checker,
        id_of=validator.ID_OF,
    )
def list_same_dimensions(self, unit_object):
    """
    Return a list of base unit names that this registry knows about that
    are of equivalent dimensions to *unit_object*.
    """
    equiv = [k for k, v in self.lut.items()
             if v[1] is unit_object.dimensions]
    equiv = list(sorted(set(equiv)))
    return equiv
def MeshLines(*inputobj, **options):
    """
    Build the line segments between two lists of points `startPoints` and
    `endPoints`. `startPoints` can be also passed in the form
    ``[[point1, point2], ...]``.

    A dolfin ``Mesh`` that was deformed/modified by a function can be
    passed together as inputs.

    :param float scale: apply a rescaling factor to the length
    """
    scale = options.pop("scale", 1)
    lw = options.pop("lw", 1)
    c = options.pop("c", None)
    alpha = options.pop("alpha", 1)

    mesh, u = _inputsort(inputobj)
    startPoints = mesh.coordinates()
    u_values = np.array([u(p) for p in mesh.coordinates()])
    if not utils.isSequence(u_values[0]):
        printc("~times Error: cannot show Lines for 1D scalar values!", c=1)
        exit()
    endPoints = mesh.coordinates() + u_values
    if u_values.shape[1] == 2:  # u_values is 2D
        u_values = np.insert(u_values, 2, 0, axis=1)  # make it 3d
        startPoints = np.insert(startPoints, 2, 0, axis=1)  # make it 3d
        endPoints = np.insert(endPoints, 2, 0, axis=1)  # make it 3d

    actor = shapes.Lines(startPoints, endPoints, scale=scale,
                         lw=lw, c=c, alpha=alpha)
    actor.mesh = mesh
    actor.u = u
    actor.u_values = u_values
    return actor
def dt(self, start_node=None):
    """main method to create an RSTTree from the output of get_rs3_data().

    TODO: add proper documentation
    """
    if start_node is None:
        return self.root2tree(start_node=start_node)

    elem_id = start_node
    if elem_id not in self.elem_dict:
        return []

    elem = self.elem_dict[elem_id]
    elem_type = elem['element_type']
    assert elem_type in ('segment', 'group')

    if elem_type == 'segment':
        return self.segment2tree(
            elem_id, elem, elem_type, start_node=start_node)
    else:
        return self.group2tree(
            elem_id, elem, elem_type, start_node=start_node)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
         allow_nan=True, cls=None, indent=None, **kw):
    """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
    ``.write()``-supporting file-like object).

    If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.

    If ``ensure_ascii`` is ``False``, then some chunks written to ``fp``
    may be ``unicode`` instances, subject to normal Python ``str`` to
    ``unicode`` coercion rules. Unless ``fp.write()`` explicitly
    understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
    to cause an error.

    If ``check_circular`` is ``False``, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).

    If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
    in strict compliance of the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    If ``indent`` is a non-negative integer, then JSON array elements and
    object members will be pretty-printed with that indent level. An
    indent level of 0 will only insert newlines. ``None`` is the most
    compact representation.

    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg.
    """
    if cls is None:
        cls = JSONEncoder
    iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
                   check_circular=check_circular, allow_nan=allow_nan,
                   indent=indent, **kw).iterencode(obj)
    # could accelerate with writelines in some versions of Python, at
    # a debuggability cost
    for chunk in iterable:
        fp.write(chunk)
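Typical usage, writing to an in-memory buffer (assuming the module-level JSONEncoder referenced above behaves like the standard library's):

import io

buf = io.StringIO()
dump({"a": [1, 2], "b": None}, buf, indent=2)
print(buf.getvalue())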
def shared_databases(self):
    """
    Retrieves a list containing the names of databases shared with
    this account.

    :returns: List of database names
    """
    endpoint = '/'.join((
        self.server_url, '_api', 'v2', 'user', 'shared_databases'))
    resp = self.r_session.get(endpoint)
    resp.raise_for_status()
    data = response_to_json_dict(resp)
    return data.get('shared_databases', [])
def _file_in_patch(self, filename, patch, ignore):
    """ Checks if a backup file of the filename in the current patch
        exists """
    file = self.quilt_pc + File(os.path.join(patch.get_name(), filename))
    if file.exists():
        if ignore:
            return True
        else:
            raise QuiltError("File %s is already in patch %s" %
                             (filename, patch.get_name()))
    return False
def _get_magnitude_term(self, C, mag):
    """
    Returns the magnitude scaling term provided in Equation (5)
    """
    dmag = mag - 8.0
    return C["c0"] + C["c3"] * dmag + C["c4"] * (dmag ** 2.)
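Written out, the term computed above is a quadratic in magnitude about a pivot at M = 8 (the coefficients c0, c3, c4 come from the coefficient row C):

    f_mag(M) = c0 + c3*(M - 8) + c4*(M - 8)**2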
def matches_to_marker_results(df):
    """Perfect BLAST matches to marker results dict

    Parse perfect BLAST matches to marker results dict.

    Args:
        df (pandas.DataFrame): DataFrame of perfect BLAST matches

    Returns:
        dict: cgMLST330 marker names to matching allele numbers
    """
    assert isinstance(df, pd.DataFrame)

    from collections import defaultdict
    d = defaultdict(list)
    for idx, row in df.iterrows():
        marker = row['marker']
        d[marker].append(row)

    marker_results = {}
    for k, v in d.items():
        if len(v) > 1:
            logging.debug('Multiple potential cgMLST allele matches (n=%s) '
                          'found for marker %s. Selecting match on longest '
                          'contig.', len(v), k)
            df_marker = pd.DataFrame(v)
            df_marker.sort_values('slen', ascending=False, inplace=True)
            for i, r in df_marker.iterrows():
                allele = r['allele_name']
                slen = r['slen']
                logging.debug('Selecting allele %s from contig with length %s',
                              allele, slen)
                seq = r['sseq']
                if '-' in seq:
                    logging.warning('Gaps found in allele. Removing gaps. %s', r)
                    seq = seq.replace('-', '').upper()
                    allele = allele_name(seq)
                marker_results[k] = allele_result_dict(allele, seq, r.to_dict())
                break
        elif len(v) == 1:
            row = v[0]
            seq = row['sseq']
            if '-' in seq:
                logging.warning('Gaps found in allele. Removing gaps. %s', row)
                seq = seq.replace('-', '').upper()
            allele = allele_name(seq)
            marker_results[k] = allele_result_dict(allele, seq, row.to_dict())
        else:
            err_msg = 'Empty list of matches for marker {}'.format(k)
            logging.error(err_msg)
            raise Exception(err_msg)
    return marker_results
async def release(
        self, *, comment: str = None, erase: bool = None,
        secure_erase: bool = None, quick_erase: bool = None,
        wait: bool = False, wait_interval: int = 5):
    """
    Release the machine.

    :param comment: Reason machine was released.
    :type comment: `str`
    :param erase: Erase the disk when release.
    :type erase: `bool`
    :param secure_erase: Use the drive's secure erase feature if available.
    :type secure_erase: `bool`
    :param quick_erase: Wipe just the beginning and end of the disk.
        This is not secure.
    :param wait: If specified, wait until the deploy is complete.
    :type wait: `bool`
    :param wait_interval: How often to poll, defaults to 5 seconds.
    :type wait_interval: `int`
    """
    params = remove_None({
        "system_id": self.system_id,
        "comment": comment,
        "erase": erase,
        "secure_erase": secure_erase,
        "quick_erase": quick_erase,
    })
    self._data = await self._handler.release(**params)
    if not wait:
        return self
    else:
        # Wait for machine to be released
        while self.status in [
                NodeStatus.RELEASING, NodeStatus.DISK_ERASING]:
            await asyncio.sleep(wait_interval)
            try:
                self._data = await self._handler.read(
                    system_id=self.system_id)
            except CallError as error:
                if error.status == HTTPStatus.NOT_FOUND:
                    # Release must have been on a machine in a pod. This
                    # machine no longer exists. Just return the machine
                    # as it has been released.
                    return self
                else:
                    raise
        if self.status == NodeStatus.FAILED_RELEASING:
            msg = "{hostname} failed to be released.".format(
                hostname=self.hostname
            )
            raise FailedReleasing(msg, self)
        elif self.status == NodeStatus.FAILED_DISK_ERASING:
            msg = "{hostname} failed to erase disk.".format(
                hostname=self.hostname
            )
            raise FailedDiskErasing(msg, self)
        return self
def balance(self):
    """Check this transaction for correctness"""
    self.check()
    if not sum(map(lambda x: x.amount, self.src)) == -self.amount:
        raise XnBalanceError("Sum of source amounts "
                             "not equal to transaction amount")
    if not sum(map(lambda x: x.amount, self.dst)) == self.amount:
        raise XnBalanceError("Sum of destination amounts "
                             "not equal to transaction amount")
    return True
def wv45(msg):
    """Wake vortex.

    Args:
        msg (String): 28 bytes hexadecimal message string

    Returns:
        int: Wake vortex level. 0=NIL, 1=Light, 2=Moderate, 3=Severe
    """
    d = hex2bin(data(msg))

    if d[12] == '0':
        return None

    ws = bin2int(d[13:15])
    return ws
def _full_name(self, record_name):
    """Returns full domain name of a sub-domain name"""
    # Handle None and empty strings
    if not record_name:
        return self.domain
    return super(Provider, self)._full_name(record_name)
def _format_finite(negative, digits, dot_pos):
    """Given a (possibly empty) string of digits and an integer dot_pos
    indicating the position of the decimal point relative to the start of
    that string, output a formatted numeric string with the same value and
    same implicit exponent."""
    # strip leading zeros
    olddigits = digits
    digits = digits.lstrip('0')
    dot_pos -= len(olddigits) - len(digits)

    # value is 0.digits * 10**dot_pos
    use_exponent = dot_pos <= -4 or dot_pos > len(digits)
    if use_exponent:
        exp = dot_pos - 1 if digits else dot_pos
        dot_pos -= exp

    # left pad with zeros, insert decimal point, and add exponent
    if dot_pos <= 0:
        digits = '0' * (1 - dot_pos) + digits
        dot_pos += 1 - dot_pos
    assert 1 <= dot_pos <= len(digits)
    if dot_pos < len(digits):
        digits = digits[:dot_pos] + '.' + digits[dot_pos:]
    if use_exponent:
        digits += "e{0:+03d}".format(exp)
    return '-' + digits if negative else digits
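A few worked calls (recall the value represented is 0.digits * 10**dot_pos):

print(_format_finite(False, '12345', 2))  # '12.345'  (0.12345 * 10**2)
print(_format_finite(True, '5', 0))       # '-0.5'    (-0.5 * 10**0)
print(_format_finite(False, '1', -5))     # '1e-06'   (0.1 * 10**-5)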
def get_product(id=None, name=None):
    """
    Get a specific Product by name or ID
    """
    content = get_product_raw(id, name)
    if content:
        return utils.format_json(content)
def wirevector_subset(self, cls=None, exclude=tuple()):
    """Return set of wirevectors, filtered by the type or tuple of types
    provided as cls.

    If no cls is specified, the full set of wirevectors associated with
    the Block are returned. If cls is a single type, or a tuple of types,
    only those wirevectors of the matching types will be returned. This
    is helpful for getting all inputs, outputs, or registers of a block
    for example.
    """
    if cls is None:
        initial_set = self.wirevector_set
    else:
        initial_set = (x for x in self.wirevector_set if isinstance(x, cls))
    if exclude == tuple():
        return set(initial_set)
    else:
        return set(x for x in initial_set if not isinstance(x, exclude))
def create(url, filename):
    """Create new fMRI for given experiment by uploading local file.
    Expects a tar-archive.

    Parameters
    ----------
    url : string
        Url to POST fMRI create request
    filename : string
        Path to tar-archive on local disk

    Returns
    -------
    string
        Url of created functional data resource
    """
    # Upload file to create fMRI resource. If response is not 201 the
    # uploaded file is not a valid functional data archive
    files = {'file': open(filename, 'rb')}
    response = requests.post(url, files=files)
    if response.status_code != 201:
        raise ValueError('invalid file: ' + filename)
    return references_to_dict(response.json()['links'])[REF_SELF]
def one_hot_encoding(input_tensor, num_labels):
    """ One-hot encode labels from input """
    xview = input_tensor.view(-1, 1).to(torch.long)
    onehot = torch.zeros(xview.size(0), num_labels,
                         device=input_tensor.device, dtype=torch.float)
    onehot.scatter_(1, xview, 1)
    return onehot.view(list(input_tensor.shape) + [-1])
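Example with a 1-D label tensor; the trailing view restores the input shape with the one-hot axis appended:

import torch

labels = torch.tensor([0, 2, 1])
print(one_hot_encoding(labels, 3))
# tensor([[1., 0., 0.],
#         [0., 0., 1.],
#         [0., 1., 0.]])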
def tf_action_exploration(self, action, exploration, action_spec):
    """
    Applies optional exploration to the action (post-processor for action
    outputs).

    Args:
        action (tf.Tensor): The original output action tensor (to be
            post-processed).
        exploration (Exploration): The Exploration object to use.
        action_spec (dict): Dict specifying the action space.

    Returns:
        The post-processed action output tensor.
    """
    action_shape = tf.shape(input=action)
    exploration_value = exploration.tf_explore(
        episode=self.global_episode,
        timestep=self.global_timestep,
        shape=action_spec['shape']
    )
    exploration_value = tf.expand_dims(input=exploration_value, axis=0)

    if action_spec['type'] == 'bool':
        action = tf.where(
            condition=(tf.random_uniform(shape=action_shape) < exploration_value),
            x=(tf.random_uniform(shape=action_shape) < 0.5),
            y=action
        )
    elif action_spec['type'] == 'int':
        action = tf.where(
            condition=(tf.random_uniform(shape=action_shape) < exploration_value),
            x=tf.random_uniform(shape=action_shape,
                                maxval=action_spec['num_actions'],
                                dtype=util.tf_dtype('int')),
            y=action
        )
    elif action_spec['type'] == 'float':
        noise = tf.random_normal(shape=action_shape,
                                 dtype=util.tf_dtype('float'))
        action += noise * exploration_value
        if 'min_value' in action_spec:
            action = tf.clip_by_value(
                t=action,
                clip_value_min=action_spec['min_value'],
                clip_value_max=action_spec['max_value']
            )
    return action
Applies optional exploration to the action (post-processor for action outputs). Args: action (tf.Tensor): The original output action tensor (to be post-processed). exploration (Exploration): The Exploration object to use. action_spec (dict): Dict specifying the action space. Returns: The post-processed action output tensor.
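The bool/int branches implement per-element epsilon exploration: with probability exploration_value an element of the action is replaced by a random action. A framework-free NumPy sketch of the same selection logic (illustrative only, not TensorForce code):

import numpy as np

rng = np.random.default_rng(0)
greedy = np.array([3, 1, 0, 2])                     # actions proposed by the policy
epsilon = 0.25                                      # stands in for exploration_value
explore_mask = rng.random(greedy.shape) < epsilon   # which elements to randomize
random_actions = rng.integers(0, 4, size=greedy.shape)
mixed = np.where(explore_mask, random_actions, greedy)
print(mixed)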
def _element_keywords(cls, backend, elements=None): "Returns a dictionary of element names to allowed keywords" if backend not in Store.loaded_backends(): return {} mapping = {} backend_options = Store.options(backend) elements = elements if elements is not None else backend_options.keys() for element in elements: if '.' in element: continue element = element if isinstance(element, tuple) else (element,) element_keywords = [] options = backend_options['.'.join(element)] for group in Options._option_groups: element_keywords.extend(options[group].allowed_keywords) mapping[element[0]] = element_keywords return mapping
Returns a dictionary of element names to allowed keywords
def reset(self):
    """
    Called when a project is opened or closed. Cleans up internal state.
    """
    self._allocated_node_names = set()
    self._nodes = {}
    self._links = {}
    self._drawings = {}
    self._snapshots = {}

    # List the available snapshots
    snapshot_dir = os.path.join(self.path, "snapshots")
    if os.path.exists(snapshot_dir):
        for snap in os.listdir(snapshot_dir):
            if snap.endswith(".gns3project"):
                snapshot = Snapshot(self, filename=snap)
                self._snapshots[snapshot.id] = snapshot

    # Create the project on demand on the compute node
    self._project_created_on_compute = set()
Called when a project is opened or closed. Cleans up internal state.
def eth_getStorageAt(self, address, position=0, block=BLOCK_TAG_LATEST): """https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getstorageat :param address: Storage address :type address: str :param position: Position in storage (optional) :type position: int :param block: Block tag or number (optional) :type block: int or BLOCK_TAGS :rtype: int """ block = validate_block(block) return hex_to_dec((yield from self.rpc_call('eth_getStorageAt', [address, hex(position), block])))
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getstorageat :param address: Storage address :type address: str :param position: Position in storage (optional) :type position: int :param block: Block tag or number (optional) :type block: int or BLOCK_TAGS :rtype: int
def read_memory(self, addr, transfer_size=32, now=True): """! @brief Read a memory location. By default, a word will be read. """ assert transfer_size in (8, 16, 32) if transfer_size == 32: result = conversion.byte_list_to_u32le_list(self._link.read_mem32(addr, 4, self._apsel))[0] elif transfer_size == 16: result = conversion.byte_list_to_u16le_list(self._link.read_mem16(addr, 2, self._apsel))[0] elif transfer_size == 8: result = self._link.read_mem8(addr, 1, self._apsel)[0] def read_callback(): return result return result if now else read_callback
! @brief Read a memory location. By default, a word will be read.
def print_matrix(X, decimals=1): """Pretty printing for numpy matrix X""" for row in np.round(X, decimals=decimals): print(row)
Pretty printing for numpy matrix X
def diam_swamee(FlowRate, HeadLossFric, Length, Nu, PipeRough): """Return the inner diameter of a pipe. The Swamee Jain equation is dimensionally correct and returns the inner diameter of a pipe given the flow rate and the head loss due to shear on the pipe walls. The Swamee Jain equation does NOT take minor losses into account. This equation ONLY applies to turbulent flow. """ #Checking input validity ut.check_range([FlowRate, ">0", "Flow rate"], [Length, ">0", "Length"], [HeadLossFric, ">0", "Headloss due to friction"], [Nu, ">0", "Nu"], [PipeRough, "0-1", "Pipe roughness"]) a = ((PipeRough ** 1.25) * ((Length * FlowRate**2) / (gravity.magnitude * HeadLossFric) )**4.75 ) b = (Nu * FlowRate**9.4 * (Length / (gravity.magnitude * HeadLossFric)) ** 5.2 ) return 0.66 * (a+b)**0.04
Return the inner diameter of a pipe. The Swamee Jain equation is dimensionally correct and returns the inner diameter of a pipe given the flow rate and the head loss due to shear on the pipe walls. The Swamee Jain equation does NOT take minor losses into account. This equation ONLY applies to turbulent flow.
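For reference, the expression computed above is the Swamee–Jain direct-diameter relation, written out here from the code itself (with ε the pipe roughness, Q the flow rate, L the length, h_f the friction head loss, ν the kinematic viscosity, g gravity):

D = 0.66\left[\varepsilon^{1.25}\left(\frac{L\,Q^{2}}{g\,h_f}\right)^{4.75}
      + \nu\,Q^{9.4}\left(\frac{L}{g\,h_f}\right)^{5.2}\right]^{0.04}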
def save_config( self, cmd="copy running-configuration startup-configuration", confirm=False, confirm_response="", ): """Saves Config""" return super(DellForce10SSH, self).save_config( cmd=cmd, confirm=confirm, confirm_response=confirm_response )
Saves Config
def drop_duplicates(self, subset=None, keep='min'):
    """Return DataFrame with duplicate rows (excluding index) removed,
    optionally only considering subset columns.

    Note that the row order is NOT maintained due to hashing.

    Parameters
    ----------
    subset : list of str, optional
        Which columns to consider
    keep : {'+', '*', 'min', 'max'}, optional
        What to select from the duplicate rows. These correspond to
        the possible merge operations in Weld.
        Note that '+' and '*' might produce unexpected results for strings.

    Returns
    -------
    DataFrame
        DataFrame without duplicate rows.

    """
    subset = check_and_obtain_subset_columns(subset, self)

    df = self.reset_index()
    df_names = df._gather_column_names()
    subset_indices = [df_names.index(col_name) for col_name in subset]

    weld_objects = weld_drop_duplicates(df._gather_data_for_weld(),
                                        df._gather_weld_types(),
                                        subset_indices,
                                        keep)

    index_data = self.index._gather_data(name=None)
    new_index = [Index(weld_objects[i], v.dtype, k)
                 for i, k, v in zip(list(range(len(index_data))), index_data.keys(), index_data.values())]
    if len(new_index) > 1:
        new_index = MultiIndex(new_index, self.index._gather_names())
    else:
        new_index = new_index[0]

    new_data = OrderedDict((sr.name, Series(obj, new_index, sr.dtype, sr.name))
                           for sr, obj in zip(self._iter(), weld_objects[len(index_data):]))

    return DataFrame(new_data, new_index)
Return DataFrame with duplicate rows (excluding index) removed, optionally only considering subset columns.

Note that the row order is NOT maintained due to hashing.

Parameters
----------
subset : list of str, optional
    Which columns to consider
keep : {'+', '*', 'min', 'max'}, optional
    What to select from the duplicate rows. These correspond to the possible merge operations in Weld.
    Note that '+' and '*' might produce unexpected results for strings.

Returns
-------
DataFrame
    DataFrame without duplicate rows.
def _pycall_path_simple( x1: int, y1: int, x2: int, y2: int, handle: Any ) -> float: """Does less and should run faster, just calls the handle function.""" return ffi.from_handle(handle)(x1, y1, x2, y2)
Does less and should run faster, just calls the handle function.
def annotate_op(self, op): """ Takes a bytecode operation (:class:`Op`) and annotates it using the data contained in this code object. Arguments: op(Op): An :class:`Op` instance. Returns: AnnotatedOp: An annotated bytecode operation. """ if isinstance(op, Label): return op else: return AnnotatedOp(self, op.name, op.arg)
Takes a bytecode operation (:class:`Op`) and annotates it using the data contained in this code object. Arguments: op(Op): An :class:`Op` instance. Returns: AnnotatedOp: An annotated bytecode operation.
def _set_config(self, config=None): """Set this component's initial configuration""" if not config: config = {} try: # pprint(self.configschema) self.config = self.componentmodel(config) # self.log("Config schema:", lvl=critical) # pprint(self.config.__dict__) # pprint(self.config._fields) try: name = self.config.name self.log("Name set to: ", name, lvl=verbose) except (AttributeError, KeyError): # pragma: no cover self.log("Has no name.", lvl=verbose) try: self.config.name = self.uniquename except (AttributeError, KeyError) as e: # pragma: no cover self.log("Cannot set component name for configuration: ", e, type(e), self.name, exc=True, lvl=critical) try: uuid = self.config.uuid self.log("UUID set to: ", uuid, lvl=verbose) except (AttributeError, KeyError): self.log("Has no UUID", lvl=verbose) self.config.uuid = str(uuid4()) try: notes = self.config.notes self.log("Notes set to: ", notes, lvl=verbose) except (AttributeError, KeyError): self.log("Has no notes, trying docstring", lvl=verbose) notes = self.__doc__ if notes is None: notes = "No notes." else: notes = notes.lstrip().rstrip() self.log(notes) self.config.notes = notes try: componentclass = self.config.componentclass self.log("Componentclass set to: ", componentclass, lvl=verbose) except (AttributeError, KeyError): self.log("Has no component class", lvl=verbose) self.config.componentclass = self.name except ValidationError as e: self.log("Not setting invalid component configuration: ", e, type(e), exc=True, lvl=error)
Set this component's initial configuration
def wrap_penalty(p, fit_linear, linear_penalty=0.): """ tool to account for unity penalty on the linear term of any feature. example: p = wrap_penalty(derivative, fit_linear=True)(n, coef) Parameters ---------- p : callable. penalty-matrix-generating function. fit_linear : boolean. whether the current feature has a linear term or not. linear_penalty : float, default: 0. penalty on the linear term Returns ------- wrapped_p : callable modified penalty-matrix-generating function """ def wrapped_p(n, *args): if fit_linear: if n == 1: return sp.sparse.block_diag([linear_penalty], format='csc') return sp.sparse.block_diag([linear_penalty, p(n-1, *args)], format='csc') else: return p(n, *args) return wrapped_p
tool to account for unity penalty on the linear term of any feature. example: p = wrap_penalty(derivative, fit_linear=True)(n, coef) Parameters ---------- p : callable. penalty-matrix-generating function. fit_linear : boolean. whether the current feature has a linear term or not. linear_penalty : float, default: 0. penalty on the linear term Returns ------- wrapped_p : callable modified penalty-matrix-generating function
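A small sketch of the wrapping behavior, assuming wrap_penalty above is in scope and using an identity matrix as a stand-in penalty generator (real pyGAM penalties are more involved):

import scipy.sparse

def toy_penalty(n):
    # stand-in penalty-matrix generator: n x n identity
    return scipy.sparse.identity(n, format='csc')

wrapped = wrap_penalty(toy_penalty, fit_linear=True, linear_penalty=0.)
P = wrapped(4)
print(P.shape)      # (4, 4): a 1x1 block for the linear term + 3x3 toy penalty
print(P.toarray())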
def f2p(phrase, max_word_size=15, cutoff=3): """Convert a Finglish phrase to the most probable Persian phrase. """ results = f2p_list(phrase, max_word_size, cutoff) return ' '.join(i[0][0] for i in results)
Convert a Finglish phrase to the most probable Persian phrase.
def _unpack_zipfile(filename, extract_dir): """Unpack zip `filename` to `extract_dir` """ try: import zipfile except ImportError: raise ReadError('zlib not supported, cannot unpack this archive.') if not zipfile.is_zipfile(filename): raise ReadError("%s is not a zip file" % filename) zip = zipfile.ZipFile(filename) try: for info in zip.infolist(): name = info.filename # don't extract absolute paths or ones with .. in them if name.startswith('/') or '..' in name: continue target = os.path.join(extract_dir, *name.split('/')) if not target: continue _ensure_directory(target) if not name.endswith('/'): # file data = zip.read(info.filename) f = open(target, 'wb') try: f.write(data) finally: f.close() del data finally: zip.close()
Unpack zip `filename` to `extract_dir`
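The name checks above are the path-traversal guard: absolute paths and any entry containing '..' are silently skipped rather than extracted. A standalone illustration of that filter:

for name in ('/etc/passwd', '../../outside.txt', 'docs/readme.txt'):
    unsafe = name.startswith('/') or '..' in name
    print(name, '->', 'skipped' if unsafe else 'extracted')
# /etc/passwd -> skipped
# ../../outside.txt -> skipped
# docs/readme.txt -> extracted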
def Decrypt(self, encrypted_data): """Decrypts the encrypted data. Args: encrypted_data (bytes): encrypted data. Returns: tuple[bytes, bytes]: decrypted data and remaining encrypted data. """ index_split = -(len(encrypted_data) % AES.block_size) if index_split: remaining_encrypted_data = encrypted_data[index_split:] encrypted_data = encrypted_data[:index_split] else: remaining_encrypted_data = b'' decrypted_data = self._aes_cipher.decrypt(encrypted_data) return decrypted_data, remaining_encrypted_data
Decrypts the encrypted data. Args: encrypted_data (bytes): encrypted data. Returns: tuple[bytes, bytes]: decrypted data and remaining encrypted data.
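The negative index_split trick holds back the trailing partial AES block so only whole blocks reach the cipher; the remainder is returned for a later call. A standalone sketch of the arithmetic (16 is AES.block_size):

BLOCK_SIZE = 16  # AES.block_size
data = b'x' * 37
index_split = -(len(data) % BLOCK_SIZE)  # -5: five trailing bytes are incomplete
whole, remainder = data[:index_split], data[index_split:]
print(len(whole), len(remainder))  # 32 5
# When len(data) is a multiple of 16, index_split is 0, hence the
# `if index_split:` guard in the method above.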
def allocate_objects(self, eps=0.01, noise_size=1):
    """!
    @brief Allocates object segments.

    @param[in] eps (double): Tolerance level that defines the maximal difference between phases of oscillators in one segment.
    @param[in] noise_size (uint): Threshold that defines noise - segments whose size (in pixels) is less than the threshold are considered noise.

    @return (list) Object segments where each object segment consists of indexes of pixels that form the object segment.

    """
    if self.__object_segment_analysers is None:
        return []

    segments = []
    for object_segment_analyser in self.__object_segment_analysers:
        indexes = object_segment_analyser['color_segment']
        analyser = object_segment_analyser['analyser']

        segments += analyser.allocate_clusters(eps, indexes)

    real_segments = [segment for segment in segments if len(segment) > noise_size]
    return real_segments
!
@brief Allocates object segments.

@param[in] eps (double): Tolerance level that defines the maximal difference between phases of oscillators in one segment.
@param[in] noise_size (uint): Threshold that defines noise - segments whose size (in pixels) is less than the threshold are considered noise.

@return (list) Object segments where each object segment consists of indexes of pixels that form the object segment.
def verify_certificate_issuer(self, certificate_issuer_id, **kwargs): # noqa: E501 """Verify certificate issuer. # noqa: E501 A utility API that can be used to validate the user configuration before activating a certificate issuer. Verifies that the certificate issuer is accessible and can be used to generate certificates by Device Management. <br> **Note:** The API requests the 3rd party CA to sign a test certificate. For some 3rd party CAs, this operation may make use of the account quota. <br> **Example usage:** ``` curl -X POST \\ -H 'authorization: <valid access token>' \\ -H 'content-type: application/json;charset=UTF-8' \\ https://api.us-east-1.mbedcloud.com/v3/certificate-issuers/01621a36719d507b9d48a91b00000000/verify ``` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.verify_certificate_issuer(certificate_issuer_id, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str certificate_issuer_id: Certificate issuer ID. <br> The ID of the certificate issuer. (required) :return: CertificateIssuerVerifyResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.verify_certificate_issuer_with_http_info(certificate_issuer_id, **kwargs) # noqa: E501 else: (data) = self.verify_certificate_issuer_with_http_info(certificate_issuer_id, **kwargs) # noqa: E501 return data
Verify certificate issuer. # noqa: E501 A utility API that can be used to validate the user configuration before activating a certificate issuer. Verifies that the certificate issuer is accessible and can be used to generate certificates by Device Management. <br> **Note:** The API requests the 3rd party CA to sign a test certificate. For some 3rd party CAs, this operation may make use of the account quota. <br> **Example usage:** ``` curl -X POST \\ -H 'authorization: <valid access token>' \\ -H 'content-type: application/json;charset=UTF-8' \\ https://api.us-east-1.mbedcloud.com/v3/certificate-issuers/01621a36719d507b9d48a91b00000000/verify ``` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.verify_certificate_issuer(certificate_issuer_id, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str certificate_issuer_id: Certificate issuer ID. <br> The ID of the certificate issuer. (required) :return: CertificateIssuerVerifyResponse If the method is called asynchronously, returns the request thread.
def evaluate(self): """Evaluate functional value of previous iteration.""" X = mp_Z_Y Xf = mp_Zf Df = mp_Df Sf = mp_Sf Ef = sl.inner(Df[np.newaxis, ...], Xf, axis=self.xstep.cri.axisM+1) - Sf Ef = np.swapaxes(Ef, 0, self.xstep.cri.axisK+1)[0] dfd = sl.rfl2norm2(Ef, self.xstep.S.shape, axis=self.xstep.cri.axisN)/2.0 rl1 = np.sum(np.abs(X)) obj = dfd + self.xstep.lmbda*rl1 return (obj, dfd, rl1)
Evaluate functional value of previous iteration.
def elapsed(self):
    """
    Get elapsed time in seconds (float)
    """
    # Clock stops running when total is reached
    if self.count == self.total:
        elapsed = self.last_update - self.start
    else:
        elapsed = time.time() - self.start

    return elapsed
Get elapsed time in seconds (float)
def set_editor(self, editor):
    """
    Sets the associated editor. When the editor's offset calculator mode
    emits the pic_infos_available signal, the table is refreshed
    automatically. You can also refresh manually by calling
    :meth:`update_pic_infos`.
    """
    if self._editor is not None:
        try:
            self._editor.offset_calculator.pic_infos_available.disconnect(
                self._update)
        except (AttributeError, RuntimeError, ReferenceError):
            # see https://github.com/OpenCobolIDE/OpenCobolIDE/issues/89
            pass
    self._editor = weakref.proxy(editor) if editor else editor
    try:
        self._editor.offset_calculator.pic_infos_available.connect(
            self._update)
    except AttributeError:
        pass
Sets the associated editor. When the editor's offset calculator mode emits the pic_infos_available signal, the table is refreshed automatically. You can also refresh manually by calling :meth:`update_pic_infos`.
def parse_model_specifier(specifier):
    '''
    Parses a string that specifies either a model or a field. The string
    should look like ``app.model.[field]``.

    >>> print parse_model_specifier('tests.TestModel')
    (<class 'tests.models.TestModel'>, None)
    >>> print parse_model_specifier('tests.TestModel.image')
    (<class 'tests.models.TestModel'>, 'image')

    :return: model and (optionally) field name
    :rtype: tuple of :py:class:`~django.db.models.Model` and str or None
    '''
    values = specifier.split('.')
    if len(values) == 2:
        values.append(None)
    elif len(values) != 3:
        raise ValueError(
            'Model specifier must be in app.model.[field] format. It '
            'has {} parts instead of 2 or 3 (when split on ".")'.format(
                len(values)
            )
        )
    app_name, model_name, field_name = values

    model = get_model(app_name, model_name)
    if not model:
        raise ValueError(
            'Model {} on app {} can not be found'.format(
                model_name, app_name,
            )
        )
    return model, field_name
Parses a string that specifies either a model or a field. The string should look like ``app.model.[field]``. >>> print parse_model_specifier('tests.TestModel') (<class 'tests.models.TestModel'>, None) >>> print parse_model_specifier('tests.TestModel.image') (<class 'tests.models.TestModel'>, 'image') :return: model and (optionally) field name :rtype: tuple of :py:class:`~django.db.models.Model` and str or None
def addSynapse(self, srcCellCol, srcCellIdx, perm):
    """Add a new synapse

    :param srcCellCol: source cell column
    :param srcCellIdx: source cell index within the column
    :param perm: initial permanence
    """
    self.syns.append([int(srcCellCol), int(srcCellIdx), numpy.float32(perm)])
Add a new synapse

:param srcCellCol: source cell column
:param srcCellIdx: source cell index within the column
:param perm: initial permanence
def setdefault(self, k, d=None): """Override dict.setdefault() to title-case keys.""" return super(HeaderDict, self).setdefault(k.title(), d)
Override dict.setdefault() to title-case keys.
def _define_output_buffers(self):
    """ Prepare a dictionary so we know which buffers have to be updated
    with the output of every step.
    """
    # First define buffers that need input data
    self.target_buffers = {
        None: [(step, self.buffers[step]) for step in self._get_input_steps()]
    }
    # Go through all steps and append the buffers of their child nodes
    for step in self.steps_sorted:
        if step != self:
            child_steps = [edge[1] for edge in self.graph.out_edges(step)]
            self.target_buffers[step] = [(child_step, self.buffers[child_step])
                                         for child_step in child_steps]
Prepare a dictionary so we know which buffers have to be updated with the output of every step.
def parse_for(control_line): """Returns name of loop control variable(s), iteration type (in/word_in) and expression to iterate on. For example: - given "for $i in $foo", returns (['i'], '$foo') - given "for ${i} in $(ls $foo)", returns (['i'], '$(ls $foo)') - given "for $k, $v in $foo", returns (['k', 'v'], '$foo') """ error = 'For loop call must be in form \'for $var in expression\', got: ' + control_line regex = re.compile(r'for\s+(\${?\S+}?)(?:\s*,\s+(\${?\S+}?))?\s+(in|word_in)\s+(\S.+)') res = regex.match(control_line) if not res: raise exceptions.YamlSyntaxError(error) groups = res.groups() control_vars = [] control_vars.append(get_var_name(groups[0])) if groups[1]: control_vars.append(get_var_name(groups[1])) iter_type = groups[2] expr = groups[3] return (control_vars, iter_type, expr)
Returns name of loop control variable(s), iteration type (in/word_in) and expression to iterate on. For example: - given "for $i in $foo", returns (['i'], '$foo') - given "for ${i} in $(ls $foo)", returns (['i'], '$(ls $foo)') - given "for $k, $v in $foo", returns (['k', 'v'], '$foo')
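A quick check of the regular expression above against the documented forms (standard library re only):

import re

regex = re.compile(r'for\s+(\${?\S+}?)(?:\s*,\s+(\${?\S+}?))?\s+(in|word_in)\s+(\S.+)')
print(regex.match('for $i in $foo').groups())
# ('$i', None, 'in', '$foo')
print(regex.match('for $k, $v in $foo').groups())
# ('$k', '$v', 'in', '$foo')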
def global_var(self, name): """Inserts a new static (global) variable definition""" self.newline_label(name, False, True) self.newline_text("WORD\t1", True)
Inserts a new static (global) variable definition
def crop(self, extent, copy=False):
    """
    Crop to a new depth range.

    Args:
        extent (tuple): The new start and stop depth. Must be 'inside'
            existing striplog.
        copy (bool): Whether to operate in place or make a copy.

    Returns:
        Operates in place by default; if copy is True, returns a striplog.
    """
    try:
        if extent[0] is None:
            extent = (self.start.z, extent[1])
        if extent[1] is None:
            extent = (extent[0], self.stop.z)
    except:
        m = "You must provide a 2-tuple for the new extents. Use None for"
        m += " the existing start or stop."
        raise StriplogError(m)

    first_ix = self.read_at(extent[0], index=True)
    last_ix = self.read_at(extent[1], index=True)

    first = self[first_ix].split_at(extent[0])[1]
    last = self[last_ix].split_at(extent[1])[0]

    new_list = self.__list[first_ix:last_ix+1].copy()
    new_list[0] = first
    new_list[-1] = last

    if copy:
        return Striplog(new_list)
    else:
        self.__list = new_list
        return
Crop to a new depth range.

Args:
    extent (tuple): The new start and stop depth. Must be 'inside' existing striplog.
    copy (bool): Whether to operate in place or make a copy.

Returns:
    Operates in place by default; if copy is True, returns a striplog.
def OnInsertCols(self, event):
    """Inserts the maximum of 1 and the number of selected columns"""
    bbox = self.grid.selection.get_bbox()
    if bbox is None or bbox[1][1] is None:
        # Insert columns at cursor
        ins_point = self.grid.actions.cursor[1] - 1
        no_cols = 1
    else:
        # Insert at right edge of bounding box
        ins_point = bbox[0][1] - 1
        no_cols = self._get_no_rowscols(bbox)[1]

    with undo.group(_("Insert columns")):
        self.grid.actions.insert_cols(ins_point, no_cols)

    self.grid.GetTable().ResetView()

    # Update the default sized cell sizes
    self.grid.actions.zoom()

    event.Skip()
Inserts the maximum of 1 and the number of selected columns
def get_issns_for_journal(nlm_id): """Get a list of the ISSN numbers for a journal given its NLM ID. Information on NLM XML DTDs is available at https://www.nlm.nih.gov/databases/dtd/ """ params = {'db': 'nlmcatalog', 'retmode': 'xml', 'id': nlm_id} tree = send_request(pubmed_fetch, params) if tree is None: return None issn_list = tree.findall('.//ISSN') issn_linking = tree.findall('.//ISSNLinking') issns = issn_list + issn_linking # No ISSNs found! if not issns: return None else: return [issn.text for issn in issns]
Get a list of the ISSN numbers for a journal given its NLM ID. Information on NLM XML DTDs is available at https://www.nlm.nih.gov/databases/dtd/
def towgs84(E, N, pkm=False, presentation=None):
    """
    Convert coordinates from TWD97 to WGS84

    The east and north coordinates should be in meters and in float
    pkm true for Penghu, Kinmen and Matsu area
    You can specify one of the following presentations of the returned values:
        dms - A tuple with degrees (int), minutes (int) and seconds (float)
        dmsstr - [+/-]DDDΒ°MMM'DDD.DDDDD" (unicode)
        mindec - A tuple with degrees (int) and minutes (float)
        mindecstr - [+/-]DDDΒ°MMM.MMMMM' (unicode)
        degdec (default) - DDD.DDDDD (float)
    """
    _lng0 = lng0pkm if pkm else lng0

    E /= 1000.0
    N /= 1000.0
    epsilon = (N-N0) / (k0*A)
    eta = (E-E0) / (k0*A)

    epsilonp = epsilon - beta1*sin(2*1*epsilon)*cosh(2*1*eta) - \
        beta2*sin(2*2*epsilon)*cosh(2*2*eta) - \
        beta3*sin(2*3*epsilon)*cosh(2*3*eta)
    etap = eta - beta1*cos(2*1*epsilon)*sinh(2*1*eta) - \
        beta2*cos(2*2*epsilon)*sinh(2*2*eta) - \
        beta3*cos(2*3*epsilon)*sinh(2*3*eta)
    sigmap = 1 - 2*1*beta1*cos(2*1*epsilon)*cosh(2*1*eta) - \
        2*2*beta2*cos(2*2*epsilon)*cosh(2*2*eta) - \
        2*3*beta3*cos(2*3*epsilon)*cosh(2*3*eta)
    taup = 2*1*beta1*sin(2*1*epsilon)*sinh(2*1*eta) + \
        2*2*beta2*sin(2*2*epsilon)*sinh(2*2*eta) + \
        2*3*beta3*sin(2*3*epsilon)*sinh(2*3*eta)

    chi = asin(sin(epsilonp) / cosh(etap))

    latitude = chi + delta1*sin(2*1*chi) + \
        delta2*sin(2*2*chi) + \
        delta3*sin(2*3*chi)
    longitude = _lng0 + atan(sinh(etap) / cos(epsilonp))

    func = None
    presentation = 'to%s' % presentation if presentation else None
    if presentation in presentations:
        func = getattr(sys.modules[__name__], presentation)
    # skip the conversion call for the default decimal-degree presentation
    if func and presentation != 'todegdec':
        return func(degrees(latitude)), func(degrees(longitude))
    return (degrees(latitude), degrees(longitude))
Convert coordinates from TWD97 to WGS84

The east and north coordinates should be in meters and in float
pkm true for Penghu, Kinmen and Matsu area
You can specify one of the following presentations of the returned values:
    dms - A tuple with degrees (int), minutes (int) and seconds (float)
    dmsstr - [+/-]DDDΒ°MMM'DDD.DDDDD" (unicode)
    mindec - A tuple with degrees (int) and minutes (float)
    mindecstr - [+/-]DDDΒ°MMM.MMMMM' (unicode)
    degdec (default) - DDD.DDDDD (float)
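A call sketch showing the three keyword options; the coordinates below are illustrative placeholders, not verified survey points:

# E/N are TWD97 grid coordinates in meters; outputs are WGS84 latitude/longitude
lat, lng = towgs84(302000.0, 2771000.0)                    # decimal degrees (default)
dms = towgs84(302000.0, 2771000.0, presentation='dms')     # ((d, m, s), (d, m, s))
pkm = towgs84(302000.0, 2771000.0, pkm=True)               # Penghu/Kinmen/Matsu grid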
def state_get(self): """Return the internal state of the DataFrame in a dictionary Example: >>> import vaex >>> df = vaex.from_scalars(x=1, y=2) >>> df['r'] = (df.x**2 + df.y**2)**0.5 >>> df.state_get() {'active_range': [0, 1], 'column_names': ['x', 'y', 'r'], 'description': None, 'descriptions': {}, 'functions': {}, 'renamed_columns': [], 'selections': {'__filter__': None}, 'ucds': {}, 'units': {}, 'variables': {}, 'virtual_columns': {'r': '(((x ** 2) + (y ** 2)) ** 0.5)'}} """ virtual_names = list(self.virtual_columns.keys()) + list(self.variables.keys()) units = {key: str(value) for key, value in self.units.items()} ucds = {key: value for key, value in self.ucds.items() if key in virtual_names} descriptions = {key: value for key, value in self.descriptions.items()} import vaex.serialize def check(key, value): if not vaex.serialize.can_serialize(value.f): warnings.warn('Cannot serialize function for virtual column {} (use vaex.serialize.register)'.format(key)) return False return True def clean(value): return vaex.serialize.to_dict(value.f) functions = {key: clean(value) for key, value in self.functions.items() if check(key, value)} virtual_columns = {key: value for key, value in self.virtual_columns.items()} selections = {name: self.get_selection(name) for name, history in self.selection_histories.items()} selections = {name: selection.to_dict() if selection is not None else None for name, selection in selections.items()} # if selection is not None} state = dict(virtual_columns=virtual_columns, column_names=self.column_names, renamed_columns=self._renamed_columns, variables=self.variables, functions=functions, selections=selections, ucds=ucds, units=units, descriptions=descriptions, description=self.description, active_range=[self._index_start, self._index_end]) return state
Return the internal state of the DataFrame in a dictionary Example: >>> import vaex >>> df = vaex.from_scalars(x=1, y=2) >>> df['r'] = (df.x**2 + df.y**2)**0.5 >>> df.state_get() {'active_range': [0, 1], 'column_names': ['x', 'y', 'r'], 'description': None, 'descriptions': {}, 'functions': {}, 'renamed_columns': [], 'selections': {'__filter__': None}, 'ucds': {}, 'units': {}, 'variables': {}, 'virtual_columns': {'r': '(((x ** 2) + (y ** 2)) ** 0.5)'}}
def new(self, request):
    """Render a form to create a new object."""
    form = (self.form or generate_form(self.model))()

    return self._render(
        request=request,
        template='new',
        context={'form': form},
        status=200
    )
Render a form to create a new object.
def connect_cloudfront(self): "Connect to Cloud Front. This is done automatically for you when needed." self.conn_cloudfront = connect_cloudfront(self.AWS_ACCESS_KEY_ID, self.AWS_SECRET_ACCESS_KEY, debug=self.S3UTILS_DEBUG_LEVEL)
Connect to Cloud Front. This is done automatically for you when needed.
def filter_paragraphs(paragraphs, contains=None): """Filter paragraphs to only those containing one of a list of strings Parameters ---------- paragraphs : list of str List of plaintext paragraphs from an article contains : str or list of str Exclude paragraphs not containing this string as a token, or at least one of the strings in contains if it is a list Returns ------- str Plaintext consisting of all input paragraphs containing at least one of the supplied tokens. """ if contains is None: pattern = '' else: if isinstance(contains, str): contains = [contains] pattern = '|'.join(r'[^\w]%s[^\w]' % shortform for shortform in contains) paragraphs = [p for p in paragraphs if re.search(pattern, p)] return '\n'.join(paragraphs) + '\n'
Filter paragraphs to only those containing one of a list of strings Parameters ---------- paragraphs : list of str List of plaintext paragraphs from an article contains : str or list of str Exclude paragraphs not containing this string as a token, or at least one of the strings in contains if it is a list Returns ------- str Plaintext consisting of all input paragraphs containing at least one of the supplied tokens.
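How the token pattern behaves: each shortform must be flanked by non-word characters, so substrings inside longer words do not match. Note this also means a token at the very start or end of a paragraph needs an adjacent non-word character to be found:

import re

pattern = '|'.join(r'[^\w]%s[^\w]' % s for s in ['Ras', 'Raf'])
print(bool(re.search(pattern, 'The Ras protein binds Raf.')))  # True
print(bool(re.search(pattern, 'Rasputin appears here.')))      # False: 'Ras' is a prefix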
def begin_transaction(self, transaction_type, trace_parent=None): """Register the start of a transaction on the client """ return self.tracer.begin_transaction(transaction_type, trace_parent=trace_parent)
Register the start of a transaction on the client
def resolve_freezer(freezer):
    """
    Locate the appropriate freezer given FREEZER or string input from the programmer.

    :param freezer: FREEZER constant or string for the freezer that is requested.  (None = FREEZER.DEFAULT)
    :return:
    """
    # Set default freezer if there was none
    if not freezer:
        return _Default()

    # Allow character based lookups as well
    if isinstance(freezer, six.string_types):
        cls = _freezer_lookup(freezer)
        return cls()

    # Allow plain class definition lookups (we instantiate the class)
    if freezer.__class__ == type.__class__:
        return freezer()

    # Warn when a custom freezer implementation is used.
    if freezer not in FREEZER.ALL:
        warn(u"Using custom freezer implementation: {0}".format(freezer))
    return freezer
Locate the appropriate freezer given FREEZER or string input from the programmer. :param freezer: FREEZER constant or string for the freezer that is requested. (None = FREEZER.DEFAULT) :return:
def parse_rst(text: str) -> docutils.nodes.document: """Parse text assuming it's an RST markup.""" parser = docutils.parsers.rst.Parser() components = (docutils.parsers.rst.Parser,) settings = docutils.frontend.OptionParser(components=components).get_default_values() document = docutils.utils.new_document('<rst-doc>', settings=settings) parser.parse(text, document) return document
Parse text assuming it's an RST markup.
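A minimal round trip, assuming the function above is in scope and docutils is installed:

doc = parse_rst('Title\n=====\n\nSome *emphasized* text.')
print(doc.astext())
# Title
#
# Some emphasized text.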
def use(parser, token):
    '''
    Counterpart to `macro`, lets you render any block/macro in place.
    '''
    args, kwargs = parser.parse_args(token)
    assert isinstance(args[0], ast.Str), \
        'First argument to "use" tag must be a string'
    name = args[0].s
    action = ast.YieldFrom(
        value=_a.Call(_a.Attribute(_a.Name('self'), name), [
            _a.Name('context'),
        ])
    )
    if kwargs:
        kwargs = _wrap_kwargs(kwargs)
        return _create_with_scope([ast.Expr(value=action)], kwargs)
    return action
Counterpart to `macro`, lets you render any block/macro in place.
def read_binary(self, num, item_type='B'): """Parse the current buffer offset as the specified code.""" if 'B' in item_type: return self.read(num) if item_type[0] in ('@', '=', '<', '>', '!'): order = item_type[0] item_type = item_type[1:] else: order = '@' return list(self.read_struct(Struct(order + '{:d}'.format(int(num)) + item_type)))
Parse the current buffer offset as the specified code.
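The struct format assembled above is byte order + count + type code; for example read_binary(4, '<H') builds Struct('<4H'). A standalone illustration with the standard library:

from struct import Struct

s = Struct('<4H')  # little-endian, four unsigned 16-bit integers
print(s.unpack(b'\x01\x00\x02\x00\x03\x00\x04\x00'))
# (1, 2, 3, 4)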
def setup(app): """Setup sphinx-gallery sphinx extension""" app.add_config_value('plot_gallery', True, 'html') app.add_config_value('abort_on_example_error', False, 'html') app.add_config_value('sphinx_gallery_conf', gallery_conf, 'html') app.add_stylesheet('gallery.css') app.connect('builder-inited', generate_gallery_rst) app.connect('build-finished', embed_code_links)
Setup sphinx-gallery sphinx extension
def search_stack_for_var(varname, verbose=util_arg.NOT_QUIET):
    """
    Finds a variable (local or global) somewhere in the stack and returns the value

    Args:
        varname (str): variable name

    Returns:
        None if varname is not found else its value
    """
    curr_frame = inspect.currentframe()
    if verbose:
        print(' * Searching parent frames for: ' + six.text_type(varname))
    frame_no = 0
    while curr_frame.f_back is not None:
        if varname in curr_frame.f_locals.keys():
            if verbose:
                print(' * Found local in frame: ' + six.text_type(frame_no))
            return curr_frame.f_locals[varname]
        if varname in curr_frame.f_globals.keys():
            if verbose:
                print(' * Found global in frame: ' + six.text_type(frame_no))
            return curr_frame.f_globals[varname]
        frame_no += 1
        curr_frame = curr_frame.f_back
    if verbose:
        print('... Found nothing in all ' + six.text_type(frame_no) + ' frames.')
    return None
Finds a variable (local or global) somewhere in the stack and returns the value

Args:
    varname (str): variable name

Returns:
    None if varname is not found else its value
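A usage sketch, assuming the function above is importable. It walks f_back frames, so locals of any caller are visible to it:

def outer():
    secret = 42          # a local in a parent frame
    return inner()

def inner():
    return search_stack_for_var('secret', verbose=False)

print(outer())  # 42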
def init_storage(self): """Set current term to zero upon initialization & voted_for to None""" if not self.storage.exists('term'): self.storage.update({ 'term': 0, }) self.storage.update({ 'voted_for': None })
Set current term to zero upon initialization & voted_for to None
def to_dot(self, path: str, title: Optional[str] = None):
    """
    Print the automaton to a dot file

    :param path: the path where to save the file.
    :param title: an optional title rendered as a label of the graph.
    :return:
    """
    g = graphviz.Digraph(format='svg')
    g.node('fake', style='invisible')
    for state in self._states:
        if state == self._initial_state:
            if state in self._accepting_states:
                g.node(str(state), root='true', shape='doublecircle')
            else:
                g.node(str(state), root='true')
        elif state in self._accepting_states:
            g.node(str(state), shape='doublecircle')
        else:
            g.node(str(state))

    g.edge('fake', str(self._initial_state), style='bold')
    for start in self._transition_function:
        for symbol, end in self._transition_function[start].items():
            g.edge(str(start), str(end), label=str(symbol))

    if title:
        g.attr(label=title)
        g.attr(fontsize='20')

    g.render(filename=path)
    return
Print the automaton to a dot file

:param path: the path where to save the file.
:param title: an optional title rendered as a label of the graph.
:return:
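The graphviz calls used above, in isolation; this requires the graphviz Python package and the Graphviz binaries on PATH, and the two-state automaton is made up for illustration:

import graphviz

g = graphviz.Digraph(format='svg')
g.node('q0', root='true', shape='doublecircle')  # initial and accepting
g.node('q1')
g.edge('q0', 'q1', label='a')
g.attr(label='toy automaton')
g.attr(fontsize='20')
g.render(filename='toy_automaton')  # writes toy_automaton and toy_automaton.svg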
def connected(self, node_id): """Return True iff the node_id is connected.""" conn = self._conns.get(node_id) if conn is None: return False return conn.connected()
Return True iff the node_id is connected.