text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def read(self, symbol, as_of=None, raw=False, **kwargs): # TODO: shall we block from_version from getting into super.read? """Read data for the named symbol. Returns a BitemporalItem object with a data and metdata element (as passed into write). Parameters ---------- symbol : `str` symbol name for the item as_of : `datetime.datetime` Return the data as it was as_of the point in time. raw : `bool` If True, will return the full bitemporal dataframe (i.e. all versions of the data). This also means as_of is ignored. Returns ------- BitemporalItem namedtuple which contains a .data and .metadata element """ item = self._store.read(symbol, **kwargs) last_updated = max(item.data.index.get_level_values(self.observe_column)) if raw: return BitemporalItem(symbol=symbol, library=self._store._arctic_lib.get_name(), data=item.data, metadata=item.metadata, last_updated=last_updated) else: index_names = list(item.data.index.names) index_names.remove(self.observe_column) return BitemporalItem(symbol=symbol, library=self._store._arctic_lib.get_name(), data=groupby_asof(item.data, as_of=as_of, dt_col=index_names, asof_col=self.observe_column), metadata=item.metadata, last_updated=last_updated)
[ "def", "read", "(", "self", ",", "symbol", ",", "as_of", "=", "None", ",", "raw", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# TODO: shall we block from_version from getting into super.read?", "item", "=", "self", ".", "_store", ".", "read", "(", "symbol", ",", "*", "*", "kwargs", ")", "last_updated", "=", "max", "(", "item", ".", "data", ".", "index", ".", "get_level_values", "(", "self", ".", "observe_column", ")", ")", "if", "raw", ":", "return", "BitemporalItem", "(", "symbol", "=", "symbol", ",", "library", "=", "self", ".", "_store", ".", "_arctic_lib", ".", "get_name", "(", ")", ",", "data", "=", "item", ".", "data", ",", "metadata", "=", "item", ".", "metadata", ",", "last_updated", "=", "last_updated", ")", "else", ":", "index_names", "=", "list", "(", "item", ".", "data", ".", "index", ".", "names", ")", "index_names", ".", "remove", "(", "self", ".", "observe_column", ")", "return", "BitemporalItem", "(", "symbol", "=", "symbol", ",", "library", "=", "self", ".", "_store", ".", "_arctic_lib", ".", "get_name", "(", ")", ",", "data", "=", "groupby_asof", "(", "item", ".", "data", ",", "as_of", "=", "as_of", ",", "dt_col", "=", "index_names", ",", "asof_col", "=", "self", ".", "observe_column", ")", ",", "metadata", "=", "item", ".", "metadata", ",", "last_updated", "=", "last_updated", ")" ]
49.25
27.25
def _pprint(params, offset=0, printer=repr): """Pretty print the dictionary 'params' Parameters ---------- params: dict The dictionary to pretty print offset: int The offset in characters to add at the begin of each line. printer: The function to convert entries to strings, typically the builtin str or repr """ # Do a multi-line justified repr: options = np.get_printoptions() np.set_printoptions(precision=5, threshold=64, edgeitems=2) params_list = list() this_line_length = offset line_sep = ',\n' + (1 + offset // 2) * ' ' for i, (k, v) in enumerate(sorted(six.iteritems(params))): if type(v) is float: # use str for representing floating point numbers # this way we get consistent representation across # architectures and versions. this_repr = '%s=%s' % (k, str(v)) else: # use repr of the rest this_repr = '%s=%s' % (k, printer(v)) if len(this_repr) > 500: this_repr = this_repr[:300] + '...' + this_repr[-100:] if i > 0: if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr): params_list.append(line_sep) this_line_length = len(line_sep) else: params_list.append(', ') this_line_length += 2 params_list.append(this_repr) this_line_length += len(this_repr) np.set_printoptions(**options) lines = ''.join(params_list) # Strip trailing space to avoid nightmare in doctests lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n')) return lines
[ "def", "_pprint", "(", "params", ",", "offset", "=", "0", ",", "printer", "=", "repr", ")", ":", "# Do a multi-line justified repr:", "options", "=", "np", ".", "get_printoptions", "(", ")", "np", ".", "set_printoptions", "(", "precision", "=", "5", ",", "threshold", "=", "64", ",", "edgeitems", "=", "2", ")", "params_list", "=", "list", "(", ")", "this_line_length", "=", "offset", "line_sep", "=", "',\\n'", "+", "(", "1", "+", "offset", "//", "2", ")", "*", "' '", "for", "i", ",", "(", "k", ",", "v", ")", "in", "enumerate", "(", "sorted", "(", "six", ".", "iteritems", "(", "params", ")", ")", ")", ":", "if", "type", "(", "v", ")", "is", "float", ":", "# use str for representing floating point numbers", "# this way we get consistent representation across", "# architectures and versions.", "this_repr", "=", "'%s=%s'", "%", "(", "k", ",", "str", "(", "v", ")", ")", "else", ":", "# use repr of the rest", "this_repr", "=", "'%s=%s'", "%", "(", "k", ",", "printer", "(", "v", ")", ")", "if", "len", "(", "this_repr", ")", ">", "500", ":", "this_repr", "=", "this_repr", "[", ":", "300", "]", "+", "'...'", "+", "this_repr", "[", "-", "100", ":", "]", "if", "i", ">", "0", ":", "if", "(", "this_line_length", "+", "len", "(", "this_repr", ")", ">=", "75", "or", "'\\n'", "in", "this_repr", ")", ":", "params_list", ".", "append", "(", "line_sep", ")", "this_line_length", "=", "len", "(", "line_sep", ")", "else", ":", "params_list", ".", "append", "(", "', '", ")", "this_line_length", "+=", "2", "params_list", ".", "append", "(", "this_repr", ")", "this_line_length", "+=", "len", "(", "this_repr", ")", "np", ".", "set_printoptions", "(", "*", "*", "options", ")", "lines", "=", "''", ".", "join", "(", "params_list", ")", "# Strip trailing space to avoid nightmare in doctests", "lines", "=", "'\\n'", ".", "join", "(", "l", ".", "rstrip", "(", "' '", ")", "for", "l", "in", "lines", ".", "split", "(", "'\\n'", ")", ")", "return", "lines" ]
34.375
16.583333
def cmd_map(args): '''map command''' child = multiprocessing.Process(target=map_process, args=[args]) child.start()
[ "def", "cmd_map", "(", "args", ")", ":", "child", "=", "multiprocessing", ".", "Process", "(", "target", "=", "map_process", ",", "args", "=", "[", "args", "]", ")", "child", ".", "start", "(", ")" ]
31
23
def _get_curie_and_type_from_id(variant_id): """ Given a variant id, our best guess at its curie and type (snp, haplotype, etc) 'None' will be used for both curie and type for IDs that we can't process :param variant_id: :return: """ curie = None variant_type = None # remove space before hyphens variant_id = re.sub(r' -', '-', variant_id).strip() if re.search(r' x ', variant_id) or re.search(r',', variant_id): # TODO deal with rs1234 x rs234... (haplotypes?) LOG.warning("Cannot parse variant groups of this format: %s", variant_id) elif re.search(r';', variant_id): curie = ':haplotype_' + Source.hash_id(variant_id) # deliberate 404 variant_type = "haplotype" elif variant_id[:2] == 'rs': curie = 'dbSNP:' + variant_id.split('-')[0] # curie = re.sub(r'-.*$', '', curie).strip() variant_type = "snp" # remove the alteration elif variant_id[:3] == 'kgp': # http://www.1000genomes.org/faq/what-are-kgp-identifiers curie = ':kgp-' + variant_id # deliberate 404 variant_type = "snp" elif variant_id[:3] == 'chr': # like: chr10:106180121-G # variant_id = re.sub(r'-?', '-N', variant_id) variant_id = re.sub(r' ', '', variant_id) curie = ':gwas-' + re.sub(r':', '-', variant_id) # deliberate 404 variant_type = "snp" elif variant_id.strip() == '': pass else: LOG.warning("There's a snp id i can't manage: %s", variant_id) return curie, variant_type
[ "def", "_get_curie_and_type_from_id", "(", "variant_id", ")", ":", "curie", "=", "None", "variant_type", "=", "None", "# remove space before hyphens", "variant_id", "=", "re", ".", "sub", "(", "r' -'", ",", "'-'", ",", "variant_id", ")", ".", "strip", "(", ")", "if", "re", ".", "search", "(", "r' x '", ",", "variant_id", ")", "or", "re", ".", "search", "(", "r','", ",", "variant_id", ")", ":", "# TODO deal with rs1234 x rs234... (haplotypes?)", "LOG", ".", "warning", "(", "\"Cannot parse variant groups of this format: %s\"", ",", "variant_id", ")", "elif", "re", ".", "search", "(", "r';'", ",", "variant_id", ")", ":", "curie", "=", "':haplotype_'", "+", "Source", ".", "hash_id", "(", "variant_id", ")", "# deliberate 404", "variant_type", "=", "\"haplotype\"", "elif", "variant_id", "[", ":", "2", "]", "==", "'rs'", ":", "curie", "=", "'dbSNP:'", "+", "variant_id", ".", "split", "(", "'-'", ")", "[", "0", "]", "# curie = re.sub(r'-.*$', '', curie).strip()", "variant_type", "=", "\"snp\"", "# remove the alteration", "elif", "variant_id", "[", ":", "3", "]", "==", "'kgp'", ":", "# http://www.1000genomes.org/faq/what-are-kgp-identifiers", "curie", "=", "':kgp-'", "+", "variant_id", "# deliberate 404", "variant_type", "=", "\"snp\"", "elif", "variant_id", "[", ":", "3", "]", "==", "'chr'", ":", "# like: chr10:106180121-G", "#", "variant_id", "=", "re", ".", "sub", "(", "r'-?'", ",", "'-N'", ",", "variant_id", ")", "variant_id", "=", "re", ".", "sub", "(", "r' '", ",", "''", ",", "variant_id", ")", "curie", "=", "':gwas-'", "+", "re", ".", "sub", "(", "r':'", ",", "'-'", ",", "variant_id", ")", "# deliberate 404", "variant_type", "=", "\"snp\"", "elif", "variant_id", ".", "strip", "(", ")", "==", "''", ":", "pass", "else", ":", "LOG", ".", "warning", "(", "\"There's a snp id i can't manage: %s\"", ",", "variant_id", ")", "return", "curie", ",", "variant_type" ]
42.225
17.875
def get_nearest_edges(G, X, Y, method=None, dist=0.0001): """ Return the graph edges nearest to a list of points. Pass in points as separate vectors of X and Y coordinates. The 'kdtree' method is by far the fastest with large data sets, but only finds approximate nearest edges if working in unprojected coordinates like lat-lng (it precisely finds the nearest edge if working in projected coordinates). The 'balltree' method is second fastest with large data sets, but it is precise if working in unprojected coordinates like lat-lng. Parameters ---------- G : networkx multidigraph X : list-like The vector of longitudes or x's for which we will find the nearest edge in the graph. For projected graphs, use the projected coordinates, usually in meters. Y : list-like The vector of latitudes or y's for which we will find the nearest edge in the graph. For projected graphs, use the projected coordinates, usually in meters. method : str {None, 'kdtree', 'balltree'} Which method to use for finding nearest edge to each point. If None, we manually find each edge one at a time using osmnx.utils.get_nearest_edge. If 'kdtree' we use scipy.spatial.cKDTree for very fast euclidean search. Recommended for projected graphs. If 'balltree', we use sklearn.neighbors.BallTree for fast haversine search. Recommended for unprojected graphs. dist : float spacing length along edges. Units are the same as the geom; Degrees for unprojected geometries and meters for projected geometries. The smaller the value, the more points are created. Returns ------- ne : ndarray array of nearest edges represented by their startpoint and endpoint ids, u and v, the OSM ids of the nodes. Info ---- The method creates equally distanced points along the edges of the network. Then, these points are used in a kdTree or BallTree search to identify which is nearest.Note that this method will not give the exact perpendicular point along the edge, but the smaller the *dist* parameter, the closer the solution will be. 
Code is adapted from an answer by JHuw from this original question: https://gis.stackexchange.com/questions/222315/geopandas-find-nearest-point -in-other-dataframe """ start_time = time.time() if method is None: # calculate nearest edge one at a time for each point ne = [get_nearest_edge(G, (x, y)) for x, y in zip(X, Y)] ne = [(u, v) for _, u, v in ne] elif method == 'kdtree': # check if we were able to import scipy.spatial.cKDTree successfully if not cKDTree: raise ImportError('The scipy package must be installed to use this optional feature.') # transform graph into DataFrame edges = graph_to_gdfs(G, nodes=False, fill_edge_geometry=True) # transform edges into evenly spaced points edges['points'] = edges.apply(lambda x: redistribute_vertices(x.geometry, dist), axis=1) # develop edges data for each created points extended = edges['points'].apply([pd.Series]).stack().reset_index(level=1, drop=True).join(edges).reset_index() # Prepare btree arrays nbdata = np.array(list(zip(extended['Series'].apply(lambda x: x.x), extended['Series'].apply(lambda x: x.y)))) # build a k-d tree for euclidean nearest node search btree = cKDTree(data=nbdata, compact_nodes=True, balanced_tree=True) # query the tree for nearest node to each point points = np.array([X, Y]).T dist, idx = btree.query(points, k=1) # Returns ids of closest point eidx = extended.loc[idx, 'index'] ne = edges.loc[eidx, ['u', 'v']] elif method == 'balltree': # check if we were able to import sklearn.neighbors.BallTree successfully if not BallTree: raise ImportError('The scikit-learn package must be installed to use this optional feature.') # transform graph into DataFrame edges = graph_to_gdfs(G, nodes=False, fill_edge_geometry=True) # transform edges into evenly spaced points edges['points'] = edges.apply(lambda x: redistribute_vertices(x.geometry, dist), axis=1) # develop edges data for each created points extended = edges['points'].apply([pd.Series]).stack().reset_index(level=1, 
drop=True).join(edges).reset_index() # haversine requires data in form of [lat, lng] and inputs/outputs in units of radians nodes = pd.DataFrame({'x': extended['Series'].apply(lambda x: x.x), 'y': extended['Series'].apply(lambda x: x.y)}) nodes_rad = np.deg2rad(nodes[['y', 'x']].values.astype(np.float)) points = np.array([Y, X]).T points_rad = np.deg2rad(points) # build a ball tree for haversine nearest node search tree = BallTree(nodes_rad, metric='haversine') # query the tree for nearest node to each point idx = tree.query(points_rad, k=1, return_distance=False) eidx = extended.loc[idx[:, 0], 'index'] ne = edges.loc[eidx, ['u', 'v']] else: raise ValueError('You must pass a valid method name, or None.') log('Found nearest edges to {:,} points in {:,.2f} seconds'.format(len(X), time.time() - start_time)) return np.array(ne)
[ "def", "get_nearest_edges", "(", "G", ",", "X", ",", "Y", ",", "method", "=", "None", ",", "dist", "=", "0.0001", ")", ":", "start_time", "=", "time", ".", "time", "(", ")", "if", "method", "is", "None", ":", "# calculate nearest edge one at a time for each point", "ne", "=", "[", "get_nearest_edge", "(", "G", ",", "(", "x", ",", "y", ")", ")", "for", "x", ",", "y", "in", "zip", "(", "X", ",", "Y", ")", "]", "ne", "=", "[", "(", "u", ",", "v", ")", "for", "_", ",", "u", ",", "v", "in", "ne", "]", "elif", "method", "==", "'kdtree'", ":", "# check if we were able to import scipy.spatial.cKDTree successfully", "if", "not", "cKDTree", ":", "raise", "ImportError", "(", "'The scipy package must be installed to use this optional feature.'", ")", "# transform graph into DataFrame", "edges", "=", "graph_to_gdfs", "(", "G", ",", "nodes", "=", "False", ",", "fill_edge_geometry", "=", "True", ")", "# transform edges into evenly spaced points", "edges", "[", "'points'", "]", "=", "edges", ".", "apply", "(", "lambda", "x", ":", "redistribute_vertices", "(", "x", ".", "geometry", ",", "dist", ")", ",", "axis", "=", "1", ")", "# develop edges data for each created points", "extended", "=", "edges", "[", "'points'", "]", ".", "apply", "(", "[", "pd", ".", "Series", "]", ")", ".", "stack", "(", ")", ".", "reset_index", "(", "level", "=", "1", ",", "drop", "=", "True", ")", ".", "join", "(", "edges", ")", ".", "reset_index", "(", ")", "# Prepare btree arrays", "nbdata", "=", "np", ".", "array", "(", "list", "(", "zip", "(", "extended", "[", "'Series'", "]", ".", "apply", "(", "lambda", "x", ":", "x", ".", "x", ")", ",", "extended", "[", "'Series'", "]", ".", "apply", "(", "lambda", "x", ":", "x", ".", "y", ")", ")", ")", ")", "# build a k-d tree for euclidean nearest node search", "btree", "=", "cKDTree", "(", "data", "=", "nbdata", ",", "compact_nodes", "=", "True", ",", "balanced_tree", "=", "True", ")", "# query the tree for nearest node to each point", "points", "=", "np", ".", 
"array", "(", "[", "X", ",", "Y", "]", ")", ".", "T", "dist", ",", "idx", "=", "btree", ".", "query", "(", "points", ",", "k", "=", "1", ")", "# Returns ids of closest point", "eidx", "=", "extended", ".", "loc", "[", "idx", ",", "'index'", "]", "ne", "=", "edges", ".", "loc", "[", "eidx", ",", "[", "'u'", ",", "'v'", "]", "]", "elif", "method", "==", "'balltree'", ":", "# check if we were able to import sklearn.neighbors.BallTree successfully", "if", "not", "BallTree", ":", "raise", "ImportError", "(", "'The scikit-learn package must be installed to use this optional feature.'", ")", "# transform graph into DataFrame", "edges", "=", "graph_to_gdfs", "(", "G", ",", "nodes", "=", "False", ",", "fill_edge_geometry", "=", "True", ")", "# transform edges into evenly spaced points", "edges", "[", "'points'", "]", "=", "edges", ".", "apply", "(", "lambda", "x", ":", "redistribute_vertices", "(", "x", ".", "geometry", ",", "dist", ")", ",", "axis", "=", "1", ")", "# develop edges data for each created points", "extended", "=", "edges", "[", "'points'", "]", ".", "apply", "(", "[", "pd", ".", "Series", "]", ")", ".", "stack", "(", ")", ".", "reset_index", "(", "level", "=", "1", ",", "drop", "=", "True", ")", ".", "join", "(", "edges", ")", ".", "reset_index", "(", ")", "# haversine requires data in form of [lat, lng] and inputs/outputs in units of radians", "nodes", "=", "pd", ".", "DataFrame", "(", "{", "'x'", ":", "extended", "[", "'Series'", "]", ".", "apply", "(", "lambda", "x", ":", "x", ".", "x", ")", ",", "'y'", ":", "extended", "[", "'Series'", "]", ".", "apply", "(", "lambda", "x", ":", "x", ".", "y", ")", "}", ")", "nodes_rad", "=", "np", ".", "deg2rad", "(", "nodes", "[", "[", "'y'", ",", "'x'", "]", "]", ".", "values", ".", "astype", "(", "np", ".", "float", ")", ")", "points", "=", "np", ".", "array", "(", "[", "Y", ",", "X", "]", ")", ".", "T", "points_rad", "=", "np", ".", "deg2rad", "(", "points", ")", "# build a ball tree for haversine nearest node 
search", "tree", "=", "BallTree", "(", "nodes_rad", ",", "metric", "=", "'haversine'", ")", "# query the tree for nearest node to each point", "idx", "=", "tree", ".", "query", "(", "points_rad", ",", "k", "=", "1", ",", "return_distance", "=", "False", ")", "eidx", "=", "extended", ".", "loc", "[", "idx", "[", ":", ",", "0", "]", ",", "'index'", "]", "ne", "=", "edges", ".", "loc", "[", "eidx", ",", "[", "'u'", ",", "'v'", "]", "]", "else", ":", "raise", "ValueError", "(", "'You must pass a valid method name, or None.'", ")", "log", "(", "'Found nearest edges to {:,} points in {:,.2f} seconds'", ".", "format", "(", "len", "(", "X", ")", ",", "time", ".", "time", "(", ")", "-", "start_time", ")", ")", "return", "np", ".", "array", "(", "ne", ")" ]
43.813008
28.853659
def plot_mesh( x, y, z, color=default_color, wireframe=True, surface=True, wrapx=False, wrapy=False, u=None, v=None, texture=None ): """Draws a 2d wireframe+surface in 3d: generalization of :any:`plot_wireframe` and :any:`plot_surface`. :param x: {x2d} :param y: {y2d} :param z: {z2d} :param color: {color2d} :param bool wireframe: draw lines between the vertices :param bool surface: draw faces/triangles between the vertices :param bool wrapx: when True, the x direction is assumed to wrap, and polygons are drawn between the begin and end points :param boool wrapy: idem for y :param u: {u} :param v: {v} :param texture: {texture} :return: :any:`Mesh` """ fig = gcf() # assert len(x.shape) == 2 # assert len(y.shape) == 2 # assert len(z.shape) == 2 # if isinstance(color, np.ndarray): # assert len(color.shape) == 3 # assert color.shape[:2] == x.shape # color = color.reshape(-1) def dim(x): d = 0 el = x while True: try: el = el[0] d += 1 except: break return d if dim(x) == 2: nx, ny = x.shape else: nx, ny = x[0].shape # assert len(x.shape) == 2, "Array x must be 2 dimensional." # assert len(y.shape) == 2, "Array y must be 2 dimensional." # assert len(z.shape) == 2, "Array z must be 2 dimensional." # assert x.shape == y.shape, "Arrays x and y must have same shape." # assert y.shape == z.shape, "Arrays y and z must have same shape." 
# convert x, y, z from shape (nx, ny) to (nx * ny) or # (frame, nx, ny) to (frame, nx*ny) def reshape(ar): if dim(ar) == 3: return [k.reshape(-1) for k in ar] else: return ar.reshape(-1) x = reshape(x) y = reshape(y) z = reshape(z) # similar for texture coordinates if u is not None: u = reshape(u) if v is not None: v = reshape(v) # convert color from shape (nx, ny, {3,4}) to (nx * ny, {3, 4}) or # (frame, nx, ny, {3,4}) to (frame, nx*ny, {3,4}) def reshape_color(ar): if dim(ar) == 4: return [k.reshape(-1, k.shape[-1]) for k in ar] else: return ar.reshape(-1, ar.shape[-1]) if isinstance(color, np.ndarray): color = reshape_color(color) _grow_limits(np.array(x).reshape(-1), np.array(y).reshape(-1), np.array(z).reshape(-1)) triangles, lines = _make_triangles_lines((nx, ny), wrapx, wrapy) mesh = ipv.Mesh( x=x, y=y, z=z, triangles=triangles if surface else None, color=color, lines=lines if wireframe else None, u=u, v=v, texture=texture, ) fig.meshes = fig.meshes + [mesh] return mesh
[ "def", "plot_mesh", "(", "x", ",", "y", ",", "z", ",", "color", "=", "default_color", ",", "wireframe", "=", "True", ",", "surface", "=", "True", ",", "wrapx", "=", "False", ",", "wrapy", "=", "False", ",", "u", "=", "None", ",", "v", "=", "None", ",", "texture", "=", "None", ")", ":", "fig", "=", "gcf", "(", ")", "# assert len(x.shape) == 2", "# assert len(y.shape) == 2", "# assert len(z.shape) == 2", "# if isinstance(color, np.ndarray):", "# \tassert len(color.shape) == 3", "# \tassert color.shape[:2] == x.shape", "# \tcolor = color.reshape(-1)", "def", "dim", "(", "x", ")", ":", "d", "=", "0", "el", "=", "x", "while", "True", ":", "try", ":", "el", "=", "el", "[", "0", "]", "d", "+=", "1", "except", ":", "break", "return", "d", "if", "dim", "(", "x", ")", "==", "2", ":", "nx", ",", "ny", "=", "x", ".", "shape", "else", ":", "nx", ",", "ny", "=", "x", "[", "0", "]", ".", "shape", "# assert len(x.shape) == 2, \"Array x must be 2 dimensional.\"", "# assert len(y.shape) == 2, \"Array y must be 2 dimensional.\"", "# assert len(z.shape) == 2, \"Array z must be 2 dimensional.\"", "# assert x.shape == y.shape, \"Arrays x and y must have same shape.\"", "# assert y.shape == z.shape, \"Arrays y and z must have same shape.\"", "# convert x, y, z from shape (nx, ny) to (nx * ny) or", "# (frame, nx, ny) to (frame, nx*ny)", "def", "reshape", "(", "ar", ")", ":", "if", "dim", "(", "ar", ")", "==", "3", ":", "return", "[", "k", ".", "reshape", "(", "-", "1", ")", "for", "k", "in", "ar", "]", "else", ":", "return", "ar", ".", "reshape", "(", "-", "1", ")", "x", "=", "reshape", "(", "x", ")", "y", "=", "reshape", "(", "y", ")", "z", "=", "reshape", "(", "z", ")", "# similar for texture coordinates", "if", "u", "is", "not", "None", ":", "u", "=", "reshape", "(", "u", ")", "if", "v", "is", "not", "None", ":", "v", "=", "reshape", "(", "v", ")", "# convert color from shape (nx, ny, {3,4}) to (nx * ny, {3, 4}) or", "# (frame, nx, ny, {3,4}) to (frame, nx*ny, {3,4})", "def", 
"reshape_color", "(", "ar", ")", ":", "if", "dim", "(", "ar", ")", "==", "4", ":", "return", "[", "k", ".", "reshape", "(", "-", "1", ",", "k", ".", "shape", "[", "-", "1", "]", ")", "for", "k", "in", "ar", "]", "else", ":", "return", "ar", ".", "reshape", "(", "-", "1", ",", "ar", ".", "shape", "[", "-", "1", "]", ")", "if", "isinstance", "(", "color", ",", "np", ".", "ndarray", ")", ":", "color", "=", "reshape_color", "(", "color", ")", "_grow_limits", "(", "np", ".", "array", "(", "x", ")", ".", "reshape", "(", "-", "1", ")", ",", "np", ".", "array", "(", "y", ")", ".", "reshape", "(", "-", "1", ")", ",", "np", ".", "array", "(", "z", ")", ".", "reshape", "(", "-", "1", ")", ")", "triangles", ",", "lines", "=", "_make_triangles_lines", "(", "(", "nx", ",", "ny", ")", ",", "wrapx", ",", "wrapy", ")", "mesh", "=", "ipv", ".", "Mesh", "(", "x", "=", "x", ",", "y", "=", "y", ",", "z", "=", "z", ",", "triangles", "=", "triangles", "if", "surface", "else", "None", ",", "color", "=", "color", ",", "lines", "=", "lines", "if", "wireframe", "else", "None", ",", "u", "=", "u", ",", "v", "=", "v", ",", "texture", "=", "texture", ",", ")", "fig", ".", "meshes", "=", "fig", ".", "meshes", "+", "[", "mesh", "]", "return", "mesh" ]
29.913043
21.847826
def _set_axis_limits(self, which, lims, d, scale, reverse=False): """Private method for setting axis limits. Sets the axis limits on each axis for an individual plot. Args: which (str): The indicator of which part of the plots to adjust. This currently handles `x` and `y`. lims (len-2 list of floats): The limits for the axis. d (float): Amount to increment by between the limits. scale (str): Scale of the axis. Either `log` or `lin`. reverse (bool, optional): If True, reverse the axis tick marks. Default is False. """ setattr(self.limits, which + 'lims', lims) setattr(self.limits, 'd' + which, d) setattr(self.limits, which + 'scale', scale) if reverse: setattr(self.limits, 'reverse_' + which + '_axis', True) return
[ "def", "_set_axis_limits", "(", "self", ",", "which", ",", "lims", ",", "d", ",", "scale", ",", "reverse", "=", "False", ")", ":", "setattr", "(", "self", ".", "limits", ",", "which", "+", "'lims'", ",", "lims", ")", "setattr", "(", "self", ".", "limits", ",", "'d'", "+", "which", ",", "d", ")", "setattr", "(", "self", ".", "limits", ",", "which", "+", "'scale'", ",", "scale", ")", "if", "reverse", ":", "setattr", "(", "self", ".", "limits", ",", "'reverse_'", "+", "which", "+", "'_axis'", ",", "True", ")", "return" ]
41.285714
24.47619
def D(self): r"""Differential operator (for gradient and divergence). Is computed by :func:`compute_differential_operator`. """ if self._D is None: self.logger.warning('The differential operator G.D is not ' 'available, we need to compute it. Explicitly ' 'call G.compute_differential_operator() ' 'once beforehand to suppress the warning.') self.compute_differential_operator() return self._D
[ "def", "D", "(", "self", ")", ":", "if", "self", ".", "_D", "is", "None", ":", "self", ".", "logger", ".", "warning", "(", "'The differential operator G.D is not '", "'available, we need to compute it. Explicitly '", "'call G.compute_differential_operator() '", "'once beforehand to suppress the warning.'", ")", "self", ".", "compute_differential_operator", "(", ")", "return", "self", ".", "_D" ]
45.25
22.166667
def log_p(self,z): """ The unnormalized log posterior components (the quantity we want to approximate) RAO-BLACKWELLIZED! """ return np.array([self.log_p_blanket(i) for i in z])
[ "def", "log_p", "(", "self", ",", "z", ")", ":", "return", "np", ".", "array", "(", "[", "self", ".", "log_p_blanket", "(", "i", ")", "for", "i", "in", "z", "]", ")" ]
36.333333
17
def sgraph(N_clusters_max, file_name): """Runs METIS or hMETIS and returns the labels found by those (hyper-)graph partitioning algorithms. Parameters ---------- N_clusters_max : int file_name : string Returns ------- labels : array of shape (n_samples,) A vector of labels denoting the cluster to which each sample has been assigned as a result of any of three approximation algorithms for consensus clustering (either of CSPA, HGPA or MCLA). """ if file_name == 'DO_NOT_PROCESS': return [] print('\n#') k = str(N_clusters_max) out_name = file_name + '.part.' + k if file_name == 'wgraph_HGPA': print("INFO: Cluster_Ensembles: sgraph: " "calling shmetis for hypergraph partitioning.") if sys.platform.startswith('linux'): shmetis_path = pkg_resources.resource_filename(__name__, 'Hypergraph_Partitioning/hmetis-1.5-linux/shmetis') elif sys.platform.startswith('darwin'): shmetis_path = pkg_resources.resource_filename(__name__, 'Hypergraph_Partitioning/hmetis-1.5-osx-i686/shmetis') else: print("ERROR: Cluster_Ensembles: sgraph:\n" "your platform is not supported. Some code required for graph partition " "is only available for Linux distributions and OS X.") sys.exit(1) args = "{0} ./".format(shmetis_path) + file_name + " " + k + " 15" subprocess.call(args, shell = True) elif file_name == 'wgraph_CSPA' or file_name == 'wgraph_MCLA': print("INFO: Cluster_Ensembles: sgraph: " "calling gpmetis for graph partitioning.") args = "gpmetis ./" + file_name + " " + k subprocess.call(args, shell = True) else: raise NameError("ERROR: Cluster_Ensembles: sgraph: {} is not an acceptable " "file-name.".format(file_name)) labels = np.empty(0, dtype = int) with open(out_name, 'r') as file: print("INFO: Cluster_Ensembles: sgraph: (hyper)-graph partitioning completed; " "loading {}".format(out_name)) labels = np.loadtxt(out_name, dtype = int) labels = labels.reshape(labels.size) labels = one_to_max(labels) subprocess.call(['rm', out_name]) print('#') return labels
[ "def", "sgraph", "(", "N_clusters_max", ",", "file_name", ")", ":", "if", "file_name", "==", "'DO_NOT_PROCESS'", ":", "return", "[", "]", "print", "(", "'\\n#'", ")", "k", "=", "str", "(", "N_clusters_max", ")", "out_name", "=", "file_name", "+", "'.part.'", "+", "k", "if", "file_name", "==", "'wgraph_HGPA'", ":", "print", "(", "\"INFO: Cluster_Ensembles: sgraph: \"", "\"calling shmetis for hypergraph partitioning.\"", ")", "if", "sys", ".", "platform", ".", "startswith", "(", "'linux'", ")", ":", "shmetis_path", "=", "pkg_resources", ".", "resource_filename", "(", "__name__", ",", "'Hypergraph_Partitioning/hmetis-1.5-linux/shmetis'", ")", "elif", "sys", ".", "platform", ".", "startswith", "(", "'darwin'", ")", ":", "shmetis_path", "=", "pkg_resources", ".", "resource_filename", "(", "__name__", ",", "'Hypergraph_Partitioning/hmetis-1.5-osx-i686/shmetis'", ")", "else", ":", "print", "(", "\"ERROR: Cluster_Ensembles: sgraph:\\n\"", "\"your platform is not supported. Some code required for graph partition \"", "\"is only available for Linux distributions and OS X.\"", ")", "sys", ".", "exit", "(", "1", ")", "args", "=", "\"{0} ./\"", ".", "format", "(", "shmetis_path", ")", "+", "file_name", "+", "\" \"", "+", "k", "+", "\" 15\"", "subprocess", ".", "call", "(", "args", ",", "shell", "=", "True", ")", "elif", "file_name", "==", "'wgraph_CSPA'", "or", "file_name", "==", "'wgraph_MCLA'", ":", "print", "(", "\"INFO: Cluster_Ensembles: sgraph: \"", "\"calling gpmetis for graph partitioning.\"", ")", "args", "=", "\"gpmetis ./\"", "+", "file_name", "+", "\" \"", "+", "k", "subprocess", ".", "call", "(", "args", ",", "shell", "=", "True", ")", "else", ":", "raise", "NameError", "(", "\"ERROR: Cluster_Ensembles: sgraph: {} is not an acceptable \"", "\"file-name.\"", ".", "format", "(", "file_name", ")", ")", "labels", "=", "np", ".", "empty", "(", "0", ",", "dtype", "=", "int", ")", "with", "open", "(", "out_name", ",", "'r'", ")", "as", "file", ":", "print", "(", 
"\"INFO: Cluster_Ensembles: sgraph: (hyper)-graph partitioning completed; \"", "\"loading {}\"", ".", "format", "(", "out_name", ")", ")", "labels", "=", "np", ".", "loadtxt", "(", "out_name", ",", "dtype", "=", "int", ")", "labels", "=", "labels", ".", "reshape", "(", "labels", ".", "size", ")", "labels", "=", "one_to_max", "(", "labels", ")", "subprocess", ".", "call", "(", "[", "'rm'", ",", "out_name", "]", ")", "print", "(", "'#'", ")", "return", "labels" ]
37.246154
21.907692
def set_glitch_filter(self, user_gpio, steady):
    """
    Set a glitch filter on a GPIO.

    Level changes on the GPIO are not reported unless the level has
    been stable for at least [*steady*] microseconds; the level is then
    reported.  Level changes of less than [*steady*] microseconds are
    ignored.

    user_gpio:= 0-31
       steady:= 0-300000

    Returns 0 if OK, otherwise PI_BAD_USER_GPIO, or PI_BAD_FILTER.

    This filter affects the GPIO samples returned to callbacks set up
    with [*callback*] and [*wait_for_edge*].  It does not affect levels
    read by [*read*], [*read_bank_1*], or [*read_bank_2*].  Each
    (stable) edge will be timestamped [*steady*] microseconds after it
    was first detected.

    ...
    pi.set_glitch_filter(23, 100)
    ...
    """
    # Send the FG command to the pigpio daemon and translate the raw
    # reply into a pigpio result/error code.
    reply = yield from self._pigpio_aio_command(
        _PI_CMD_FG, user_gpio, steady)
    return _u2i(reply)
[ "def", "set_glitch_filter", "(", "self", ",", "user_gpio", ",", "steady", ")", ":", "res", "=", "yield", "from", "self", ".", "_pigpio_aio_command", "(", "_PI_CMD_FG", ",", "user_gpio", ",", "steady", ")", "return", "_u2i", "(", "res", ")" ]
43.227273
16.136364
def substitute(self, text, subvars={}, checkmissing=None):
    '''Substitute ${NAME} (or ${NAME:default}) style variables in a string.

    text: template string to expand
    subvars: mapping of variable name -> replacement value
        (default dict is never mutated, only read)
    checkmissing: if True, raise MAVSubstituteError for unknown
        variables; if None, fall back to self.checkmissing

    Returns the expanded string.  Raises MAVSubstituteError on an
    unterminated variable, or on an unknown variable when checkmissing
    is enabled.
    '''
    if checkmissing is None:
        checkmissing = self.checkmissing
    # Hoist the start-token length; the previous code hard-coded `idx+2`,
    # which silently broke when start_var_token was not 2 characters long.
    start_len = len(self.start_var_token)
    while True:
        idx = text.find(self.start_var_token)
        if idx == -1:
            return text
        endidx = text[idx:].find(self.end_var_token)
        if endidx == -1:
            raise MAVSubstituteError('missing end of variable: %s' % text[idx:idx+10])
        varname = text[idx+start_len:idx+endidx]
        fullvar = varname
        # allow default value after a :
        def_value = None
        colon = varname.find(':')
        if colon != -1:
            def_value = varname[colon+1:]
            varname = varname[:colon]
        if varname in subvars:
            value = subvars[varname]
        elif def_value is not None:
            value = def_value
        elif checkmissing:
            raise MAVSubstituteError("unknown variable in '%s%s%s'" % (
                self.start_var_token, varname, self.end_var_token))
        else:
            # Leave the unknown variable in place and continue expanding
            # the remainder without raising.
            return text[0:idx+endidx] + self.substitute(text[idx+endidx:], subvars, checkmissing=False)
        text = text.replace("%s%s%s" % (self.start_var_token, fullvar, self.end_var_token), str(value))
[ "def", "substitute", "(", "self", ",", "text", ",", "subvars", "=", "{", "}", ",", "checkmissing", "=", "None", ")", ":", "if", "checkmissing", "is", "None", ":", "checkmissing", "=", "self", ".", "checkmissing", "while", "True", ":", "idx", "=", "text", ".", "find", "(", "self", ".", "start_var_token", ")", "if", "idx", "==", "-", "1", ":", "return", "text", "endidx", "=", "text", "[", "idx", ":", "]", ".", "find", "(", "self", ".", "end_var_token", ")", "if", "endidx", "==", "-", "1", ":", "raise", "MAVSubstituteError", "(", "'missing end of variable: %s'", "%", "text", "[", "idx", ":", "idx", "+", "10", "]", ")", "varname", "=", "text", "[", "idx", "+", "2", ":", "idx", "+", "endidx", "]", "fullvar", "=", "varname", "# allow default value after a :", "def_value", "=", "None", "colon", "=", "varname", ".", "find", "(", "':'", ")", "if", "colon", "!=", "-", "1", ":", "def_value", "=", "varname", "[", "colon", "+", "1", ":", "]", "varname", "=", "varname", "[", ":", "colon", "]", "if", "varname", "in", "subvars", ":", "value", "=", "subvars", "[", "varname", "]", "elif", "def_value", "is", "not", "None", ":", "value", "=", "def_value", "elif", "checkmissing", ":", "raise", "MAVSubstituteError", "(", "\"unknown variable in '%s%s%s'\"", "%", "(", "self", ".", "start_var_token", ",", "varname", ",", "self", ".", "end_var_token", ")", ")", "else", ":", "return", "text", "[", "0", ":", "idx", "+", "endidx", "]", "+", "self", ".", "substitute", "(", "text", "[", "idx", "+", "endidx", ":", "]", ",", "subvars", ",", "checkmissing", "=", "False", ")", "text", "=", "text", ".", "replace", "(", "\"%s%s%s\"", "%", "(", "self", ".", "start_var_token", ",", "fullvar", ",", "self", ".", "end_var_token", ")", ",", "str", "(", "value", ")", ")", "return", "text" ]
38.8
18.228571
def push_external_commands(self, commands):
    """Send a HTTP request to the satellite (POST /_run_external_commands)
    to send the external commands to the satellite

    :param commands: external commands list to send
    :type commands: list
    :return: True on success, False on failure
    :rtype: bool
    """
    logger.debug("Pushing %d external commands", len(commands))
    return self.con.post('_run_external_commands', {'cmds': commands}, wait=True)
[ "def", "push_external_commands", "(", "self", ",", "commands", ")", ":", "logger", ".", "debug", "(", "\"Pushing %d external commands\"", ",", "len", "(", "commands", ")", ")", "return", "self", ".", "con", ".", "post", "(", "'_run_external_commands'", ",", "{", "'cmds'", ":", "commands", "}", ",", "wait", "=", "True", ")" ]
43.545455
16
def get_scripts(self):
    """
    Get the scripts at the path in sorted order as set in the module
    properties

    :return: a list of full script paths; within each directory of
        self.allpaths the matching entries are sorted according to
        self.sort_ascending
    """
    ret = []
    for d in self.allpaths:
        # On Python 3 filter() returns an iterator, which has no .sort()
        # method (the old `scripts.sort(...)` call raised AttributeError),
        # so build a sorted list directly.
        scripts = sorted(
            (name for name in os.listdir(d) if name.startswith(self.prefix)),
            reverse=not self.sort_ascending)
        ret.extend(os.path.join(d, name) for name in scripts)
    return ret
[ "def", "get_scripts", "(", "self", ")", ":", "ret", "=", "list", "(", ")", "for", "d", "in", "self", ".", "allpaths", ":", "scripts", "=", "filter", "(", "lambda", "x", ":", "x", ".", "startswith", "(", "self", ".", "prefix", ")", ",", "os", ".", "listdir", "(", "d", ")", ")", "scripts", ".", "sort", "(", "reverse", "=", "(", "not", "self", ".", "sort_ascending", ")", ")", "ret", "=", "ret", "+", "[", "os", ".", "path", ".", "join", "(", "d", ",", "x", ")", "for", "x", "in", "scripts", "]", "return", "(", "ret", ")" ]
36.5
19.333333
def meanFracdet(map_fracdet, lon_population, lat_population, radius_population):
    """
    Compute the mean fracdet within a circular aperture (radius in
    decimal degrees) around each population member.

    lon_population and lat_population are arrays of equal length;
    radius_population is either a scalar (one shared aperture) or an
    array of the same length (per-object apertures).
    """
    nside = healpy.npix2nside(len(map_fracdet))
    # Clip negative (unobserved) pixel values to zero before averaging.
    fracdet_clipped = np.where(map_fracdet >= 0., map_fracdet, 0.)
    scalar_radius = np.isscalar(radius_population)

    means = np.empty(len(lon_population))
    for idx, (lon, lat) in enumerate(zip(lon_population, lat_population)):
        radius = radius_population if scalar_radius else radius_population[idx]
        disc = ugali.utils.healpix.ang2disc(nside, lon, lat, radius,
                                            inclusive=True)
        means[idx] = np.mean(fracdet_clipped[disc])
    return means
[ "def", "meanFracdet", "(", "map_fracdet", ",", "lon_population", ",", "lat_population", ",", "radius_population", ")", ":", "nside_fracdet", "=", "healpy", ".", "npix2nside", "(", "len", "(", "map_fracdet", ")", ")", "map_fracdet_zero", "=", "np", ".", "where", "(", "map_fracdet", ">=", "0.", ",", "map_fracdet", ",", "0.", ")", "fracdet_population", "=", "np", ".", "empty", "(", "len", "(", "lon_population", ")", ")", "for", "ii", "in", "range", "(", "0", ",", "len", "(", "lon_population", ")", ")", ":", "fracdet_population", "[", "ii", "]", "=", "np", ".", "mean", "(", "map_fracdet_zero", "[", "ugali", ".", "utils", ".", "healpix", ".", "ang2disc", "(", "nside_fracdet", ",", "lon_population", "[", "ii", "]", ",", "lat_population", "[", "ii", "]", ",", "radius_population", "if", "np", ".", "isscalar", "(", "radius_population", ")", "else", "radius_population", "[", "ii", "]", ",", "inclusive", "=", "True", ")", "]", ")", "return", "fracdet_population" ]
67.875
38.375
def parse_map(cls, data, dist=None):
    """Parse a map of entry point groups.

    data may be a dict mapping group name -> entry point lines, or any
    input accepted by split_sections().  Returns a dict mapping each
    stripped group name to the result of cls.parse_group().  Raises
    ValueError for entries outside any group, or for duplicate group
    names.
    """
    sections = data.items() if isinstance(data, dict) else split_sections(data)
    parsed = {}
    for group, lines in sections:
        if group is None:
            # Content before the first section header is only legal
            # when it is empty.
            if lines:
                raise ValueError("Entry points must be listed in groups")
            continue
        name = group.strip()
        if name in parsed:
            raise ValueError("Duplicate group name", name)
        parsed[name] = cls.parse_group(name, lines, dist)
    return parsed
[ "def", "parse_map", "(", "cls", ",", "data", ",", "dist", "=", "None", ")", ":", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "data", "=", "data", ".", "items", "(", ")", "else", ":", "data", "=", "split_sections", "(", "data", ")", "maps", "=", "{", "}", "for", "group", ",", "lines", "in", "data", ":", "if", "group", "is", "None", ":", "if", "not", "lines", ":", "continue", "raise", "ValueError", "(", "\"Entry points must be listed in groups\"", ")", "group", "=", "group", ".", "strip", "(", ")", "if", "group", "in", "maps", ":", "raise", "ValueError", "(", "\"Duplicate group name\"", ",", "group", ")", "maps", "[", "group", "]", "=", "cls", ".", "parse_group", "(", "group", ",", "lines", ",", "dist", ")", "return", "maps" ]
36.117647
13.352941
def _plot2d(plotfunc):
    """
    Decorator for common 2d plotting logic.

    Wraps *plotfunc* (imshow / contour / contourf / pcolormesh style
    signature) with shared argument handling, appends the common
    parameter documentation to its docstring, and also registers a
    matching 2d plot method on class _PlotMethods as a side effect.
    """
    commondoc = """
    Parameters
    ----------
    darray : DataArray
        Must be 2 dimensional, unless creating faceted plots
    x : string, optional
        Coordinate for x axis. If None use darray.dims[1]
    y : string, optional
        Coordinate for y axis. If None use darray.dims[0]
    figsize : tuple, optional
        A tuple (width, height) of the figure in inches.
        Mutually exclusive with ``size`` and ``ax``.
    aspect : scalar, optional
        Aspect ratio of plot, so that ``aspect * size`` gives the width in
        inches. Only used if a ``size`` is provided.
    size : scalar, optional
        If provided, create a new figure for the plot with the given size.
        Height (in inches) of each plot. See also: ``aspect``.
    ax : matplotlib axes object, optional
        Axis on which to plot this figure. By default, use the current axis.
        Mutually exclusive with ``size`` and ``figsize``.
    row : string, optional
        If passed, make row faceted plots on this dimension name
    col : string, optional
        If passed, make column faceted plots on this dimension name
    col_wrap : integer, optional
        Use together with ``col`` to wrap faceted plots
    xscale, yscale : 'linear', 'symlog', 'log', 'logit', optional
        Specifies scaling for the x- and y-axes respectively
    xticks, yticks : Specify tick locations for x- and y-axes
    xlim, ylim : Specify x- and y-axes limits
    xincrease : None, True, or False, optional
        Should the values on the x axes be increasing from left to right?
        if None, use the default for the matplotlib function.
    yincrease : None, True, or False, optional
        Should the values on the y axes be increasing from top to bottom?
        if None, use the default for the matplotlib function.
    add_colorbar : Boolean, optional
        Adds colorbar to axis
    add_labels : Boolean, optional
        Use xarray metadata to label axes
    norm : ``matplotlib.colors.Normalize`` instance, optional
        If the ``norm`` has vmin or vmax specified, the corresponding kwarg
        must be None.
    vmin, vmax : floats, optional
        Values to anchor the colormap, otherwise they are inferred from the
        data and other keyword arguments. When a diverging dataset is inferred,
        setting one of these values will fix the other by symmetry around
        ``center``. Setting both values prevents use of a diverging colormap.
        If discrete levels are provided as an explicit list, both of these
        values are ignored.
    cmap : matplotlib colormap name or object, optional
        The mapping from data values to color space. If not provided, this
        will be either be ``viridis`` (if the function infers a sequential
        dataset) or ``RdBu_r`` (if the function infers a diverging dataset).
        When `Seaborn` is installed, ``cmap`` may also be a `seaborn`
        color palette. If ``cmap`` is seaborn color palette and the plot type
        is not ``contour`` or ``contourf``, ``levels`` must also be specified.
    colors : discrete colors to plot, optional
        A single color or a list of colors. If the plot type is not ``contour``
        or ``contourf``, the ``levels`` argument is required.
    center : float, optional
        The value at which to center the colormap. Passing this value implies
        use of a diverging colormap. Setting it to ``False`` prevents use of a
        diverging colormap.
    robust : bool, optional
        If True and ``vmin`` or ``vmax`` are absent, the colormap range is
        computed with 2nd and 98th percentiles instead of the extreme values.
    extend : {'neither', 'both', 'min', 'max'}, optional
        How to draw arrows extending the colorbar beyond its limits. If not
        provided, extend is inferred from vmin, vmax and the data limits.
    levels : int or list-like object, optional
        Split the colormap (cmap) into discrete color intervals. If an integer
        is provided, "nice" levels are chosen based on the data range: this can
        imply that the final number of levels is not exactly the expected one.
        Setting ``vmin`` and/or ``vmax`` with ``levels=N`` is equivalent to
        setting ``levels=np.linspace(vmin, vmax, N)``.
    infer_intervals : bool, optional
        Only applies to pcolormesh. If True, the coordinate intervals are
        passed to pcolormesh. If False, the original coordinates are used
        (this can be useful for certain map projections). The default is to
        always infer intervals, unless the mesh is irregular and plotted on
        a map projection.
    subplot_kws : dict, optional
        Dictionary of keyword arguments for matplotlib subplots. Only applies
        to FacetGrid plotting.
    cbar_ax : matplotlib Axes, optional
        Axes in which to draw the colorbar.
    cbar_kwargs : dict, optional
        Dictionary of keyword arguments to pass to the colorbar.
    **kwargs : optional
        Additional arguments to wrapped matplotlib function

    Returns
    -------
    artist :
        The same type of primitive artist that the wrapped matplotlib
        function returns
    """

    # Build on the original docstring
    plotfunc.__doc__ = '%s\n%s' % (plotfunc.__doc__, commondoc)

    @functools.wraps(plotfunc)
    def newplotfunc(darray, x=None, y=None, figsize=None, size=None,
                    aspect=None, ax=None, row=None, col=None,
                    col_wrap=None, xincrease=True, yincrease=True,
                    add_colorbar=None, add_labels=True, vmin=None, vmax=None,
                    cmap=None, center=None, robust=False, extend=None,
                    levels=None, infer_intervals=None, colors=None,
                    subplot_kws=None, cbar_ax=None, cbar_kwargs=None,
                    xscale=None, yscale=None, xticks=None, yticks=None,
                    xlim=None, ylim=None, norm=None, **kwargs):
        # All 2d plots in xarray share this function signature.
        # Method signature below should be consistent.

        # Decide on a default for the colorbar before facetgrids
        if add_colorbar is None:
            add_colorbar = plotfunc.__name__ != 'contour'
        imshow_rgb = (
            plotfunc.__name__ == 'imshow' and
            darray.ndim == (3 + (row is not None) + (col is not None)))
        if imshow_rgb:
            # Don't add a colorbar when showing an image with explicit colors
            add_colorbar = False
            # Matplotlib does not support normalising RGB data, so do it here.
            # See eg. https://github.com/matplotlib/matplotlib/pull/10220
            if robust or vmax is not None or vmin is not None:
                darray = _rescale_imshow_rgb(darray, vmin, vmax, robust)
                vmin, vmax, robust = None, None, False

        # Handle facetgrids first
        # NOTE: allargs captures every local via locals(), so the variable
        # names above must exactly match _easy_facetgrid's expected kwargs.
        if row or col:
            allargs = locals().copy()
            allargs.pop('imshow_rgb')
            allargs.update(allargs.pop('kwargs'))
            allargs.pop('darray')
            # Need the decorated plotting function
            allargs['plotfunc'] = globals()[plotfunc.__name__]
            return _easy_facetgrid(darray, kind='dataarray', **allargs)

        plt = import_matplotlib_pyplot()

        rgb = kwargs.pop('rgb', None)
        if rgb is not None and plotfunc.__name__ != 'imshow':
            raise ValueError('The "rgb" keyword is only valid for imshow()')
        elif rgb is not None and not imshow_rgb:
            raise ValueError('The "rgb" keyword is only valid for imshow()'
                             'with a three-dimensional array (per facet)')

        xlab, ylab = _infer_xy_labels(
            darray=darray, x=x, y=y, imshow=imshow_rgb, rgb=rgb)

        # better to pass the ndarrays directly to plotting functions
        xval = darray[xlab].values
        yval = darray[ylab].values

        # check if we need to broadcast one dimension
        if xval.ndim < yval.ndim:
            xval = np.broadcast_to(xval, yval.shape)

        if yval.ndim < xval.ndim:
            yval = np.broadcast_to(yval, xval.shape)

        # May need to transpose for correct x, y labels
        # xlab may be the name of a coord, we have to check for dim names
        if imshow_rgb:
            # For RGB[A] images, matplotlib requires the color dimension
            # to be last.  In Xarray the order should be unimportant, so
            # we transpose to (y, x, color) to make this work.
            yx_dims = (ylab, xlab)
            dims = yx_dims + tuple(d for d in darray.dims
                                   if d not in yx_dims)
            if dims != darray.dims:
                darray = darray.transpose(*dims)
        elif darray[xlab].dims[-1] == darray.dims[0]:
            darray = darray.transpose()

        # Pass the data as a masked ndarray too
        zval = darray.to_masked_array(copy=False)

        # Replace pd.Intervals if contained in xval or yval.
        xplt, xlab_extra = _resolve_intervals_2dplot(xval, plotfunc.__name__)
        yplt, ylab_extra = _resolve_intervals_2dplot(yval, plotfunc.__name__)

        _ensure_plottable(xplt, yplt)

        cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs(
            plotfunc, locals(), zval.data)

        if 'contour' in plotfunc.__name__:
            # extend is a keyword argument only for contour and contourf, but
            # passing it to the colorbar is sufficient for imshow and
            # pcolormesh
            kwargs['extend'] = cmap_params['extend']
            kwargs['levels'] = cmap_params['levels']
            # if colors == a single color, matplotlib draws dashed negative
            # contours. we lose this feature if we pass cmap and not colors
            if isinstance(colors, str):
                cmap_params['cmap'] = None
                kwargs['colors'] = colors

        if 'pcolormesh' == plotfunc.__name__:
            kwargs['infer_intervals'] = infer_intervals

        if 'imshow' == plotfunc.__name__ and isinstance(aspect, str):
            # forbid usage of mpl strings
            raise ValueError("plt.imshow's `aspect` kwarg is not available "
                             "in xarray")

        ax = get_axis(figsize, size, aspect, ax)
        primitive = plotfunc(xplt, yplt, zval, ax=ax, cmap=cmap_params['cmap'],
                             vmin=cmap_params['vmin'],
                             vmax=cmap_params['vmax'],
                             norm=cmap_params['norm'],
                             **kwargs)

        # Label the plot with metadata
        if add_labels:
            ax.set_xlabel(label_from_attrs(darray[xlab], xlab_extra))
            ax.set_ylabel(label_from_attrs(darray[ylab], ylab_extra))
            ax.set_title(darray._title_for_slice())

        if add_colorbar:
            if add_labels and 'label' not in cbar_kwargs:
                cbar_kwargs['label'] = label_from_attrs(darray)
            cbar = _add_colorbar(primitive, ax, cbar_ax, cbar_kwargs,
                                 cmap_params)
        elif (cbar_ax is not None or cbar_kwargs):
            # inform the user about keywords which aren't used
            raise ValueError("cbar_ax and cbar_kwargs can't be used with "
                             "add_colorbar=False.")

        # origin kwarg overrides yincrease
        if 'origin' in kwargs:
            yincrease = None

        _update_axes(ax, xincrease, yincrease, xscale, yscale,
                     xticks, yticks, xlim, ylim)

        # Rotate dates on xlabels
        # Do this without calling autofmt_xdate so that x-axes ticks
        # on other subplots (if any) are not deleted.
        # https://stackoverflow.com/questions/17430105/autofmt-xdate-deletes-x-axis-labels-of-all-subplots
        if np.issubdtype(xplt.dtype, np.datetime64):
            for xlabels in ax.get_xticklabels():
                xlabels.set_rotation(30)
                xlabels.set_ha('right')

        return primitive

    # For use as DataArray.plot.plotmethod
    @functools.wraps(newplotfunc)
    def plotmethod(_PlotMethods_obj, x=None, y=None, figsize=None, size=None,
                   aspect=None, ax=None, row=None, col=None, col_wrap=None,
                   xincrease=True, yincrease=True, add_colorbar=None,
                   add_labels=True, vmin=None, vmax=None, cmap=None,
                   colors=None, center=None, robust=False, extend=None,
                   levels=None, infer_intervals=None, subplot_kws=None,
                   cbar_ax=None, cbar_kwargs=None,
                   xscale=None, yscale=None, xticks=None, yticks=None,
                   xlim=None, ylim=None, norm=None, **kwargs):
        """
        The method should have the same signature as the function.

        This just makes the method work on Plotmethods objects,
        and passes all the other arguments straight through.
        """
        allargs = locals()
        allargs['darray'] = _PlotMethods_obj._da
        allargs.update(kwargs)
        for arg in ['_PlotMethods_obj', 'newplotfunc', 'kwargs']:
            del allargs[arg]
        return newplotfunc(**allargs)

    # Add to class _PlotMethods
    setattr(_PlotMethods, plotmethod.__name__, plotmethod)

    return newplotfunc
[ "def", "_plot2d", "(", "plotfunc", ")", ":", "commondoc", "=", "\"\"\"\n Parameters\n ----------\n darray : DataArray\n Must be 2 dimensional, unless creating faceted plots\n x : string, optional\n Coordinate for x axis. If None use darray.dims[1]\n y : string, optional\n Coordinate for y axis. If None use darray.dims[0]\n figsize : tuple, optional\n A tuple (width, height) of the figure in inches.\n Mutually exclusive with ``size`` and ``ax``.\n aspect : scalar, optional\n Aspect ratio of plot, so that ``aspect * size`` gives the width in\n inches. Only used if a ``size`` is provided.\n size : scalar, optional\n If provided, create a new figure for the plot with the given size.\n Height (in inches) of each plot. See also: ``aspect``.\n ax : matplotlib axes object, optional\n Axis on which to plot this figure. By default, use the current axis.\n Mutually exclusive with ``size`` and ``figsize``.\n row : string, optional\n If passed, make row faceted plots on this dimension name\n col : string, optional\n If passed, make column faceted plots on this dimension name\n col_wrap : integer, optional\n Use together with ``col`` to wrap faceted plots\n xscale, yscale : 'linear', 'symlog', 'log', 'logit', optional\n Specifies scaling for the x- and y-axes respectively\n xticks, yticks : Specify tick locations for x- and y-axes\n xlim, ylim : Specify x- and y-axes limits\n xincrease : None, True, or False, optional\n Should the values on the x axes be increasing from left to right?\n if None, use the default for the matplotlib function.\n yincrease : None, True, or False, optional\n Should the values on the y axes be increasing from top to bottom?\n if None, use the default for the matplotlib function.\n add_colorbar : Boolean, optional\n Adds colorbar to axis\n add_labels : Boolean, optional\n Use xarray metadata to label axes\n norm : ``matplotlib.colors.Normalize`` instance, optional\n If the ``norm`` has vmin or vmax specified, the corresponding kwarg\n must be None.\n 
vmin, vmax : floats, optional\n Values to anchor the colormap, otherwise they are inferred from the\n data and other keyword arguments. When a diverging dataset is inferred,\n setting one of these values will fix the other by symmetry around\n ``center``. Setting both values prevents use of a diverging colormap.\n If discrete levels are provided as an explicit list, both of these\n values are ignored.\n cmap : matplotlib colormap name or object, optional\n The mapping from data values to color space. If not provided, this\n will be either be ``viridis`` (if the function infers a sequential\n dataset) or ``RdBu_r`` (if the function infers a diverging dataset).\n When `Seaborn` is installed, ``cmap`` may also be a `seaborn`\n color palette. If ``cmap`` is seaborn color palette and the plot type\n is not ``contour`` or ``contourf``, ``levels`` must also be specified.\n colors : discrete colors to plot, optional\n A single color or a list of colors. If the plot type is not ``contour``\n or ``contourf``, the ``levels`` argument is required.\n center : float, optional\n The value at which to center the colormap. Passing this value implies\n use of a diverging colormap. Setting it to ``False`` prevents use of a\n diverging colormap.\n robust : bool, optional\n If True and ``vmin`` or ``vmax`` are absent, the colormap range is\n computed with 2nd and 98th percentiles instead of the extreme values.\n extend : {'neither', 'both', 'min', 'max'}, optional\n How to draw arrows extending the colorbar beyond its limits. If not\n provided, extend is inferred from vmin, vmax and the data limits.\n levels : int or list-like object, optional\n Split the colormap (cmap) into discrete color intervals. 
If an integer\n is provided, \"nice\" levels are chosen based on the data range: this can\n imply that the final number of levels is not exactly the expected one.\n Setting ``vmin`` and/or ``vmax`` with ``levels=N`` is equivalent to\n setting ``levels=np.linspace(vmin, vmax, N)``.\n infer_intervals : bool, optional\n Only applies to pcolormesh. If True, the coordinate intervals are\n passed to pcolormesh. If False, the original coordinates are used\n (this can be useful for certain map projections). The default is to\n always infer intervals, unless the mesh is irregular and plotted on\n a map projection.\n subplot_kws : dict, optional\n Dictionary of keyword arguments for matplotlib subplots. Only applies\n to FacetGrid plotting.\n cbar_ax : matplotlib Axes, optional\n Axes in which to draw the colorbar.\n cbar_kwargs : dict, optional\n Dictionary of keyword arguments to pass to the colorbar.\n **kwargs : optional\n Additional arguments to wrapped matplotlib function\n\n Returns\n -------\n artist :\n The same type of primitive artist that the wrapped matplotlib\n function returns\n \"\"\"", "# Build on the original docstring", "plotfunc", ".", "__doc__", "=", "'%s\\n%s'", "%", "(", "plotfunc", ".", "__doc__", ",", "commondoc", ")", "@", "functools", ".", "wraps", "(", "plotfunc", ")", "def", "newplotfunc", "(", "darray", ",", "x", "=", "None", ",", "y", "=", "None", ",", "figsize", "=", "None", ",", "size", "=", "None", ",", "aspect", "=", "None", ",", "ax", "=", "None", ",", "row", "=", "None", ",", "col", "=", "None", ",", "col_wrap", "=", "None", ",", "xincrease", "=", "True", ",", "yincrease", "=", "True", ",", "add_colorbar", "=", "None", ",", "add_labels", "=", "True", ",", "vmin", "=", "None", ",", "vmax", "=", "None", ",", "cmap", "=", "None", ",", "center", "=", "None", ",", "robust", "=", "False", ",", "extend", "=", "None", ",", "levels", "=", "None", ",", "infer_intervals", "=", "None", ",", "colors", "=", "None", ",", "subplot_kws", "=", "None", ",", 
"cbar_ax", "=", "None", ",", "cbar_kwargs", "=", "None", ",", "xscale", "=", "None", ",", "yscale", "=", "None", ",", "xticks", "=", "None", ",", "yticks", "=", "None", ",", "xlim", "=", "None", ",", "ylim", "=", "None", ",", "norm", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# All 2d plots in xarray share this function signature.", "# Method signature below should be consistent.", "# Decide on a default for the colorbar before facetgrids", "if", "add_colorbar", "is", "None", ":", "add_colorbar", "=", "plotfunc", ".", "__name__", "!=", "'contour'", "imshow_rgb", "=", "(", "plotfunc", ".", "__name__", "==", "'imshow'", "and", "darray", ".", "ndim", "==", "(", "3", "+", "(", "row", "is", "not", "None", ")", "+", "(", "col", "is", "not", "None", ")", ")", ")", "if", "imshow_rgb", ":", "# Don't add a colorbar when showing an image with explicit colors", "add_colorbar", "=", "False", "# Matplotlib does not support normalising RGB data, so do it here.", "# See eg. https://github.com/matplotlib/matplotlib/pull/10220", "if", "robust", "or", "vmax", "is", "not", "None", "or", "vmin", "is", "not", "None", ":", "darray", "=", "_rescale_imshow_rgb", "(", "darray", ",", "vmin", ",", "vmax", ",", "robust", ")", "vmin", ",", "vmax", ",", "robust", "=", "None", ",", "None", ",", "False", "# Handle facetgrids first", "if", "row", "or", "col", ":", "allargs", "=", "locals", "(", ")", ".", "copy", "(", ")", "allargs", ".", "pop", "(", "'imshow_rgb'", ")", "allargs", ".", "update", "(", "allargs", ".", "pop", "(", "'kwargs'", ")", ")", "allargs", ".", "pop", "(", "'darray'", ")", "# Need the decorated plotting function", "allargs", "[", "'plotfunc'", "]", "=", "globals", "(", ")", "[", "plotfunc", ".", "__name__", "]", "return", "_easy_facetgrid", "(", "darray", ",", "kind", "=", "'dataarray'", ",", "*", "*", "allargs", ")", "plt", "=", "import_matplotlib_pyplot", "(", ")", "rgb", "=", "kwargs", ".", "pop", "(", "'rgb'", ",", "None", ")", "if", "rgb", "is", "not", "None", "and", 
"plotfunc", ".", "__name__", "!=", "'imshow'", ":", "raise", "ValueError", "(", "'The \"rgb\" keyword is only valid for imshow()'", ")", "elif", "rgb", "is", "not", "None", "and", "not", "imshow_rgb", ":", "raise", "ValueError", "(", "'The \"rgb\" keyword is only valid for imshow()'", "'with a three-dimensional array (per facet)'", ")", "xlab", ",", "ylab", "=", "_infer_xy_labels", "(", "darray", "=", "darray", ",", "x", "=", "x", ",", "y", "=", "y", ",", "imshow", "=", "imshow_rgb", ",", "rgb", "=", "rgb", ")", "# better to pass the ndarrays directly to plotting functions", "xval", "=", "darray", "[", "xlab", "]", ".", "values", "yval", "=", "darray", "[", "ylab", "]", ".", "values", "# check if we need to broadcast one dimension", "if", "xval", ".", "ndim", "<", "yval", ".", "ndim", ":", "xval", "=", "np", ".", "broadcast_to", "(", "xval", ",", "yval", ".", "shape", ")", "if", "yval", ".", "ndim", "<", "xval", ".", "ndim", ":", "yval", "=", "np", ".", "broadcast_to", "(", "yval", ",", "xval", ".", "shape", ")", "# May need to transpose for correct x, y labels", "# xlab may be the name of a coord, we have to check for dim names", "if", "imshow_rgb", ":", "# For RGB[A] images, matplotlib requires the color dimension", "# to be last. 
In Xarray the order should be unimportant, so", "# we transpose to (y, x, color) to make this work.", "yx_dims", "=", "(", "ylab", ",", "xlab", ")", "dims", "=", "yx_dims", "+", "tuple", "(", "d", "for", "d", "in", "darray", ".", "dims", "if", "d", "not", "in", "yx_dims", ")", "if", "dims", "!=", "darray", ".", "dims", ":", "darray", "=", "darray", ".", "transpose", "(", "*", "dims", ")", "elif", "darray", "[", "xlab", "]", ".", "dims", "[", "-", "1", "]", "==", "darray", ".", "dims", "[", "0", "]", ":", "darray", "=", "darray", ".", "transpose", "(", ")", "# Pass the data as a masked ndarray too", "zval", "=", "darray", ".", "to_masked_array", "(", "copy", "=", "False", ")", "# Replace pd.Intervals if contained in xval or yval.", "xplt", ",", "xlab_extra", "=", "_resolve_intervals_2dplot", "(", "xval", ",", "plotfunc", ".", "__name__", ")", "yplt", ",", "ylab_extra", "=", "_resolve_intervals_2dplot", "(", "yval", ",", "plotfunc", ".", "__name__", ")", "_ensure_plottable", "(", "xplt", ",", "yplt", ")", "cmap_params", ",", "cbar_kwargs", "=", "_process_cmap_cbar_kwargs", "(", "plotfunc", ",", "locals", "(", ")", ",", "zval", ".", "data", ")", "if", "'contour'", "in", "plotfunc", ".", "__name__", ":", "# extend is a keyword argument only for contour and contourf, but", "# passing it to the colorbar is sufficient for imshow and", "# pcolormesh", "kwargs", "[", "'extend'", "]", "=", "cmap_params", "[", "'extend'", "]", "kwargs", "[", "'levels'", "]", "=", "cmap_params", "[", "'levels'", "]", "# if colors == a single color, matplotlib draws dashed negative", "# contours. 
we lose this feature if we pass cmap and not colors", "if", "isinstance", "(", "colors", ",", "str", ")", ":", "cmap_params", "[", "'cmap'", "]", "=", "None", "kwargs", "[", "'colors'", "]", "=", "colors", "if", "'pcolormesh'", "==", "plotfunc", ".", "__name__", ":", "kwargs", "[", "'infer_intervals'", "]", "=", "infer_intervals", "if", "'imshow'", "==", "plotfunc", ".", "__name__", "and", "isinstance", "(", "aspect", ",", "str", ")", ":", "# forbid usage of mpl strings", "raise", "ValueError", "(", "\"plt.imshow's `aspect` kwarg is not available \"", "\"in xarray\"", ")", "ax", "=", "get_axis", "(", "figsize", ",", "size", ",", "aspect", ",", "ax", ")", "primitive", "=", "plotfunc", "(", "xplt", ",", "yplt", ",", "zval", ",", "ax", "=", "ax", ",", "cmap", "=", "cmap_params", "[", "'cmap'", "]", ",", "vmin", "=", "cmap_params", "[", "'vmin'", "]", ",", "vmax", "=", "cmap_params", "[", "'vmax'", "]", ",", "norm", "=", "cmap_params", "[", "'norm'", "]", ",", "*", "*", "kwargs", ")", "# Label the plot with metadata", "if", "add_labels", ":", "ax", ".", "set_xlabel", "(", "label_from_attrs", "(", "darray", "[", "xlab", "]", ",", "xlab_extra", ")", ")", "ax", ".", "set_ylabel", "(", "label_from_attrs", "(", "darray", "[", "ylab", "]", ",", "ylab_extra", ")", ")", "ax", ".", "set_title", "(", "darray", ".", "_title_for_slice", "(", ")", ")", "if", "add_colorbar", ":", "if", "add_labels", "and", "'label'", "not", "in", "cbar_kwargs", ":", "cbar_kwargs", "[", "'label'", "]", "=", "label_from_attrs", "(", "darray", ")", "cbar", "=", "_add_colorbar", "(", "primitive", ",", "ax", ",", "cbar_ax", ",", "cbar_kwargs", ",", "cmap_params", ")", "elif", "(", "cbar_ax", "is", "not", "None", "or", "cbar_kwargs", ")", ":", "# inform the user about keywords which aren't used", "raise", "ValueError", "(", "\"cbar_ax and cbar_kwargs can't be used with \"", "\"add_colorbar=False.\"", ")", "# origin kwarg overrides yincrease", "if", "'origin'", "in", "kwargs", ":", "yincrease", "=", 
"None", "_update_axes", "(", "ax", ",", "xincrease", ",", "yincrease", ",", "xscale", ",", "yscale", ",", "xticks", ",", "yticks", ",", "xlim", ",", "ylim", ")", "# Rotate dates on xlabels", "# Do this without calling autofmt_xdate so that x-axes ticks", "# on other subplots (if any) are not deleted.", "# https://stackoverflow.com/questions/17430105/autofmt-xdate-deletes-x-axis-labels-of-all-subplots", "if", "np", ".", "issubdtype", "(", "xplt", ".", "dtype", ",", "np", ".", "datetime64", ")", ":", "for", "xlabels", "in", "ax", ".", "get_xticklabels", "(", ")", ":", "xlabels", ".", "set_rotation", "(", "30", ")", "xlabels", ".", "set_ha", "(", "'right'", ")", "return", "primitive", "# For use as DataArray.plot.plotmethod", "@", "functools", ".", "wraps", "(", "newplotfunc", ")", "def", "plotmethod", "(", "_PlotMethods_obj", ",", "x", "=", "None", ",", "y", "=", "None", ",", "figsize", "=", "None", ",", "size", "=", "None", ",", "aspect", "=", "None", ",", "ax", "=", "None", ",", "row", "=", "None", ",", "col", "=", "None", ",", "col_wrap", "=", "None", ",", "xincrease", "=", "True", ",", "yincrease", "=", "True", ",", "add_colorbar", "=", "None", ",", "add_labels", "=", "True", ",", "vmin", "=", "None", ",", "vmax", "=", "None", ",", "cmap", "=", "None", ",", "colors", "=", "None", ",", "center", "=", "None", ",", "robust", "=", "False", ",", "extend", "=", "None", ",", "levels", "=", "None", ",", "infer_intervals", "=", "None", ",", "subplot_kws", "=", "None", ",", "cbar_ax", "=", "None", ",", "cbar_kwargs", "=", "None", ",", "xscale", "=", "None", ",", "yscale", "=", "None", ",", "xticks", "=", "None", ",", "yticks", "=", "None", ",", "xlim", "=", "None", ",", "ylim", "=", "None", ",", "norm", "=", "None", ",", "*", "*", "kwargs", ")", ":", "\"\"\"\n The method should have the same signature as the function.\n\n This just makes the method work on Plotmethods objects,\n and passes all the other arguments straight through.\n \"\"\"", "allargs", "=", "locals", "(", 
")", "allargs", "[", "'darray'", "]", "=", "_PlotMethods_obj", ".", "_da", "allargs", ".", "update", "(", "kwargs", ")", "for", "arg", "in", "[", "'_PlotMethods_obj'", ",", "'newplotfunc'", ",", "'kwargs'", "]", ":", "del", "allargs", "[", "arg", "]", "return", "newplotfunc", "(", "*", "*", "allargs", ")", "# Add to class _PlotMethods", "setattr", "(", "_PlotMethods", ",", "plotmethod", ".", "__name__", ",", "plotmethod", ")", "return", "newplotfunc" ]
46.003509
21.189474
def simplify(self): """ Simplify the content of a Composite merging any parts that can be and returning the new Composite as well as updating itself internally """ final_output = [] diff_last = None item_last = None for item in self._content: # remove any undefined colors if hasattr(item.get("color"), "none_setting"): del item["color"] # ignore empty items if not item.get("full_text") and not item.get("separator"): continue # merge items if we can diff = item.copy() del diff["full_text"] if diff == diff_last or (item["full_text"].strip() == "" and item_last): item_last["full_text"] += item["full_text"] else: diff_last = diff item_last = item.copy() # copy item as we may change it final_output.append(item_last) self._content = final_output return self
[ "def", "simplify", "(", "self", ")", ":", "final_output", "=", "[", "]", "diff_last", "=", "None", "item_last", "=", "None", "for", "item", "in", "self", ".", "_content", ":", "# remove any undefined colors", "if", "hasattr", "(", "item", ".", "get", "(", "\"color\"", ")", ",", "\"none_setting\"", ")", ":", "del", "item", "[", "\"color\"", "]", "# ignore empty items", "if", "not", "item", ".", "get", "(", "\"full_text\"", ")", "and", "not", "item", ".", "get", "(", "\"separator\"", ")", ":", "continue", "# merge items if we can", "diff", "=", "item", ".", "copy", "(", ")", "del", "diff", "[", "\"full_text\"", "]", "if", "diff", "==", "diff_last", "or", "(", "item", "[", "\"full_text\"", "]", ".", "strip", "(", ")", "==", "\"\"", "and", "item_last", ")", ":", "item_last", "[", "\"full_text\"", "]", "+=", "item", "[", "\"full_text\"", "]", "else", ":", "diff_last", "=", "diff", "item_last", "=", "item", ".", "copy", "(", ")", "# copy item as we may change it", "final_output", ".", "append", "(", "item_last", ")", "self", ".", "_content", "=", "final_output", "return", "self" ]
37.777778
16.444444
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state, error): """Checks rules from the 'C++ style rules' section of cppguide.html. Most of these rules are hard to test (naming, comment style), but we do what we can. In particular we check for 2-space indents, line lengths, tab usage, spaces inside code, etc. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. file_extension: The extension (without the dot) of the filename. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ # Don't use "elided" lines here, otherwise we can't check commented lines. # Don't want to use "raw" either, because we don't want to check inside C++11 # raw strings, raw_lines = clean_lines.lines_without_raw_strings line = raw_lines[linenum] if line.find('\t') != -1: error(filename, linenum, 'whitespace/tab', 1, 'Tab found; better to use spaces') # One or three blank spaces at the beginning of the line is weird; it's # hard to reconcile that with 2-space indents. # NOTE: here are the conditions rob pike used for his tests. 
Mine aren't # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces # if(RLENGTH > 20) complain = 0; # if(match($0, " +(error|private|public|protected):")) complain = 0; # if(match(prev, "&& *$")) complain = 0; # if(match(prev, "\\|\\| *$")) complain = 0; # if(match(prev, "[\",=><] *$")) complain = 0; # if(match($0, " <<")) complain = 0; # if(match(prev, " +for \\(")) complain = 0; # if(prevodd && match(prevprev, " +for \\(")) complain = 0; scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$' classinfo = nesting_state.InnermostClass() initial_spaces = 0 cleansed_line = clean_lines.elided[linenum] while initial_spaces < len(line) and line[initial_spaces] == ' ': initial_spaces += 1 if line and line[-1].isspace(): error(filename, linenum, 'whitespace/end_of_line', 4, 'Line ends in whitespace. Consider deleting these extra spaces.') # There are certain situations we allow one space, notably for # section labels, and also lines containing multi-line raw strings. elif ((initial_spaces == 1 or initial_spaces == 3) and not Match(scope_or_label_pattern, cleansed_line) and not (clean_lines.raw_lines[linenum] != line and Match(r'^\s*""', line))): error(filename, linenum, 'whitespace/indent', 3, 'Weird number of spaces at line-start. ' 'Are you using a 2-space indent?') # Check if the line is a header guard. is_header_guard = False if file_extension == 'h': cppvar = GetHeaderGuardCPPVariable(filename) if (line.startswith('#ifndef %s' % cppvar) or line.startswith('#define %s' % cppvar) or line.startswith('#endif // %s' % cppvar)): is_header_guard = True # #include lines and header guards can be long, since there's no clean way to # split them. # # URLs can be long too. It's possible to split these, but it makes them # harder to cut&paste. # # The "$Id:...$" comment may also get very long without it being the # developers fault. 
if (not line.startswith('#include') and not is_header_guard and not Match(r'^\s*//.*http(s?)://\S*$', line) and not Match(r'^// \$Id:.*#[0-9]+ \$$', line)): line_width = GetLineWidth(line) extended_length = int((_line_length * 1.25)) if line_width > extended_length: error(filename, linenum, 'whitespace/line_length', 4, 'Lines should very rarely be longer than %i characters' % extended_length) elif line_width > _line_length: error(filename, linenum, 'whitespace/line_length', 2, 'Lines should be <= %i characters long' % _line_length) if (cleansed_line.count(';') > 1 and # for loops are allowed two ;'s (and may run over two lines). cleansed_line.find('for') == -1 and (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and # It's ok to have many commands in a switch case that fits in 1 line not ((cleansed_line.find('case ') != -1 or cleansed_line.find('default:') != -1) and cleansed_line.find('break;') != -1)): error(filename, linenum, 'whitespace/newline', 0, 'More than one command on the same line') # Some more style checks CheckBraces(filename, clean_lines, linenum, error) CheckTrailingSemicolon(filename, clean_lines, linenum, error) CheckEmptyBlockBody(filename, clean_lines, linenum, error) CheckAccess(filename, clean_lines, linenum, nesting_state, error) CheckSpacing(filename, clean_lines, linenum, nesting_state, error) CheckOperatorSpacing(filename, clean_lines, linenum, error) CheckParenthesisSpacing(filename, clean_lines, linenum, error) CheckCommaSpacing(filename, clean_lines, linenum, error) CheckBracesSpacing(filename, clean_lines, linenum, error) CheckSpacingForFunctionCall(filename, clean_lines, linenum, error) CheckRValueReference(filename, clean_lines, linenum, nesting_state, error) CheckCheck(filename, clean_lines, linenum, error) CheckAltTokens(filename, clean_lines, linenum, error) classinfo = nesting_state.InnermostClass() if classinfo: CheckSectionSpacing(filename, 
clean_lines, classinfo, linenum, error)
[ "def", "CheckStyle", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "file_extension", ",", "nesting_state", ",", "error", ")", ":", "# Don't use \"elided\" lines here, otherwise we can't check commented lines.", "# Don't want to use \"raw\" either, because we don't want to check inside C++11", "# raw strings,", "raw_lines", "=", "clean_lines", ".", "lines_without_raw_strings", "line", "=", "raw_lines", "[", "linenum", "]", "if", "line", ".", "find", "(", "'\\t'", ")", "!=", "-", "1", ":", "error", "(", "filename", ",", "linenum", ",", "'whitespace/tab'", ",", "1", ",", "'Tab found; better to use spaces'", ")", "# One or three blank spaces at the beginning of the line is weird; it's", "# hard to reconcile that with 2-space indents.", "# NOTE: here are the conditions rob pike used for his tests. Mine aren't", "# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces", "# if(RLENGTH > 20) complain = 0;", "# if(match($0, \" +(error|private|public|protected):\")) complain = 0;", "# if(match(prev, \"&& *$\")) complain = 0;", "# if(match(prev, \"\\\\|\\\\| *$\")) complain = 0;", "# if(match(prev, \"[\\\",=><] *$\")) complain = 0;", "# if(match($0, \" <<\")) complain = 0;", "# if(match(prev, \" +for \\\\(\")) complain = 0;", "# if(prevodd && match(prevprev, \" +for \\\\(\")) complain = 0;", "scope_or_label_pattern", "=", "r'\\s*\\w+\\s*:\\s*\\\\?$'", "classinfo", "=", "nesting_state", ".", "InnermostClass", "(", ")", "initial_spaces", "=", "0", "cleansed_line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "while", "initial_spaces", "<", "len", "(", "line", ")", "and", "line", "[", "initial_spaces", "]", "==", "' '", ":", "initial_spaces", "+=", "1", "if", "line", "and", "line", "[", "-", "1", "]", ".", "isspace", "(", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'whitespace/end_of_line'", ",", "4", ",", "'Line ends in whitespace. 
Consider deleting these extra spaces.'", ")", "# There are certain situations we allow one space, notably for", "# section labels, and also lines containing multi-line raw strings.", "elif", "(", "(", "initial_spaces", "==", "1", "or", "initial_spaces", "==", "3", ")", "and", "not", "Match", "(", "scope_or_label_pattern", ",", "cleansed_line", ")", "and", "not", "(", "clean_lines", ".", "raw_lines", "[", "linenum", "]", "!=", "line", "and", "Match", "(", "r'^\\s*\"\"'", ",", "line", ")", ")", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'whitespace/indent'", ",", "3", ",", "'Weird number of spaces at line-start. '", "'Are you using a 2-space indent?'", ")", "# Check if the line is a header guard.", "is_header_guard", "=", "False", "if", "file_extension", "==", "'h'", ":", "cppvar", "=", "GetHeaderGuardCPPVariable", "(", "filename", ")", "if", "(", "line", ".", "startswith", "(", "'#ifndef %s'", "%", "cppvar", ")", "or", "line", ".", "startswith", "(", "'#define %s'", "%", "cppvar", ")", "or", "line", ".", "startswith", "(", "'#endif // %s'", "%", "cppvar", ")", ")", ":", "is_header_guard", "=", "True", "# #include lines and header guards can be long, since there's no clean way to", "# split them.", "#", "# URLs can be long too. 
It's possible to split these, but it makes them", "# harder to cut&paste.", "#", "# The \"$Id:...$\" comment may also get very long without it being the", "# developers fault.", "if", "(", "not", "line", ".", "startswith", "(", "'#include'", ")", "and", "not", "is_header_guard", "and", "not", "Match", "(", "r'^\\s*//.*http(s?)://\\S*$'", ",", "line", ")", "and", "not", "Match", "(", "r'^// \\$Id:.*#[0-9]+ \\$$'", ",", "line", ")", ")", ":", "line_width", "=", "GetLineWidth", "(", "line", ")", "extended_length", "=", "int", "(", "(", "_line_length", "*", "1.25", ")", ")", "if", "line_width", ">", "extended_length", ":", "error", "(", "filename", ",", "linenum", ",", "'whitespace/line_length'", ",", "4", ",", "'Lines should very rarely be longer than %i characters'", "%", "extended_length", ")", "elif", "line_width", ">", "_line_length", ":", "error", "(", "filename", ",", "linenum", ",", "'whitespace/line_length'", ",", "2", ",", "'Lines should be <= %i characters long'", "%", "_line_length", ")", "if", "(", "cleansed_line", ".", "count", "(", "';'", ")", ">", "1", "and", "# for loops are allowed two ;'s (and may run over two lines).", "cleansed_line", ".", "find", "(", "'for'", ")", "==", "-", "1", "and", "(", "GetPreviousNonBlankLine", "(", "clean_lines", ",", "linenum", ")", "[", "0", "]", ".", "find", "(", "'for'", ")", "==", "-", "1", "or", "GetPreviousNonBlankLine", "(", "clean_lines", ",", "linenum", ")", "[", "0", "]", ".", "find", "(", "';'", ")", "!=", "-", "1", ")", "and", "# It's ok to have many commands in a switch case that fits in 1 line", "not", "(", "(", "cleansed_line", ".", "find", "(", "'case '", ")", "!=", "-", "1", "or", "cleansed_line", ".", "find", "(", "'default:'", ")", "!=", "-", "1", ")", "and", "cleansed_line", ".", "find", "(", "'break;'", ")", "!=", "-", "1", ")", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'whitespace/newline'", ",", "0", ",", "'More than one command on the same line'", ")", "# Some more style checks", 
"CheckBraces", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", "CheckTrailingSemicolon", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", "CheckEmptyBlockBody", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", "CheckAccess", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "nesting_state", ",", "error", ")", "CheckSpacing", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "nesting_state", ",", "error", ")", "CheckOperatorSpacing", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", "CheckParenthesisSpacing", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", "CheckCommaSpacing", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", "CheckBracesSpacing", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", "CheckSpacingForFunctionCall", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", "CheckRValueReference", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "nesting_state", ",", "error", ")", "CheckCheck", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", "CheckAltTokens", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", "classinfo", "=", "nesting_state", ".", "InnermostClass", "(", ")", "if", "classinfo", ":", "CheckSectionSpacing", "(", "filename", ",", "clean_lines", ",", "classinfo", ",", "linenum", ",", "error", ")" ]
47.042735
19.376068
def _get_session(self, request): """ Authenticate the request dict :param dict request: request dict built from user input :raises SMCConnectionError: failure to connect :return: python requests session :rtype: requests.Session """ _session = requests.session() # empty session response = _session.post(**request) logger.info('Using SMC API version: %s', self.api_version) if response.status_code != 200: raise SMCConnectionError( 'Login failed, HTTP status code: %s and reason: %s' % ( response.status_code, response.reason)) return _session
[ "def", "_get_session", "(", "self", ",", "request", ")", ":", "_session", "=", "requests", ".", "session", "(", ")", "# empty session", "response", "=", "_session", ".", "post", "(", "*", "*", "request", ")", "logger", ".", "info", "(", "'Using SMC API version: %s'", ",", "self", ".", "api_version", ")", "if", "response", ".", "status_code", "!=", "200", ":", "raise", "SMCConnectionError", "(", "'Login failed, HTTP status code: %s and reason: %s'", "%", "(", "response", ".", "status_code", ",", "response", ".", "reason", ")", ")", "return", "_session" ]
36.631579
14
def register(self, new_outputs, *args, **kwargs): """ Register outputs and metadata. * ``initial_value`` - used in dynamic calculations * ``size`` - number of elements per timestep * ``uncertainty`` - in percent of nominal value * ``variance`` - dictionary of covariances, diagonal is square of uncertianties, no units * ``jacobian`` - dictionary of sensitivities dxi/dfj * ``isconstant`` - ``True`` if constant, ``False`` if periodic * ``isproperty`` - ``True`` if output stays at last value during thresholds, ``False`` if reverts to initial value * ``timeseries`` - name of corresponding time series output, ``None`` if no time series * ``output_source`` - name :param new_outputs: new outputs to register. """ kwargs.update(zip(self.meta_names, args)) # call super method super(OutputRegistry, self).register(new_outputs, **kwargs)
[ "def", "register", "(", "self", ",", "new_outputs", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "zip", "(", "self", ".", "meta_names", ",", "args", ")", ")", "# call super method", "super", "(", "OutputRegistry", ",", "self", ")", ".", "register", "(", "new_outputs", ",", "*", "*", "kwargs", ")" ]
44.272727
18.181818
def post_reply_groups(self, group_id, topic_id, entry_id, attachment=None, message=None): """ Post a reply. Add a reply to an entry in a discussion topic. Returns a json representation of the created reply (see documentation for 'replies' method) on success. May require (depending on the topic) that the user has posted in the topic. If it is required, and the user has not posted, will respond with a 403 Forbidden status and the body 'require_initial_post'. """ path = {} data = {} params = {} # REQUIRED - PATH - group_id """ID""" path["group_id"] = group_id # REQUIRED - PATH - topic_id """ID""" path["topic_id"] = topic_id # REQUIRED - PATH - entry_id """ID""" path["entry_id"] = entry_id # OPTIONAL - message """The body of the entry.""" if message is not None: data["message"] = message # OPTIONAL - attachment """a multipart/form-data form-field-style attachment. Attachments larger than 1 kilobyte are subject to quota restrictions.""" if attachment is not None: data["attachment"] = attachment self.logger.debug("POST /api/v1/groups/{group_id}/discussion_topics/{topic_id}/entries/{entry_id}/replies with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/groups/{group_id}/discussion_topics/{topic_id}/entries/{entry_id}/replies".format(**path), data=data, params=params, no_data=True)
[ "def", "post_reply_groups", "(", "self", ",", "group_id", ",", "topic_id", ",", "entry_id", ",", "attachment", "=", "None", ",", "message", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - group_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"group_id\"", "]", "=", "group_id", "# REQUIRED - PATH - topic_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"topic_id\"", "]", "=", "topic_id", "# REQUIRED - PATH - entry_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"entry_id\"", "]", "=", "entry_id", "# OPTIONAL - message\r", "\"\"\"The body of the entry.\"\"\"", "if", "message", "is", "not", "None", ":", "data", "[", "\"message\"", "]", "=", "message", "# OPTIONAL - attachment\r", "\"\"\"a multipart/form-data form-field-style\r\n attachment. Attachments larger than 1 kilobyte are subject to quota\r\n restrictions.\"\"\"", "if", "attachment", "is", "not", "None", ":", "data", "[", "\"attachment\"", "]", "=", "attachment", "self", ".", "logger", ".", "debug", "(", "\"POST /api/v1/groups/{group_id}/discussion_topics/{topic_id}/entries/{entry_id}/replies with query params: {params} and form data: {data}\"", ".", "format", "(", "params", "=", "params", ",", "data", "=", "data", ",", "*", "*", "path", ")", ")", "return", "self", ".", "generic_request", "(", "\"POST\"", ",", "\"/api/v1/groups/{group_id}/discussion_topics/{topic_id}/entries/{entry_id}/replies\"", ".", "format", "(", "*", "*", "path", ")", ",", "data", "=", "data", ",", "params", "=", "params", ",", "no_data", "=", "True", ")" ]
39.690476
24.428571
def drawBackground(self, painter, rect): """When an area of the window is exposed, we just copy out of the server-side, off-screen pixmap to that area. """ if not self.pixmap: return x1, y1, x2, y2 = rect.getCoords() width = x2 - x1 + 1 height = y2 - y1 + 1 # redraw the screen from backing pixmap rect = QtCore.QRect(x1, y1, width, height) painter.drawPixmap(rect, self.pixmap, rect)
[ "def", "drawBackground", "(", "self", ",", "painter", ",", "rect", ")", ":", "if", "not", "self", ".", "pixmap", ":", "return", "x1", ",", "y1", ",", "x2", ",", "y2", "=", "rect", ".", "getCoords", "(", ")", "width", "=", "x2", "-", "x1", "+", "1", "height", "=", "y2", "-", "y1", "+", "1", "# redraw the screen from backing pixmap", "rect", "=", "QtCore", ".", "QRect", "(", "x1", ",", "y1", ",", "width", ",", "height", ")", "painter", ".", "drawPixmap", "(", "rect", ",", "self", ".", "pixmap", ",", "rect", ")" ]
35.769231
10.846154
def subSampleWholeColumn(spikeTrains, colIndices, cellsPerColumn, currentTS, timeWindow): """ Obtains subsample from matrix of spike trains by considering the cells in columns specified by colIndices. Thus, it returns a matrix of spike trains of cells within the same column. @param spikeTrains (array) array containing the spike trains of cells in the TM @param colIndices (array) array containing the indices of columns whose spike trains should be sampled @param cellsPerColumn (int) number of cells per column in the TM @param currentTS (int) time-step upper bound of sample (sample will go from time-step 0 up to currentTS) @param timeWindow (int) number of time-steps to sample from the spike trains @return subSpikeTrains (array) spike train matrix sampled from the total spike train matrix """ numColumns = np.shape(colIndices)[0] numCells = numColumns * cellsPerColumn if currentTS > 0 and currentTS < timeWindow: subSpikeTrains = np.zeros((numCells, currentTS), dtype = "uint32") for i in range(numColumns): currentCol = colIndices[i] initialCell = cellsPerColumn * currentCol for j in range(cellsPerColumn): subSpikeTrains[(cellsPerColumn*i) + j,:] = spikeTrains[initialCell + j,:] elif currentTS > 0 and currentTS >= timeWindow: subSpikeTrains = np.zeros((numCells, timeWindow), dtype = "uint32") for i in range(numColumns): currentCol = colIndices[i] initialCell = cellsPerColumn * currentCol for j in range(cellsPerColumn): subSpikeTrains[(cellsPerColumn*i) + j,:] = spikeTrains[initialCell + j,(currentTS-timeWindow):currentTS] elif currentTS == 0: # This option takes the whole spike train history totalTS = np.shape(spikeTrains)[1] subSpikeTrains = np.zeros((numCells, totalTS), dtype = "uint32") for i in range(numColumns): currentCol = colIndices[i] initialCell = cellsPerColumn * currentCol for j in range(cellsPerColumn): subSpikeTrains[(cellsPerColumn*i) + j,:] = spikeTrains[initialCell + j,:] elif currentTS < 0: totalTS = np.shape(spikeTrains)[1] subSpikeTrains = 
np.zeros((numCells, timeWindow), dtype = "uint32") rnd = random.randrange(totalTS - timeWindow) print "Starting from timestep: " + str(rnd) for i in range(numColumns): currentCol = colIndices[i] initialCell = cellsPerColumn * currentCol for j in range(cellsPerColumn): subSpikeTrains[(cellsPerColumn*i) + j,:] = spikeTrains[initialCell + j,rnd:(rnd+timeWindow)] return subSpikeTrains
[ "def", "subSampleWholeColumn", "(", "spikeTrains", ",", "colIndices", ",", "cellsPerColumn", ",", "currentTS", ",", "timeWindow", ")", ":", "numColumns", "=", "np", ".", "shape", "(", "colIndices", ")", "[", "0", "]", "numCells", "=", "numColumns", "*", "cellsPerColumn", "if", "currentTS", ">", "0", "and", "currentTS", "<", "timeWindow", ":", "subSpikeTrains", "=", "np", ".", "zeros", "(", "(", "numCells", ",", "currentTS", ")", ",", "dtype", "=", "\"uint32\"", ")", "for", "i", "in", "range", "(", "numColumns", ")", ":", "currentCol", "=", "colIndices", "[", "i", "]", "initialCell", "=", "cellsPerColumn", "*", "currentCol", "for", "j", "in", "range", "(", "cellsPerColumn", ")", ":", "subSpikeTrains", "[", "(", "cellsPerColumn", "*", "i", ")", "+", "j", ",", ":", "]", "=", "spikeTrains", "[", "initialCell", "+", "j", ",", ":", "]", "elif", "currentTS", ">", "0", "and", "currentTS", ">=", "timeWindow", ":", "subSpikeTrains", "=", "np", ".", "zeros", "(", "(", "numCells", ",", "timeWindow", ")", ",", "dtype", "=", "\"uint32\"", ")", "for", "i", "in", "range", "(", "numColumns", ")", ":", "currentCol", "=", "colIndices", "[", "i", "]", "initialCell", "=", "cellsPerColumn", "*", "currentCol", "for", "j", "in", "range", "(", "cellsPerColumn", ")", ":", "subSpikeTrains", "[", "(", "cellsPerColumn", "*", "i", ")", "+", "j", ",", ":", "]", "=", "spikeTrains", "[", "initialCell", "+", "j", ",", "(", "currentTS", "-", "timeWindow", ")", ":", "currentTS", "]", "elif", "currentTS", "==", "0", ":", "# This option takes the whole spike train history", "totalTS", "=", "np", ".", "shape", "(", "spikeTrains", ")", "[", "1", "]", "subSpikeTrains", "=", "np", ".", "zeros", "(", "(", "numCells", ",", "totalTS", ")", ",", "dtype", "=", "\"uint32\"", ")", "for", "i", "in", "range", "(", "numColumns", ")", ":", "currentCol", "=", "colIndices", "[", "i", "]", "initialCell", "=", "cellsPerColumn", "*", "currentCol", "for", "j", "in", "range", "(", "cellsPerColumn", ")", ":", 
"subSpikeTrains", "[", "(", "cellsPerColumn", "*", "i", ")", "+", "j", ",", ":", "]", "=", "spikeTrains", "[", "initialCell", "+", "j", ",", ":", "]", "elif", "currentTS", "<", "0", ":", "totalTS", "=", "np", ".", "shape", "(", "spikeTrains", ")", "[", "1", "]", "subSpikeTrains", "=", "np", ".", "zeros", "(", "(", "numCells", ",", "timeWindow", ")", ",", "dtype", "=", "\"uint32\"", ")", "rnd", "=", "random", ".", "randrange", "(", "totalTS", "-", "timeWindow", ")", "print", "\"Starting from timestep: \"", "+", "str", "(", "rnd", ")", "for", "i", "in", "range", "(", "numColumns", ")", ":", "currentCol", "=", "colIndices", "[", "i", "]", "initialCell", "=", "cellsPerColumn", "*", "currentCol", "for", "j", "in", "range", "(", "cellsPerColumn", ")", ":", "subSpikeTrains", "[", "(", "cellsPerColumn", "*", "i", ")", "+", "j", ",", ":", "]", "=", "spikeTrains", "[", "initialCell", "+", "j", ",", "rnd", ":", "(", "rnd", "+", "timeWindow", ")", "]", "return", "subSpikeTrains" ]
50.48
21.96
def run(connection): """ Parse arguments and start upload/download """ parser = argparse.ArgumentParser(description=""" Process database dumps. Either download of upload a dump file to the objectstore. downloads the latest dump and uploads with envronment and date into given container destination """) parser.add_argument( 'location', nargs=1, default=f'{DUMPFOLDER}/database.{ENV}.dump', help="Dump file location") parser.add_argument( 'objectstore', nargs=1, default=f'{DUMPFOLDER}/database.{ENV}.dump', help="Dump file objectstore location") parser.add_argument( '--download-db', action='store_true', dest='download', default=False, help='Download db') parser.add_argument( '--upload-db', action='store_true', dest='upload', default=False, help='Upload db') parser.add_argument( '--container', action='store_true', dest='container', default=False, help='Upload db') parser.add_argument( '--days', type=int, nargs=1, dest='days', default=0, help='Days to keep database dumps') args = parser.parse_args() if args.days: LOG.debug('Cleanup old dumps') remove_old_dumps( connection, args.objectstore[0], args.days[0]) elif args.download: download_database( connection, args.objectstore[0], args.location[0]) elif args.upload: upload_database( connection, args.objectstore[0], args.location[0]) else: parser.print_help()
[ "def", "run", "(", "connection", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"\"\"\n Process database dumps.\n\n Either download of upload a dump file to the objectstore.\n\n downloads the latest dump and uploads with envronment and date\n into given container destination\n \"\"\"", ")", "parser", ".", "add_argument", "(", "'location'", ",", "nargs", "=", "1", ",", "default", "=", "f'{DUMPFOLDER}/database.{ENV}.dump'", ",", "help", "=", "\"Dump file location\"", ")", "parser", ".", "add_argument", "(", "'objectstore'", ",", "nargs", "=", "1", ",", "default", "=", "f'{DUMPFOLDER}/database.{ENV}.dump'", ",", "help", "=", "\"Dump file objectstore location\"", ")", "parser", ".", "add_argument", "(", "'--download-db'", ",", "action", "=", "'store_true'", ",", "dest", "=", "'download'", ",", "default", "=", "False", ",", "help", "=", "'Download db'", ")", "parser", ".", "add_argument", "(", "'--upload-db'", ",", "action", "=", "'store_true'", ",", "dest", "=", "'upload'", ",", "default", "=", "False", ",", "help", "=", "'Upload db'", ")", "parser", ".", "add_argument", "(", "'--container'", ",", "action", "=", "'store_true'", ",", "dest", "=", "'container'", ",", "default", "=", "False", ",", "help", "=", "'Upload db'", ")", "parser", ".", "add_argument", "(", "'--days'", ",", "type", "=", "int", ",", "nargs", "=", "1", ",", "dest", "=", "'days'", ",", "default", "=", "0", ",", "help", "=", "'Days to keep database dumps'", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "if", "args", ".", "days", ":", "LOG", ".", "debug", "(", "'Cleanup old dumps'", ")", "remove_old_dumps", "(", "connection", ",", "args", ".", "objectstore", "[", "0", "]", ",", "args", ".", "days", "[", "0", "]", ")", "elif", "args", ".", "download", ":", "download_database", "(", "connection", ",", "args", ".", "objectstore", "[", "0", "]", ",", "args", ".", "location", "[", "0", "]", ")", "elif", "args", ".", "upload", ":", "upload_database", "(", 
"connection", ",", "args", ".", "objectstore", "[", "0", "]", ",", "args", ".", "location", "[", "0", "]", ")", "else", ":", "parser", ".", "print_help", "(", ")" ]
23.898551
19.115942
def get_dict_definition(self, dict, get_list=False): """Get the definition name of the given dict. Args: dict: dict to test. get_list: if set to true, return a list of definition that match the body. if False, only return the first. Returns: The definition name or None if the dict does not match any definition. If get_list is True, return a list of definition_name. """ list_def_candidate = [] for definition_name in self.specification['definitions'].keys(): if self.validate_definition(definition_name, dict): if not get_list: return definition_name list_def_candidate.append(definition_name) if get_list: return list_def_candidate return None
[ "def", "get_dict_definition", "(", "self", ",", "dict", ",", "get_list", "=", "False", ")", ":", "list_def_candidate", "=", "[", "]", "for", "definition_name", "in", "self", ".", "specification", "[", "'definitions'", "]", ".", "keys", "(", ")", ":", "if", "self", ".", "validate_definition", "(", "definition_name", ",", "dict", ")", ":", "if", "not", "get_list", ":", "return", "definition_name", "list_def_candidate", ".", "append", "(", "definition_name", ")", "if", "get_list", ":", "return", "list_def_candidate", "return", "None" ]
39.904762
19.809524
def gemfury(): """ Push to gem fury, a repo with private options """ # fury login # fury push dist/*.gz --as=YOUR_ACCT # fury push dist/*.whl --as=YOUR_ACCT cp = subprocess.run(("fury login --as={0}".format(GEM_FURY).split(" ")), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False, check=True) print(cp.stdout) about = {} with open(os.path.join(SRC, PROJECT_NAME, "__version__.py")) as f: exec(f.read(), about) version = Version(about["__version__"]) print("Have version : " + str(version)) print("Preparing to upload") if version not in get_versions(): for kind in ["gz", "whl"]: try: files = glob.glob("{0}dist/*.{1}".format(SRC.replace(".", ""), kind)) for file_name in files: cp = subprocess.run(("fury push {0} --as={1}".format(file_name, GEM_FURY).split(" ")), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False, check=True) print("result of fury push") for stream in [cp.stdout, cp.stderr]: if stream: for line in stream.decode().split("\n"): print(line) except subprocess.CalledProcessError as cpe: print("result of fury push- got error") for stream in [cp.stdout, cp.stderr]: if stream: for line in stream.decode().split("\n"): print(line) print(cpe) raise
[ "def", "gemfury", "(", ")", ":", "# fury login", "# fury push dist/*.gz --as=YOUR_ACCT", "# fury push dist/*.whl --as=YOUR_ACCT", "cp", "=", "subprocess", ".", "run", "(", "(", "\"fury login --as={0}\"", ".", "format", "(", "GEM_FURY", ")", ".", "split", "(", "\" \"", ")", ")", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "shell", "=", "False", ",", "check", "=", "True", ")", "print", "(", "cp", ".", "stdout", ")", "about", "=", "{", "}", "with", "open", "(", "os", ".", "path", ".", "join", "(", "SRC", ",", "PROJECT_NAME", ",", "\"__version__.py\"", ")", ")", "as", "f", ":", "exec", "(", "f", ".", "read", "(", ")", ",", "about", ")", "version", "=", "Version", "(", "about", "[", "\"__version__\"", "]", ")", "print", "(", "\"Have version : \"", "+", "str", "(", "version", ")", ")", "print", "(", "\"Preparing to upload\"", ")", "if", "version", "not", "in", "get_versions", "(", ")", ":", "for", "kind", "in", "[", "\"gz\"", ",", "\"whl\"", "]", ":", "try", ":", "files", "=", "glob", ".", "glob", "(", "\"{0}dist/*.{1}\"", ".", "format", "(", "SRC", ".", "replace", "(", "\".\"", ",", "\"\"", ")", ",", "kind", ")", ")", "for", "file_name", "in", "files", ":", "cp", "=", "subprocess", ".", "run", "(", "(", "\"fury push {0} --as={1}\"", ".", "format", "(", "file_name", ",", "GEM_FURY", ")", ".", "split", "(", "\" \"", ")", ")", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "shell", "=", "False", ",", "check", "=", "True", ")", "print", "(", "\"result of fury push\"", ")", "for", "stream", "in", "[", "cp", ".", "stdout", ",", "cp", ".", "stderr", "]", ":", "if", "stream", ":", "for", "line", "in", "stream", ".", "decode", "(", ")", ".", "split", "(", "\"\\n\"", ")", ":", "print", "(", "line", ")", "except", "subprocess", ".", "CalledProcessError", "as", "cpe", ":", "print", "(", "\"result of fury push- got error\"", ")", "for", "stream", "in", "[", "cp", ".", 
"stdout", ",", "cp", ".", "stderr", "]", ":", "if", "stream", ":", "for", "line", "in", "stream", ".", "decode", "(", ")", ".", "split", "(", "\"\\n\"", ")", ":", "print", "(", "line", ")", "print", "(", "cpe", ")", "raise" ]
40.047619
18.714286
def createShadowHandlerWithName(self, shadowName, isPersistentSubscribe): """ **Description** Create a device shadow handler using the specified shadow name and isPersistentSubscribe. **Syntax** .. code:: python # Create a device shadow handler for shadow named "Bot1", using persistent subscription Bot1Shadow = myAWSIoTMQTTShadowClient.createShadowHandlerWithName("Bot1", True) # Create a device shadow handler for shadow named "Bot2", using non-persistent subscription Bot2Shadow = myAWSIoTMQTTShadowClient.createShadowHandlerWithName("Bot2", False) **Parameters** *shadowName* - Name of the device shadow. *isPersistentSubscribe* - Whether to unsubscribe from shadow response (accepted/rejected) topics when there is a response. Will subscribe at the first time the shadow request is made and will not unsubscribe if isPersistentSubscribe is set. **Returns** AWSIoTPythonSDK.core.shadow.deviceShadow.deviceShadow object, which exposes the device shadow interface. """ # Create and return a deviceShadow instance return deviceShadow.deviceShadow(shadowName, isPersistentSubscribe, self._shadowManager)
[ "def", "createShadowHandlerWithName", "(", "self", ",", "shadowName", ",", "isPersistentSubscribe", ")", ":", "# Create and return a deviceShadow instance", "return", "deviceShadow", ".", "deviceShadow", "(", "shadowName", ",", "isPersistentSubscribe", ",", "self", ".", "_shadowManager", ")" ]
41.5
36.366667
def clear_high_level_pars(self): """ clears all high level pars display boxes """ for val in ['mean_type', 'dec', 'inc', 'alpha95', 'K', 'R', 'n_lines', 'n_planes']: COMMAND = """self.%s_window.SetValue("")""" % (val) exec(COMMAND) if self.ie_open: for val in ['mean_type', 'dec', 'inc', 'alpha95', 'K', 'R', 'n_lines', 'n_planes']: COMMAND = """self.ie.%s_window.SetValue("")""" % (val) exec(COMMAND) self.set_mean_stats_color()
[ "def", "clear_high_level_pars", "(", "self", ")", ":", "for", "val", "in", "[", "'mean_type'", ",", "'dec'", ",", "'inc'", ",", "'alpha95'", ",", "'K'", ",", "'R'", ",", "'n_lines'", ",", "'n_planes'", "]", ":", "COMMAND", "=", "\"\"\"self.%s_window.SetValue(\"\")\"\"\"", "%", "(", "val", ")", "exec", "(", "COMMAND", ")", "if", "self", ".", "ie_open", ":", "for", "val", "in", "[", "'mean_type'", ",", "'dec'", ",", "'inc'", ",", "'alpha95'", ",", "'K'", ",", "'R'", ",", "'n_lines'", ",", "'n_planes'", "]", ":", "COMMAND", "=", "\"\"\"self.ie.%s_window.SetValue(\"\")\"\"\"", "%", "(", "val", ")", "exec", "(", "COMMAND", ")", "self", ".", "set_mean_stats_color", "(", ")" ]
44.5
14.083333
def supports_version(self, guid_set): """ Returns whether a GUID set in for_appversions format is compatbile with the current supported applications list. """ # Don't let the test run if we haven't parsed install.rdf yet. if self.supported_versions is None: raise Exception('Early compatibility test run before install.rdf ' 'was parsed.') return self._compare_version(requirements=guid_set, support=self.supported_versions)
[ "def", "supports_version", "(", "self", ",", "guid_set", ")", ":", "# Don't let the test run if we haven't parsed install.rdf yet.", "if", "self", ".", "supported_versions", "is", "None", ":", "raise", "Exception", "(", "'Early compatibility test run before install.rdf '", "'was parsed.'", ")", "return", "self", ".", "_compare_version", "(", "requirements", "=", "guid_set", ",", "support", "=", "self", ".", "supported_versions", ")" ]
42.076923
19.307692
def _get_prm_file(file_name=None, search_order=None): """returns name of the prm file""" if file_name is not None: if os.path.isfile(file_name): return file_name else: logger.info("Could not find the prm-file") default_name = prms._prm_default_name prm_globtxt = prms._prm_globtxt script_dir = os.path.abspath(os.path.dirname(__file__)) search_path = dict() search_path["curdir"] = os.path.abspath(os.path.dirname(sys.argv[0])) search_path["filedir"] = script_dir search_path["userdir"] = os.path.expanduser("~") if search_order is None: search_order = ["userdir", ] # ["curdir","filedir", "userdir",] else: search_order = search_order # The default name for the prm file is at the moment in the script-dir,@ # while default searching is in the userdir (yes, I know): prm_default = os.path.join(script_dir, default_name) # -searching----------------------- search_dict = OrderedDict() for key in search_order: search_dict[key] = [None, None] prm_directory = search_path[key] default_file = os.path.join(prm_directory, default_name) if os.path.isfile(default_file): # noinspection PyTypeChecker search_dict[key][0] = default_file prm_globtxt_full = os.path.join(prm_directory, prm_globtxt) user_files = glob.glob(prm_globtxt_full) for f in user_files: if os.path.basename(f) != os.path.basename(default_file): search_dict[key][1] = f break # -selecting---------------------- prm_file = None for key, file_list in search_dict.items(): if file_list[-1]: prm_file = file_list[-1] break else: if not prm_file: prm_file = file_list[0] if prm_file: prm_filename = prm_file else: prm_filename = prm_default return prm_filename
[ "def", "_get_prm_file", "(", "file_name", "=", "None", ",", "search_order", "=", "None", ")", ":", "if", "file_name", "is", "not", "None", ":", "if", "os", ".", "path", ".", "isfile", "(", "file_name", ")", ":", "return", "file_name", "else", ":", "logger", ".", "info", "(", "\"Could not find the prm-file\"", ")", "default_name", "=", "prms", ".", "_prm_default_name", "prm_globtxt", "=", "prms", ".", "_prm_globtxt", "script_dir", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", "search_path", "=", "dict", "(", ")", "search_path", "[", "\"curdir\"", "]", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "sys", ".", "argv", "[", "0", "]", ")", ")", "search_path", "[", "\"filedir\"", "]", "=", "script_dir", "search_path", "[", "\"userdir\"", "]", "=", "os", ".", "path", ".", "expanduser", "(", "\"~\"", ")", "if", "search_order", "is", "None", ":", "search_order", "=", "[", "\"userdir\"", ",", "]", "# [\"curdir\",\"filedir\", \"userdir\",]", "else", ":", "search_order", "=", "search_order", "# The default name for the prm file is at the moment in the script-dir,@", "# while default searching is in the userdir (yes, I know):", "prm_default", "=", "os", ".", "path", ".", "join", "(", "script_dir", ",", "default_name", ")", "# -searching-----------------------", "search_dict", "=", "OrderedDict", "(", ")", "for", "key", "in", "search_order", ":", "search_dict", "[", "key", "]", "=", "[", "None", ",", "None", "]", "prm_directory", "=", "search_path", "[", "key", "]", "default_file", "=", "os", ".", "path", ".", "join", "(", "prm_directory", ",", "default_name", ")", "if", "os", ".", "path", ".", "isfile", "(", "default_file", ")", ":", "# noinspection PyTypeChecker", "search_dict", "[", "key", "]", "[", "0", "]", "=", "default_file", "prm_globtxt_full", "=", "os", ".", "path", ".", "join", "(", "prm_directory", ",", "prm_globtxt", ")", "user_files", "=", "glob", ".", "glob", "(", 
"prm_globtxt_full", ")", "for", "f", "in", "user_files", ":", "if", "os", ".", "path", ".", "basename", "(", "f", ")", "!=", "os", ".", "path", ".", "basename", "(", "default_file", ")", ":", "search_dict", "[", "key", "]", "[", "1", "]", "=", "f", "break", "# -selecting----------------------", "prm_file", "=", "None", "for", "key", ",", "file_list", "in", "search_dict", ".", "items", "(", ")", ":", "if", "file_list", "[", "-", "1", "]", ":", "prm_file", "=", "file_list", "[", "-", "1", "]", "break", "else", ":", "if", "not", "prm_file", ":", "prm_file", "=", "file_list", "[", "0", "]", "if", "prm_file", ":", "prm_filename", "=", "prm_file", "else", ":", "prm_filename", "=", "prm_default", "return", "prm_filename" ]
30.09375
19.1875
def aux(self, aux): """ Changes the aux port :params aux: Console port (integer) or None to free the port """ if aux == self._aux: return if self._aux: self._manager.port_manager.release_tcp_port(self._aux, self._project) self._aux = None if aux is not None: self._aux = self._manager.port_manager.reserve_tcp_port(aux, self._project) log.info("{module}: '{name}' [{id}]: aux port set to {port}".format(module=self.manager.module_name, name=self.name, id=self.id, port=aux))
[ "def", "aux", "(", "self", ",", "aux", ")", ":", "if", "aux", "==", "self", ".", "_aux", ":", "return", "if", "self", ".", "_aux", ":", "self", ".", "_manager", ".", "port_manager", ".", "release_tcp_port", "(", "self", ".", "_aux", ",", "self", ".", "_project", ")", "self", ".", "_aux", "=", "None", "if", "aux", "is", "not", "None", ":", "self", ".", "_aux", "=", "self", ".", "_manager", ".", "port_manager", ".", "reserve_tcp_port", "(", "aux", ",", "self", ".", "_project", ")", "log", ".", "info", "(", "\"{module}: '{name}' [{id}]: aux port set to {port}\"", ".", "format", "(", "module", "=", "self", ".", "manager", ".", "module_name", ",", "name", "=", "self", ".", "name", ",", "id", "=", "self", ".", "id", ",", "port", "=", "aux", ")", ")" ]
42.894737
30.263158
def set_column_count(self, count): """Sets the table column count. Args: count (int): column of rows """ current_row_count = self.row_count() current_column_count = self.column_count() if count > current_column_count: cl = TableEditableItem if self._editable else TableItem for r_key in self.children.keys(): row = self.children[r_key] for i in range(current_column_count, count): row.append(cl(), str(i)) if self._editable: row.children[str(i)].onchange.connect( self.on_item_changed, int(r_key), int(i)) self._update_first_row() elif count < current_column_count: for row in self.children.values(): for i in range(count, current_column_count): row.remove_child(row.children[str(i)]) self._column_count = count
[ "def", "set_column_count", "(", "self", ",", "count", ")", ":", "current_row_count", "=", "self", ".", "row_count", "(", ")", "current_column_count", "=", "self", ".", "column_count", "(", ")", "if", "count", ">", "current_column_count", ":", "cl", "=", "TableEditableItem", "if", "self", ".", "_editable", "else", "TableItem", "for", "r_key", "in", "self", ".", "children", ".", "keys", "(", ")", ":", "row", "=", "self", ".", "children", "[", "r_key", "]", "for", "i", "in", "range", "(", "current_column_count", ",", "count", ")", ":", "row", ".", "append", "(", "cl", "(", ")", ",", "str", "(", "i", ")", ")", "if", "self", ".", "_editable", ":", "row", ".", "children", "[", "str", "(", "i", ")", "]", ".", "onchange", ".", "connect", "(", "self", ".", "on_item_changed", ",", "int", "(", "r_key", ")", ",", "int", "(", "i", ")", ")", "self", ".", "_update_first_row", "(", ")", "elif", "count", "<", "current_column_count", ":", "for", "row", "in", "self", ".", "children", ".", "values", "(", ")", ":", "for", "i", "in", "range", "(", "count", ",", "current_column_count", ")", ":", "row", ".", "remove_child", "(", "row", ".", "children", "[", "str", "(", "i", ")", "]", ")", "self", ".", "_column_count", "=", "count" ]
42.347826
11.130435
def session(self): """ Returns the current db session """ if not self.__session: self.__session = dal.get_default_session() return self.__session
[ "def", "session", "(", "self", ")", ":", "if", "not", "self", ".", "__session", ":", "self", ".", "__session", "=", "dal", ".", "get_default_session", "(", ")", "return", "self", ".", "__session" ]
35.4
11.4
def _get(auth, path, fmt, autobox=True, params=None): ''' Issue a GET request to the XNAT REST API and box the response content. Example: >>> import yaxil >>> from yaxil import Format >>> auth = yaxil.XnatAuth(url='...', username='...', password='...') >>> yaxil.get(auth, '/data/experiments', Format.JSON) :param auth: XNAT authentication :type auth: :mod:`yaxil.XnatAuth` :param path: API URL path :type path: str :param fmt: API result format :type fmt: :mod:`yaxil.Format` :param autobox: Autobox response content into an appropriate reader or other data structure :type autobox: bool :param params: Additional query parameters :type params: dict :returns: Tuple of (URL, :mod:`dict` | :mod:`xml.etree.ElementTree` | :mod:`csv.reader` | :mod:`str`) :rtype: tuple ''' if not params: params = {} url = "%s/%s" % (auth.url.rstrip('/'), path.lstrip('/')) params["format"] = fmt logger.debug("issuing http request %s", url) logger.debug("query parameters %s", params) r = requests.get(url, params=params, auth=(auth.username, auth.password), verify=CHECK_CERTIFICATE) if r.status_code != requests.codes.ok: raise RestApiError("response not ok (%s) from %s" % (r.status_code, r.url)) if not r.content: raise RestApiError("response is empty from %s" % r.url) if autobox: return r.url,_autobox(r.text, fmt) else: return r.url,r.content
[ "def", "_get", "(", "auth", ",", "path", ",", "fmt", ",", "autobox", "=", "True", ",", "params", "=", "None", ")", ":", "if", "not", "params", ":", "params", "=", "{", "}", "url", "=", "\"%s/%s\"", "%", "(", "auth", ".", "url", ".", "rstrip", "(", "'/'", ")", ",", "path", ".", "lstrip", "(", "'/'", ")", ")", "params", "[", "\"format\"", "]", "=", "fmt", "logger", ".", "debug", "(", "\"issuing http request %s\"", ",", "url", ")", "logger", ".", "debug", "(", "\"query parameters %s\"", ",", "params", ")", "r", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "params", ",", "auth", "=", "(", "auth", ".", "username", ",", "auth", ".", "password", ")", ",", "verify", "=", "CHECK_CERTIFICATE", ")", "if", "r", ".", "status_code", "!=", "requests", ".", "codes", ".", "ok", ":", "raise", "RestApiError", "(", "\"response not ok (%s) from %s\"", "%", "(", "r", ".", "status_code", ",", "r", ".", "url", ")", ")", "if", "not", "r", ".", "content", ":", "raise", "RestApiError", "(", "\"response is empty from %s\"", "%", "r", ".", "url", ")", "if", "autobox", ":", "return", "r", ".", "url", ",", "_autobox", "(", "r", ".", "text", ",", "fmt", ")", "else", ":", "return", "r", ".", "url", ",", "r", ".", "content" ]
38.842105
22.105263
def UpsertUser(self, database_link, user, options=None): """Upserts a user. :param str database_link: The link to the database. :param dict user: The Azure Cosmos user to upsert. :param dict options: The request options for the request. :return: The upserted User. :rtype: dict """ if options is None: options = {} database_id, path = self._GetDatabaseIdWithPathForUser(database_link, user) return self.Upsert(user, path, 'users', database_id, None, options)
[ "def", "UpsertUser", "(", "self", ",", "database_link", ",", "user", ",", "options", "=", "None", ")", ":", "if", "options", "is", "None", ":", "options", "=", "{", "}", "database_id", ",", "path", "=", "self", ".", "_GetDatabaseIdWithPathForUser", "(", "database_link", ",", "user", ")", "return", "self", ".", "Upsert", "(", "user", ",", "path", ",", "'users'", ",", "database_id", ",", "None", ",", "options", ")" ]
29.708333
14.416667
def get_sentences(self, root_element, block_tags): """Returns a list of plain-text sentences by iterating through XML tags except for those listed in block_tags.""" sentences = [] for element in root_element: if not self.any_ends_with(block_tags, element.tag): # tag not in block_tags if element.text is not None and not re.match('^\s*$', element.text): sentences.extend(self.sentence_tokenize(element.text)) sentences.extend(self.get_sentences(element, block_tags)) f = open('sentence_debug.txt', 'w') for s in sentences: f.write(s.lower() + '\n') f.close() return sentences
[ "def", "get_sentences", "(", "self", ",", "root_element", ",", "block_tags", ")", ":", "sentences", "=", "[", "]", "for", "element", "in", "root_element", ":", "if", "not", "self", ".", "any_ends_with", "(", "block_tags", ",", "element", ".", "tag", ")", ":", "# tag not in block_tags", "if", "element", ".", "text", "is", "not", "None", "and", "not", "re", ".", "match", "(", "'^\\s*$'", ",", "element", ".", "text", ")", ":", "sentences", ".", "extend", "(", "self", ".", "sentence_tokenize", "(", "element", ".", "text", ")", ")", "sentences", ".", "extend", "(", "self", ".", "get_sentences", "(", "element", ",", "block_tags", ")", ")", "f", "=", "open", "(", "'sentence_debug.txt'", ",", "'w'", ")", "for", "s", "in", "sentences", ":", "f", ".", "write", "(", "s", ".", "lower", "(", ")", "+", "'\\n'", ")", "f", ".", "close", "(", ")", "return", "sentences" ]
45.705882
16.764706
def delete_vip_request(self, vip_request_ids): """ Method to delete vip request param vip_request_ids: vip_request ids """ uri = 'api/v3/vip-request/%s/' % vip_request_ids return super(ApiVipRequest, self).delete(uri)
[ "def", "delete_vip_request", "(", "self", ",", "vip_request_ids", ")", ":", "uri", "=", "'api/v3/vip-request/%s/'", "%", "vip_request_ids", "return", "super", "(", "ApiVipRequest", ",", "self", ")", ".", "delete", "(", "uri", ")" ]
28.777778
13.888889
def run(cls, version=None): """ Test runner method; is called by parent class defined in suite.py. :param version: B2G version string to test against :return: bool PASS/FAIL status """ try: dumper = certdump() versions = dumper.nssversion_via_marionette() except Exception as e: # TODO: too broad exception cls.log_status('FAIL', 'Failed to gather information from the device via Marionette: %s' % e) return False if version is None: cls.log_status('FAIL', 'NSS version check requires a B2G version.\nReported component versions:\n%s' % ( '\n'.join(["%s: %s" % (k, versions[k]) for k in versions]))) return False reported_version = versions['NSS_Version'] if version not in nssversion.b2g_version_to_hginfo: cls.log_status('FAIL', 'No version comparison data for B2G %s.\nReported NSS component versions:\n%s' % ( version, '\n'.join(["%s: %s" % (k, versions[k]) for k in versions]))) return False expected_version = nssversion.b2g_version_to_hginfo[version]['release_nss_version'] # Fail if reported version is a downgrade if nssversion.first_older_than_second(reported_version, expected_version): cls.log_status('FAIL', 'NSS downgrade detected. Expecting at least version %s.\n' 'Reported versions:\n%s' % ( expected_version, '\n'.join(["%s: %s" % (k, versions[k]) for k in versions]))) return False # Pass if NSS version was upgraded. if nssversion.first_older_than_second(expected_version, reported_version): cls.log_status('PASS', 'NSS more recent than release version %s. Reported component versions:\n%s' % ( expected_version, '\n'.join(["%s: %s" % (k, versions[k]) for k in versions]))) return True # Else device has reported the expected version. cls.log_status('PASS', 'NSS version reported as expected. Reported component versions:\n%s' % ( '\n'.join(["%s: %s" % (k, versions[k]) for k in versions]))) return True
[ "def", "run", "(", "cls", ",", "version", "=", "None", ")", ":", "try", ":", "dumper", "=", "certdump", "(", ")", "versions", "=", "dumper", ".", "nssversion_via_marionette", "(", ")", "except", "Exception", "as", "e", ":", "# TODO: too broad exception", "cls", ".", "log_status", "(", "'FAIL'", ",", "'Failed to gather information from the device via Marionette: %s'", "%", "e", ")", "return", "False", "if", "version", "is", "None", ":", "cls", ".", "log_status", "(", "'FAIL'", ",", "'NSS version check requires a B2G version.\\nReported component versions:\\n%s'", "%", "(", "'\\n'", ".", "join", "(", "[", "\"%s: %s\"", "%", "(", "k", ",", "versions", "[", "k", "]", ")", "for", "k", "in", "versions", "]", ")", ")", ")", "return", "False", "reported_version", "=", "versions", "[", "'NSS_Version'", "]", "if", "version", "not", "in", "nssversion", ".", "b2g_version_to_hginfo", ":", "cls", ".", "log_status", "(", "'FAIL'", ",", "'No version comparison data for B2G %s.\\nReported NSS component versions:\\n%s'", "%", "(", "version", ",", "'\\n'", ".", "join", "(", "[", "\"%s: %s\"", "%", "(", "k", ",", "versions", "[", "k", "]", ")", "for", "k", "in", "versions", "]", ")", ")", ")", "return", "False", "expected_version", "=", "nssversion", ".", "b2g_version_to_hginfo", "[", "version", "]", "[", "'release_nss_version'", "]", "# Fail if reported version is a downgrade", "if", "nssversion", ".", "first_older_than_second", "(", "reported_version", ",", "expected_version", ")", ":", "cls", ".", "log_status", "(", "'FAIL'", ",", "'NSS downgrade detected. 
Expecting at least version %s.\\n'", "'Reported versions:\\n%s'", "%", "(", "expected_version", ",", "'\\n'", ".", "join", "(", "[", "\"%s: %s\"", "%", "(", "k", ",", "versions", "[", "k", "]", ")", "for", "k", "in", "versions", "]", ")", ")", ")", "return", "False", "# Pass if NSS version was upgraded.", "if", "nssversion", ".", "first_older_than_second", "(", "expected_version", ",", "reported_version", ")", ":", "cls", ".", "log_status", "(", "'PASS'", ",", "'NSS more recent than release version %s. Reported component versions:\\n%s'", "%", "(", "expected_version", ",", "'\\n'", ".", "join", "(", "[", "\"%s: %s\"", "%", "(", "k", ",", "versions", "[", "k", "]", ")", "for", "k", "in", "versions", "]", ")", ")", ")", "return", "True", "# Else device has reported the expected version.", "cls", ".", "log_status", "(", "'PASS'", ",", "'NSS version reported as expected. Reported component versions:\\n%s'", "%", "(", "'\\n'", ".", "join", "(", "[", "\"%s: %s\"", "%", "(", "k", ",", "versions", "[", "k", "]", ")", "for", "k", "in", "versions", "]", ")", ")", ")", "return", "True" ]
47.608696
31.782609
def load_ref_spectra(): """ Pull out wl, flux, ivar from files of training spectra """ data_dir = "/Users/annaho/Data/AAOmega/ref_spectra" # Load the files & count the number of training objects ff = glob.glob("%s/*.txt" %data_dir) nstars = len(ff) print("We have %s training objects" %nstars) # Read the first file to get the wavelength array f = ff[0] data = Table.read(f, format="ascii.fast_no_header") wl = data['col1'] npix = len(wl) print("We have %s pixels" %npix) tr_flux = np.zeros((nstars,npix)) tr_ivar = np.zeros(tr_flux.shape) for i,f in enumerate(ff): data = Table.read(f, format="ascii.fast_no_header") flux = data['col2'] tr_flux[i,:] = flux sigma = data['col3'] tr_ivar[i,:] = 1.0 / sigma**2 return np.array(ff), wl, tr_flux, tr_ivar
[ "def", "load_ref_spectra", "(", ")", ":", "data_dir", "=", "\"/Users/annaho/Data/AAOmega/ref_spectra\"", "# Load the files & count the number of training objects", "ff", "=", "glob", ".", "glob", "(", "\"%s/*.txt\"", "%", "data_dir", ")", "nstars", "=", "len", "(", "ff", ")", "print", "(", "\"We have %s training objects\"", "%", "nstars", ")", "# Read the first file to get the wavelength array", "f", "=", "ff", "[", "0", "]", "data", "=", "Table", ".", "read", "(", "f", ",", "format", "=", "\"ascii.fast_no_header\"", ")", "wl", "=", "data", "[", "'col1'", "]", "npix", "=", "len", "(", "wl", ")", "print", "(", "\"We have %s pixels\"", "%", "npix", ")", "tr_flux", "=", "np", ".", "zeros", "(", "(", "nstars", ",", "npix", ")", ")", "tr_ivar", "=", "np", ".", "zeros", "(", "tr_flux", ".", "shape", ")", "for", "i", ",", "f", "in", "enumerate", "(", "ff", ")", ":", "data", "=", "Table", ".", "read", "(", "f", ",", "format", "=", "\"ascii.fast_no_header\"", ")", "flux", "=", "data", "[", "'col2'", "]", "tr_flux", "[", "i", ",", ":", "]", "=", "flux", "sigma", "=", "data", "[", "'col3'", "]", "tr_ivar", "[", "i", ",", ":", "]", "=", "1.0", "/", "sigma", "**", "2", "return", "np", ".", "array", "(", "ff", ")", ",", "wl", ",", "tr_flux", ",", "tr_ivar" ]
32.192308
16.038462
def polynomial_sign(poly_surface, degree): r"""Determine the "sign" of a polynomial on the reference triangle. .. note:: This is used **only** by :meth:`Surface._compute_valid` (which is in turn used to compute / cache the :attr:`Surface.is_valid` property). Checks if a polynomial :math:`p(s, t)` is positive, negative or mixed sign on the reference triangle. Does this by utilizing the B |eacute| zier form of :math:`p`: it is a convex combination of the Bernstein basis (real numbers) hence if the Bernstein basis is all positive, the polynomial must be. If the values are mixed, then we can recursively subdivide until we are in a region where the coefficients are all one sign. Args: poly_surface (numpy.ndarray): 2D array (with 1 row) of control points for a "surface", i.e. a bivariate polynomial. degree (int): The degree of the surface / polynomial given by ``poly_surface``. Returns: int: The sign of the polynomial. Will be one of ``-1``, ``1`` or ``0``. A value of ``0`` indicates a mixed sign or the zero polynomial. Raises: ValueError: If no conclusion is reached after the maximum number of subdivisions. """ # The indices where the corner nodes in a surface are. corner_indices = (0, degree, -1) sub_polys = [poly_surface] signs = set() for _ in six.moves.xrange(_MAX_POLY_SUBDIVISIONS): undecided = [] for poly in sub_polys: # First add all the signs of the corner nodes. signs.update(_SIGN(poly[0, corner_indices]).astype(int)) # Then check if the ``poly`` nodes are **uniformly** one sign. if np.all(poly == 0.0): signs.add(0) elif np.all(poly > 0.0): signs.add(1) elif np.all(poly < 0.0): signs.add(-1) else: undecided.append(poly) if len(signs) > 1: return 0 sub_polys = functools.reduce( operator.add, [subdivide_nodes(poly, degree) for poly in undecided], (), ) if not sub_polys: break if sub_polys: raise ValueError( "Did not reach a conclusion after max subdivisions", _MAX_POLY_SUBDIVISIONS, ) else: # NOTE: We are guaranteed that ``len(signs) <= 1``. return signs.pop()
[ "def", "polynomial_sign", "(", "poly_surface", ",", "degree", ")", ":", "# The indices where the corner nodes in a surface are.", "corner_indices", "=", "(", "0", ",", "degree", ",", "-", "1", ")", "sub_polys", "=", "[", "poly_surface", "]", "signs", "=", "set", "(", ")", "for", "_", "in", "six", ".", "moves", ".", "xrange", "(", "_MAX_POLY_SUBDIVISIONS", ")", ":", "undecided", "=", "[", "]", "for", "poly", "in", "sub_polys", ":", "# First add all the signs of the corner nodes.", "signs", ".", "update", "(", "_SIGN", "(", "poly", "[", "0", ",", "corner_indices", "]", ")", ".", "astype", "(", "int", ")", ")", "# Then check if the ``poly`` nodes are **uniformly** one sign.", "if", "np", ".", "all", "(", "poly", "==", "0.0", ")", ":", "signs", ".", "add", "(", "0", ")", "elif", "np", ".", "all", "(", "poly", ">", "0.0", ")", ":", "signs", ".", "add", "(", "1", ")", "elif", "np", ".", "all", "(", "poly", "<", "0.0", ")", ":", "signs", ".", "add", "(", "-", "1", ")", "else", ":", "undecided", ".", "append", "(", "poly", ")", "if", "len", "(", "signs", ")", ">", "1", ":", "return", "0", "sub_polys", "=", "functools", ".", "reduce", "(", "operator", ".", "add", ",", "[", "subdivide_nodes", "(", "poly", ",", "degree", ")", "for", "poly", "in", "undecided", "]", ",", "(", ")", ",", ")", "if", "not", "sub_polys", ":", "break", "if", "sub_polys", ":", "raise", "ValueError", "(", "\"Did not reach a conclusion after max subdivisions\"", ",", "_MAX_POLY_SUBDIVISIONS", ",", ")", "else", ":", "# NOTE: We are guaranteed that ``len(signs) <= 1``.", "return", "signs", ".", "pop", "(", ")" ]
33.479452
21.890411
def upsert(self, _id, dct, attribute="_id"): """Update or Insert a new document :param str _id: The document id :param dict dct: The dictionary to set on the document :param str attribute: The attribute to query for to find the object to set this data on :returns: JSON Mongo client response including the "n" key to show number of objects effected """ mongo_response = yield self.update(_id, dct, upsert=True, attribute=attribute) raise Return(mongo_response)
[ "def", "upsert", "(", "self", ",", "_id", ",", "dct", ",", "attribute", "=", "\"_id\"", ")", ":", "mongo_response", "=", "yield", "self", ".", "update", "(", "_id", ",", "dct", ",", "upsert", "=", "True", ",", "attribute", "=", "attribute", ")", "raise", "Return", "(", "mongo_response", ")" ]
43
26.083333
def humanize_size(size): """Create a nice human readable representation of the given number (understood as bytes) using the "KiB" and "MiB" suffixes to indicate kibibytes and mebibytes. A kibibyte is defined as 1024 bytes (as opposed to a kilobyte which is 1000 bytes) and a mibibyte is 1024**2 bytes (as opposed to a megabyte which is 1000**2 bytes). :param size: the number to convert :type size: int :returns: the human readable representation of size :rtype: str """ for factor, format_string in ((1, '%i'), (1024, '%iKiB'), (1024 * 1024, '%.1fMiB')): if size / factor < 1024: return format_string % (size / factor) return format_string % (size / factor)
[ "def", "humanize_size", "(", "size", ")", ":", "for", "factor", ",", "format_string", "in", "(", "(", "1", ",", "'%i'", ")", ",", "(", "1024", ",", "'%iKiB'", ")", ",", "(", "1024", "*", "1024", ",", "'%.1fMiB'", ")", ")", ":", "if", "size", "/", "factor", "<", "1024", ":", "return", "format_string", "%", "(", "size", "/", "factor", ")", "return", "format_string", "%", "(", "size", "/", "factor", ")" ]
43.222222
15.833333
def _setOutputNames(self,rootname,suffix='_drz'): """ Define the default output filenames for drizzle products, these are based on the original rootname of the image filename should be just 1 filename, so call this in a loop for chip names contained inside a file. """ # Define FITS output filenames for intermediate products # Build names based on final DRIZZLE output name # where 'output' normally would have been created # by 'process_input()' # outFinal = rootname+suffix+'.fits' outSci = rootname+suffix+'_sci.fits' outWeight = rootname+suffix+'_wht.fits' outContext = rootname+suffix+'_ctx.fits' outMedian = rootname+'_med.fits' # Build names based on input name origFilename = self._filename.replace('.fits','_OrIg.fits') outSky = rootname + '_sky.fits' outSingle = rootname+'_single_sci.fits' outSWeight = rootname+'_single_wht.fits' crCorImage = rootname+'_crclean.fits' # Build outputNames dictionary fnames={ 'origFilename': origFilename, 'outFinal': outFinal, 'outMedian': outMedian, 'outSci': outSci, 'outWeight': outWeight, 'outContext': outContext, 'outSingle': outSingle, 'outSWeight': outSWeight, 'outSContext': None, 'outSky': outSky, 'crcorImage': crCorImage, 'ivmFile': None } return fnames
[ "def", "_setOutputNames", "(", "self", ",", "rootname", ",", "suffix", "=", "'_drz'", ")", ":", "# Define FITS output filenames for intermediate products", "# Build names based on final DRIZZLE output name", "# where 'output' normally would have been created", "# by 'process_input()'", "#", "outFinal", "=", "rootname", "+", "suffix", "+", "'.fits'", "outSci", "=", "rootname", "+", "suffix", "+", "'_sci.fits'", "outWeight", "=", "rootname", "+", "suffix", "+", "'_wht.fits'", "outContext", "=", "rootname", "+", "suffix", "+", "'_ctx.fits'", "outMedian", "=", "rootname", "+", "'_med.fits'", "# Build names based on input name", "origFilename", "=", "self", ".", "_filename", ".", "replace", "(", "'.fits'", ",", "'_OrIg.fits'", ")", "outSky", "=", "rootname", "+", "'_sky.fits'", "outSingle", "=", "rootname", "+", "'_single_sci.fits'", "outSWeight", "=", "rootname", "+", "'_single_wht.fits'", "crCorImage", "=", "rootname", "+", "'_crclean.fits'", "# Build outputNames dictionary", "fnames", "=", "{", "'origFilename'", ":", "origFilename", ",", "'outFinal'", ":", "outFinal", ",", "'outMedian'", ":", "outMedian", ",", "'outSci'", ":", "outSci", ",", "'outWeight'", ":", "outWeight", ",", "'outContext'", ":", "outContext", ",", "'outSingle'", ":", "outSingle", ",", "'outSWeight'", ":", "outSWeight", ",", "'outSContext'", ":", "None", ",", "'outSky'", ":", "outSky", ",", "'crcorImage'", ":", "crCorImage", ",", "'ivmFile'", ":", "None", "}", "return", "fnames" ]
36.47619
13.142857
def auth(username, password): """ Middleware implementing authentication via LOGIN. Most of the time this middleware needs to be placed *after* TLS. :param username: Username to login with. :param password: Password of the user. """ def middleware(conn): conn.login(username, password) return middleware
[ "def", "auth", "(", "username", ",", "password", ")", ":", "def", "middleware", "(", "conn", ")", ":", "conn", ".", "login", "(", "username", ",", "password", ")", "return", "middleware" ]
28.083333
12.083333
def run(self, args): """ Load the pecan app, prepare the locals, sets the banner, and invokes the python shell. """ super(ShellCommand, self).run(args) # load the application app = self.load_app() # prepare the locals locs = dict(__name__='pecan-admin') locs['wsgiapp'] = app locs['app'] = TestApp(app) model = self.load_model(app.config) if model: locs['model'] = model # insert the pecan locals from pecan import abort, conf, redirect, request, response locs['abort'] = abort locs['conf'] = conf locs['redirect'] = redirect locs['request'] = request locs['response'] = response # prepare the banner banner = ' The following objects are available:\n' banner += ' %-10s - This project\'s WSGI App instance\n' % 'wsgiapp' banner += ' %-10s - The current configuration\n' % 'conf' banner += ' %-10s - webtest.TestApp wrapped around wsgiapp\n' % 'app' if model: model_name = getattr( model, '__module__', getattr(model, '__name__', 'model') ) banner += ' %-10s - Models from %s\n' % ('model', model_name) self.invoke_shell(locs, banner)
[ "def", "run", "(", "self", ",", "args", ")", ":", "super", "(", "ShellCommand", ",", "self", ")", ".", "run", "(", "args", ")", "# load the application", "app", "=", "self", ".", "load_app", "(", ")", "# prepare the locals", "locs", "=", "dict", "(", "__name__", "=", "'pecan-admin'", ")", "locs", "[", "'wsgiapp'", "]", "=", "app", "locs", "[", "'app'", "]", "=", "TestApp", "(", "app", ")", "model", "=", "self", ".", "load_model", "(", "app", ".", "config", ")", "if", "model", ":", "locs", "[", "'model'", "]", "=", "model", "# insert the pecan locals", "from", "pecan", "import", "abort", ",", "conf", ",", "redirect", ",", "request", ",", "response", "locs", "[", "'abort'", "]", "=", "abort", "locs", "[", "'conf'", "]", "=", "conf", "locs", "[", "'redirect'", "]", "=", "redirect", "locs", "[", "'request'", "]", "=", "request", "locs", "[", "'response'", "]", "=", "response", "# prepare the banner", "banner", "=", "' The following objects are available:\\n'", "banner", "+=", "' %-10s - This project\\'s WSGI App instance\\n'", "%", "'wsgiapp'", "banner", "+=", "' %-10s - The current configuration\\n'", "%", "'conf'", "banner", "+=", "' %-10s - webtest.TestApp wrapped around wsgiapp\\n'", "%", "'app'", "if", "model", ":", "model_name", "=", "getattr", "(", "model", ",", "'__module__'", ",", "getattr", "(", "model", ",", "'__name__'", ",", "'model'", ")", ")", "banner", "+=", "' %-10s - Models from %s\\n'", "%", "(", "'model'", ",", "model_name", ")", "self", ".", "invoke_shell", "(", "locs", ",", "banner", ")" ]
32.097561
17.268293
def GetKey(self, public_key_hash): """ Get the KeyPair belonging to the public key hash. Args: public_key_hash (UInt160): a public key hash to get the KeyPair for. Returns: KeyPair: If successful, the KeyPair belonging to the public key hash, otherwise None """ if public_key_hash.ToBytes() in self._keys.keys(): return self._keys[public_key_hash.ToBytes()] return None
[ "def", "GetKey", "(", "self", ",", "public_key_hash", ")", ":", "if", "public_key_hash", ".", "ToBytes", "(", ")", "in", "self", ".", "_keys", ".", "keys", "(", ")", ":", "return", "self", ".", "_keys", "[", "public_key_hash", ".", "ToBytes", "(", ")", "]", "return", "None" ]
34.692308
23.461538
def disconnect(self, timeout_sec=TIMEOUT_SEC): """Disconnect from the device. If not disconnected within the specified timeout then an exception is thrown. """ # Remove all the services, characteristics, and descriptors from the # lists of those items. Do this before disconnecting because they wont't # be accessible afterwards. for service in self.list_services(): for char in service.list_characteristics(): for desc in char.list_descriptors(): descriptor_list().remove(desc) characteristic_list().remove(char) service_list().remove(service) # Now disconnect. self._central_manager.cancelPeripheralConnection_(self._peripheral) if not self._disconnected.wait(timeout_sec): raise RuntimeError('Failed to disconnect to device within timeout period!')
[ "def", "disconnect", "(", "self", ",", "timeout_sec", "=", "TIMEOUT_SEC", ")", ":", "# Remove all the services, characteristics, and descriptors from the", "# lists of those items. Do this before disconnecting because they wont't", "# be accessible afterwards.", "for", "service", "in", "self", ".", "list_services", "(", ")", ":", "for", "char", "in", "service", ".", "list_characteristics", "(", ")", ":", "for", "desc", "in", "char", ".", "list_descriptors", "(", ")", ":", "descriptor_list", "(", ")", ".", "remove", "(", "desc", ")", "characteristic_list", "(", ")", ".", "remove", "(", "char", ")", "service_list", "(", ")", ".", "remove", "(", "service", ")", "# Now disconnect.", "self", ".", "_central_manager", ".", "cancelPeripheralConnection_", "(", "self", ".", "_peripheral", ")", "if", "not", "self", ".", "_disconnected", ".", "wait", "(", "timeout_sec", ")", ":", "raise", "RuntimeError", "(", "'Failed to disconnect to device within timeout period!'", ")" ]
53.235294
14.941176
def subscribe(self, id, name, port): """Send a Subscribe request to a remote machine""" sub = gntp.core.GNTPSubscribe() sub.add_header('Subscriber-ID', id) sub.add_header('Subscriber-Name', name) sub.add_header('Subscriber-Port', port) if self.password: sub.set_password(self.password, self.passwordHash) self.add_origin_info(sub) self.subscribe_hook(sub) return self._send('subscribe', sub)
[ "def", "subscribe", "(", "self", ",", "id", ",", "name", ",", "port", ")", ":", "sub", "=", "gntp", ".", "core", ".", "GNTPSubscribe", "(", ")", "sub", ".", "add_header", "(", "'Subscriber-ID'", ",", "id", ")", "sub", ".", "add_header", "(", "'Subscriber-Name'", ",", "name", ")", "sub", ".", "add_header", "(", "'Subscriber-Port'", ",", "port", ")", "if", "self", ".", "password", ":", "sub", ".", "set_password", "(", "self", ".", "password", ",", "self", ".", "passwordHash", ")", "self", ".", "add_origin_info", "(", "sub", ")", "self", ".", "subscribe_hook", "(", "sub", ")", "return", "self", ".", "_send", "(", "'subscribe'", ",", "sub", ")" ]
30.923077
12.307692
def filename(self): """ Name if the MOP formatted file to parse. @rtype: basestring @return: filename """ if self._filename is None: self._filename = storage.get_file(self.basename, self.ccd, ext=self.extension, version=self.type, prefix=self.prefix) return self._filename
[ "def", "filename", "(", "self", ")", ":", "if", "self", ".", "_filename", "is", "None", ":", "self", ".", "_filename", "=", "storage", ".", "get_file", "(", "self", ".", "basename", ",", "self", ".", "ccd", ",", "ext", "=", "self", ".", "extension", ",", "version", "=", "self", ".", "type", ",", "prefix", "=", "self", ".", "prefix", ")", "return", "self", ".", "_filename" ]
39.384615
14.153846
def cookie( url, name, value, expires=None): '''Return a new Cookie using a slightly more friendly API than that provided by six.moves.http_cookiejar @param name The cookie name {str} @param value The cookie value {str} @param url The URL path of the cookie {str} @param expires The expiry time of the cookie {datetime}. If provided, it must be a naive timestamp in UTC. ''' u = urlparse(url) domain = u.hostname if '.' not in domain and not _is_ip_addr(domain): domain += ".local" port = str(u.port) if u.port is not None else None secure = u.scheme == 'https' if expires is not None: if expires.tzinfo is not None: raise ValueError('Cookie expiration must be a naive datetime') expires = (expires - datetime(1970, 1, 1)).total_seconds() return http_cookiejar.Cookie( version=0, name=name, value=value, port=port, port_specified=port is not None, domain=domain, domain_specified=True, domain_initial_dot=False, path=u.path, path_specified=True, secure=secure, expires=expires, discard=False, comment=None, comment_url=None, rest=None, rfc2109=False, )
[ "def", "cookie", "(", "url", ",", "name", ",", "value", ",", "expires", "=", "None", ")", ":", "u", "=", "urlparse", "(", "url", ")", "domain", "=", "u", ".", "hostname", "if", "'.'", "not", "in", "domain", "and", "not", "_is_ip_addr", "(", "domain", ")", ":", "domain", "+=", "\".local\"", "port", "=", "str", "(", "u", ".", "port", ")", "if", "u", ".", "port", "is", "not", "None", "else", "None", "secure", "=", "u", ".", "scheme", "==", "'https'", "if", "expires", "is", "not", "None", ":", "if", "expires", ".", "tzinfo", "is", "not", "None", ":", "raise", "ValueError", "(", "'Cookie expiration must be a naive datetime'", ")", "expires", "=", "(", "expires", "-", "datetime", "(", "1970", ",", "1", ",", "1", ")", ")", ".", "total_seconds", "(", ")", "return", "http_cookiejar", ".", "Cookie", "(", "version", "=", "0", ",", "name", "=", "name", ",", "value", "=", "value", ",", "port", "=", "port", ",", "port_specified", "=", "port", "is", "not", "None", ",", "domain", "=", "domain", ",", "domain_specified", "=", "True", ",", "domain_initial_dot", "=", "False", ",", "path", "=", "u", ".", "path", ",", "path_specified", "=", "True", ",", "secure", "=", "secure", ",", "expires", "=", "expires", ",", "discard", "=", "False", ",", "comment", "=", "None", ",", "comment_url", "=", "None", ",", "rest", "=", "None", ",", "rfc2109", "=", "False", ",", ")" ]
29.837209
17.697674
def _WritePathInfo(self, client_id, path_info): """Writes a single path info record for given client.""" if client_id not in self.metadatas: raise db.UnknownClientError(client_id) path_record = self._GetPathRecord(client_id, path_info) path_record.AddPathInfo(path_info) parent_path_info = path_info.GetParent() if parent_path_info is not None: parent_path_record = self._GetPathRecord(client_id, parent_path_info) parent_path_record.AddChild(path_info)
[ "def", "_WritePathInfo", "(", "self", ",", "client_id", ",", "path_info", ")", ":", "if", "client_id", "not", "in", "self", ".", "metadatas", ":", "raise", "db", ".", "UnknownClientError", "(", "client_id", ")", "path_record", "=", "self", ".", "_GetPathRecord", "(", "client_id", ",", "path_info", ")", "path_record", ".", "AddPathInfo", "(", "path_info", ")", "parent_path_info", "=", "path_info", ".", "GetParent", "(", ")", "if", "parent_path_info", "is", "not", "None", ":", "parent_path_record", "=", "self", ".", "_GetPathRecord", "(", "client_id", ",", "parent_path_info", ")", "parent_path_record", ".", "AddChild", "(", "path_info", ")" ]
40.5
13.333333
def process(self, context, data): """ Default interface for microservices. Process the input data for the input context. """ self.context = context # Find the entityID for the SP that initiated the flow. try: sp_entity_id = context.state.state_dict['SATOSA_BASE']['requester'] except KeyError as err: satosa_logging(logger, logging.ERROR, "Unable to determine the entityID for the SP requester", context.state) return super().process(context, data) satosa_logging(logger, logging.DEBUG, "entityID for the SP requester is {}".format(sp_entity_id), context.state) # Get the configuration for the SP. if sp_entity_id in self.config.keys(): config = self.config[sp_entity_id] else: config = self.config['default'] satosa_logging(logger, logging.DEBUG, "Using config {}".format(self._filter_config(config)), context.state) # Ignore this SP entirely if so configured. if config['ignore']: satosa_logging(logger, logging.INFO, "Ignoring SP {}".format(sp_entity_id), None) return super().process(context, data) # The list of values for the LDAP search filters that will be tried in order to find the # LDAP directory record for the user. filter_values = [] # Loop over the configured list of identifiers from the IdP to consider and find # asserted values to construct the ordered list of values for the LDAP search filters. for candidate in config['ordered_identifier_candidates']: value = self._construct_filter_value(candidate, data) # If we have constructed a non empty value then add it as the next filter value # to use when searching for the user record. if value: filter_values.append(value) satosa_logging(logger, logging.DEBUG, "Added search filter value {} to list of search filters".format(value), context.state) # Initialize an empty LDAP record. The first LDAP record found using the ordered # list of search filter values will be the record used. 
record = None try: connection = config['connection'] for filter_val in filter_values: if record: break search_filter = '({0}={1})'.format(config['ldap_identifier_attribute'], filter_val) satosa_logging(logger, logging.DEBUG, "Constructed search filter {}".format(search_filter), context.state) satosa_logging(logger, logging.DEBUG, "Querying LDAP server...", context.state) message_id = connection.search(config['search_base'], search_filter, attributes=config['search_return_attributes'].keys()) responses = connection.get_response(message_id)[0] satosa_logging(logger, logging.DEBUG, "Done querying LDAP server", context.state) satosa_logging(logger, logging.DEBUG, "LDAP server returned {} records".format(len(responses)), context.state) # for now consider only the first record found (if any) if len(responses) > 0: if len(responses) > 1: satosa_logging(logger, logging.WARN, "LDAP server returned {} records using search filter value {}".format(len(responses), filter_val), context.state) record = responses[0] break except LDAPException as err: satosa_logging(logger, logging.ERROR, "Caught LDAP exception: {}".format(err), context.state) except LdapAttributeStoreError as err: satosa_logging(logger, logging.ERROR, "Caught LDAP Attribute Store exception: {}".format(err), context.state) except Exception as err: satosa_logging(logger, logging.ERROR, "Caught unhandled exception: {}".format(err), context.state) else: err = None finally: if err: return super().process(context, data) # Before using a found record, if any, to populate attributes # clear any attributes incoming to this microservice if so configured. 
if config['clear_input_attributes']: satosa_logging(logger, logging.DEBUG, "Clearing values for these input attributes: {}".format(data.attributes), context.state) data.attributes = {} # Use a found record, if any, to populate attributes and input for NameID if record: satosa_logging(logger, logging.DEBUG, "Using record with DN {}".format(record["dn"]), context.state) satosa_logging(logger, logging.DEBUG, "Record with DN {} has attributes {}".format(record["dn"], record["attributes"]), context.state) # Populate attributes as configured. self._populate_attributes(config, record, context, data) # Populate input for NameID if configured. SATOSA core does the hashing of input # to create a persistent NameID. self._populate_input_for_name_id(config, record, context, data) else: satosa_logging(logger, logging.WARN, "No record found in LDAP so no attributes will be added", context.state) on_ldap_search_result_empty = config['on_ldap_search_result_empty'] if on_ldap_search_result_empty: # Redirect to the configured URL with # the entityIDs for the target SP and IdP used by the user # as query string parameters (URL encoded). encoded_sp_entity_id = urllib.parse.quote_plus(sp_entity_id) encoded_idp_entity_id = urllib.parse.quote_plus(data.auth_info.issuer) url = "{}?sp={}&idp={}".format(on_ldap_search_result_empty, encoded_sp_entity_id, encoded_idp_entity_id) satosa_logging(logger, logging.INFO, "Redirecting to {}".format(url), context.state) return Redirect(url) satosa_logging(logger, logging.DEBUG, "Returning data.attributes {}".format(str(data.attributes)), context.state) return super().process(context, data)
[ "def", "process", "(", "self", ",", "context", ",", "data", ")", ":", "self", ".", "context", "=", "context", "# Find the entityID for the SP that initiated the flow.", "try", ":", "sp_entity_id", "=", "context", ".", "state", ".", "state_dict", "[", "'SATOSA_BASE'", "]", "[", "'requester'", "]", "except", "KeyError", "as", "err", ":", "satosa_logging", "(", "logger", ",", "logging", ".", "ERROR", ",", "\"Unable to determine the entityID for the SP requester\"", ",", "context", ".", "state", ")", "return", "super", "(", ")", ".", "process", "(", "context", ",", "data", ")", "satosa_logging", "(", "logger", ",", "logging", ".", "DEBUG", ",", "\"entityID for the SP requester is {}\"", ".", "format", "(", "sp_entity_id", ")", ",", "context", ".", "state", ")", "# Get the configuration for the SP.", "if", "sp_entity_id", "in", "self", ".", "config", ".", "keys", "(", ")", ":", "config", "=", "self", ".", "config", "[", "sp_entity_id", "]", "else", ":", "config", "=", "self", ".", "config", "[", "'default'", "]", "satosa_logging", "(", "logger", ",", "logging", ".", "DEBUG", ",", "\"Using config {}\"", ".", "format", "(", "self", ".", "_filter_config", "(", "config", ")", ")", ",", "context", ".", "state", ")", "# Ignore this SP entirely if so configured.", "if", "config", "[", "'ignore'", "]", ":", "satosa_logging", "(", "logger", ",", "logging", ".", "INFO", ",", "\"Ignoring SP {}\"", ".", "format", "(", "sp_entity_id", ")", ",", "None", ")", "return", "super", "(", ")", ".", "process", "(", "context", ",", "data", ")", "# The list of values for the LDAP search filters that will be tried in order to find the", "# LDAP directory record for the user.", "filter_values", "=", "[", "]", "# Loop over the configured list of identifiers from the IdP to consider and find", "# asserted values to construct the ordered list of values for the LDAP search filters.", "for", "candidate", "in", "config", "[", "'ordered_identifier_candidates'", "]", ":", "value", "=", "self", ".", 
"_construct_filter_value", "(", "candidate", ",", "data", ")", "# If we have constructed a non empty value then add it as the next filter value", "# to use when searching for the user record.", "if", "value", ":", "filter_values", ".", "append", "(", "value", ")", "satosa_logging", "(", "logger", ",", "logging", ".", "DEBUG", ",", "\"Added search filter value {} to list of search filters\"", ".", "format", "(", "value", ")", ",", "context", ".", "state", ")", "# Initialize an empty LDAP record. The first LDAP record found using the ordered", "# list of search filter values will be the record used.", "record", "=", "None", "try", ":", "connection", "=", "config", "[", "'connection'", "]", "for", "filter_val", "in", "filter_values", ":", "if", "record", ":", "break", "search_filter", "=", "'({0}={1})'", ".", "format", "(", "config", "[", "'ldap_identifier_attribute'", "]", ",", "filter_val", ")", "satosa_logging", "(", "logger", ",", "logging", ".", "DEBUG", ",", "\"Constructed search filter {}\"", ".", "format", "(", "search_filter", ")", ",", "context", ".", "state", ")", "satosa_logging", "(", "logger", ",", "logging", ".", "DEBUG", ",", "\"Querying LDAP server...\"", ",", "context", ".", "state", ")", "message_id", "=", "connection", ".", "search", "(", "config", "[", "'search_base'", "]", ",", "search_filter", ",", "attributes", "=", "config", "[", "'search_return_attributes'", "]", ".", "keys", "(", ")", ")", "responses", "=", "connection", ".", "get_response", "(", "message_id", ")", "[", "0", "]", "satosa_logging", "(", "logger", ",", "logging", ".", "DEBUG", ",", "\"Done querying LDAP server\"", ",", "context", ".", "state", ")", "satosa_logging", "(", "logger", ",", "logging", ".", "DEBUG", ",", "\"LDAP server returned {} records\"", ".", "format", "(", "len", "(", "responses", ")", ")", ",", "context", ".", "state", ")", "# for now consider only the first record found (if any)", "if", "len", "(", "responses", ")", ">", "0", ":", "if", "len", "(", 
"responses", ")", ">", "1", ":", "satosa_logging", "(", "logger", ",", "logging", ".", "WARN", ",", "\"LDAP server returned {} records using search filter value {}\"", ".", "format", "(", "len", "(", "responses", ")", ",", "filter_val", ")", ",", "context", ".", "state", ")", "record", "=", "responses", "[", "0", "]", "break", "except", "LDAPException", "as", "err", ":", "satosa_logging", "(", "logger", ",", "logging", ".", "ERROR", ",", "\"Caught LDAP exception: {}\"", ".", "format", "(", "err", ")", ",", "context", ".", "state", ")", "except", "LdapAttributeStoreError", "as", "err", ":", "satosa_logging", "(", "logger", ",", "logging", ".", "ERROR", ",", "\"Caught LDAP Attribute Store exception: {}\"", ".", "format", "(", "err", ")", ",", "context", ".", "state", ")", "except", "Exception", "as", "err", ":", "satosa_logging", "(", "logger", ",", "logging", ".", "ERROR", ",", "\"Caught unhandled exception: {}\"", ".", "format", "(", "err", ")", ",", "context", ".", "state", ")", "else", ":", "err", "=", "None", "finally", ":", "if", "err", ":", "return", "super", "(", ")", ".", "process", "(", "context", ",", "data", ")", "# Before using a found record, if any, to populate attributes", "# clear any attributes incoming to this microservice if so configured.", "if", "config", "[", "'clear_input_attributes'", "]", ":", "satosa_logging", "(", "logger", ",", "logging", ".", "DEBUG", ",", "\"Clearing values for these input attributes: {}\"", ".", "format", "(", "data", ".", "attributes", ")", ",", "context", ".", "state", ")", "data", ".", "attributes", "=", "{", "}", "# Use a found record, if any, to populate attributes and input for NameID", "if", "record", ":", "satosa_logging", "(", "logger", ",", "logging", ".", "DEBUG", ",", "\"Using record with DN {}\"", ".", "format", "(", "record", "[", "\"dn\"", "]", ")", ",", "context", ".", "state", ")", "satosa_logging", "(", "logger", ",", "logging", ".", "DEBUG", ",", "\"Record with DN {} has attributes {}\"", ".", 
"format", "(", "record", "[", "\"dn\"", "]", ",", "record", "[", "\"attributes\"", "]", ")", ",", "context", ".", "state", ")", "# Populate attributes as configured.", "self", ".", "_populate_attributes", "(", "config", ",", "record", ",", "context", ",", "data", ")", "# Populate input for NameID if configured. SATOSA core does the hashing of input", "# to create a persistent NameID.", "self", ".", "_populate_input_for_name_id", "(", "config", ",", "record", ",", "context", ",", "data", ")", "else", ":", "satosa_logging", "(", "logger", ",", "logging", ".", "WARN", ",", "\"No record found in LDAP so no attributes will be added\"", ",", "context", ".", "state", ")", "on_ldap_search_result_empty", "=", "config", "[", "'on_ldap_search_result_empty'", "]", "if", "on_ldap_search_result_empty", ":", "# Redirect to the configured URL with", "# the entityIDs for the target SP and IdP used by the user", "# as query string parameters (URL encoded).", "encoded_sp_entity_id", "=", "urllib", ".", "parse", ".", "quote_plus", "(", "sp_entity_id", ")", "encoded_idp_entity_id", "=", "urllib", ".", "parse", ".", "quote_plus", "(", "data", ".", "auth_info", ".", "issuer", ")", "url", "=", "\"{}?sp={}&idp={}\"", ".", "format", "(", "on_ldap_search_result_empty", ",", "encoded_sp_entity_id", ",", "encoded_idp_entity_id", ")", "satosa_logging", "(", "logger", ",", "logging", ".", "INFO", ",", "\"Redirecting to {}\"", ".", "format", "(", "url", ")", ",", "context", ".", "state", ")", "return", "Redirect", "(", "url", ")", "satosa_logging", "(", "logger", ",", "logging", ".", "DEBUG", ",", "\"Returning data.attributes {}\"", ".", "format", "(", "str", "(", "data", ".", "attributes", ")", ")", ",", "context", ".", "state", ")", "return", "super", "(", ")", ".", "process", "(", "context", ",", "data", ")" ]
53.182609
34.486957
def get_or_create(full_filename, headers_types=None, default_entry=''): """Load a .csv file into a CSVModel if the file exists, or create a new CSVModel with the given filename if the file does not exist. Parameters ---------- full_filename : :obj:`str` The file path to a .csv file. headers_types : :obj:`list` of :obj:`tuple` of :obj:`str`, :obj:`str` A list of tuples, where the first element in each tuple is the string header for a column and the second element is that column's data type as a string. default_entry : :obj:`str` The default entry for cells in the CSV. Returns ------- :obj:`CSVModel` The CSVModel initialized with the data in the given file, or a new CSVModel tied to the filename if the file doesn't currently exist. """ # convert dictionaries to list if isinstance(headers_types, dict): headers_types_list = [(k,v) for k,v in headers_types.items()] headers_types = headers_types_list if os.path.isfile(full_filename): return CSVModel.load(full_filename) else: return CSVModel(full_filename, headers_types, default_entry=default_entry)
[ "def", "get_or_create", "(", "full_filename", ",", "headers_types", "=", "None", ",", "default_entry", "=", "''", ")", ":", "# convert dictionaries to list", "if", "isinstance", "(", "headers_types", ",", "dict", ")", ":", "headers_types_list", "=", "[", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "headers_types", ".", "items", "(", ")", "]", "headers_types", "=", "headers_types_list", "if", "os", ".", "path", ".", "isfile", "(", "full_filename", ")", ":", "return", "CSVModel", ".", "load", "(", "full_filename", ")", "else", ":", "return", "CSVModel", "(", "full_filename", ",", "headers_types", ",", "default_entry", "=", "default_entry", ")" ]
40.125
22.375
def mss(**kwargs): # type: (Any) -> MSSMixin """ Factory returning a proper MSS class instance. It detects the plateform we are running on and choose the most adapted mss_class to take screenshots. It then proxies its arguments to the class for instantiation. """ os_ = platform.system().lower() if os_ == "darwin": from . import darwin return darwin.MSS(**kwargs) if os_ == "linux": from . import linux return linux.MSS(**kwargs) if os_ == "windows": from . import windows return windows.MSS(**kwargs) raise ScreenShotError("System {!r} not (yet?) implemented.".format(os_))
[ "def", "mss", "(", "*", "*", "kwargs", ")", ":", "# type: (Any) -> MSSMixin", "os_", "=", "platform", ".", "system", "(", ")", ".", "lower", "(", ")", "if", "os_", "==", "\"darwin\"", ":", "from", ".", "import", "darwin", "return", "darwin", ".", "MSS", "(", "*", "*", "kwargs", ")", "if", "os_", "==", "\"linux\"", ":", "from", ".", "import", "linux", "return", "linux", ".", "MSS", "(", "*", "*", "kwargs", ")", "if", "os_", "==", "\"windows\"", ":", "from", ".", "import", "windows", "return", "windows", ".", "MSS", "(", "*", "*", "kwargs", ")", "raise", "ScreenShotError", "(", "\"System {!r} not (yet?) implemented.\"", ".", "format", "(", "os_", ")", ")" ]
22.533333
21.7
def reset_stats(self, pattern): """Reset VM statistics. in pattern of type str The selection pattern. A bit similar to filename globbing. """ if not isinstance(pattern, basestring): raise TypeError("pattern can only be an instance of type basestring") self._call("resetStats", in_p=[pattern])
[ "def", "reset_stats", "(", "self", ",", "pattern", ")", ":", "if", "not", "isinstance", "(", "pattern", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"pattern can only be an instance of type basestring\"", ")", "self", ".", "_call", "(", "\"resetStats\"", ",", "in_p", "=", "[", "pattern", "]", ")" ]
33.545455
17.181818
def prt_nts(self, desc2nts, prt=sys.stdout, prtfmt=None): """Print grouped and sorted GO IDs.""" # deprecated # Set print format string if prtfmt is None: prtfmt = "{{hdr1usr01:2}} {FMT}\n".format(FMT=self.grprobj.gosubdag.prt_attr['fmt']) # 1-D: data to print is a flat list of namedtuples if 'flat' in desc2nts: prt_txt(prt, desc2nts['flat'], prtfmt=prtfmt) # 2-D: data to print is a list of [(section, nts), ... else: WrSectionsTxt.prt_sections(prt, desc2nts['sections'], prtfmt)
[ "def", "prt_nts", "(", "self", ",", "desc2nts", ",", "prt", "=", "sys", ".", "stdout", ",", "prtfmt", "=", "None", ")", ":", "# deprecated", "# Set print format string", "if", "prtfmt", "is", "None", ":", "prtfmt", "=", "\"{{hdr1usr01:2}} {FMT}\\n\"", ".", "format", "(", "FMT", "=", "self", ".", "grprobj", ".", "gosubdag", ".", "prt_attr", "[", "'fmt'", "]", ")", "# 1-D: data to print is a flat list of namedtuples", "if", "'flat'", "in", "desc2nts", ":", "prt_txt", "(", "prt", ",", "desc2nts", "[", "'flat'", "]", ",", "prtfmt", "=", "prtfmt", ")", "# 2-D: data to print is a list of [(section, nts), ...", "else", ":", "WrSectionsTxt", ".", "prt_sections", "(", "prt", ",", "desc2nts", "[", "'sections'", "]", ",", "prtfmt", ")" ]
47.583333
20.083333
def update_liststore_image(liststore, tree_iters, col, pcs_files, dir_name, icon_size=96): '''下载文件缩略图, 并将它显示到liststore里. pcs_files - 里面包含了几个必要的字段. dir_name - 缓存目录, 下载到的图片会保存这个目录里. size - 指定图片的缩放大小, 默认是96px. ''' def update_image(filepath, tree_iter): try: pix = GdkPixbuf.Pixbuf.new_from_file_at_size(filepath, icon_size, icon_size) tree_path = liststore.get_path(tree_iter) if tree_path is None: return liststore[tree_path][col] = pix except GLib.GError: logger.error(traceback.format_exc()) def dump_image(url, filepath): req = net.urlopen(url) if not req or not req.data: logger.warn('update_liststore_image(), failed to request %s' % url) return False with open(filepath, 'wb') as fh: fh.write(req.data) return True for tree_iter, pcs_file in zip(tree_iters, pcs_files): if 'thumbs' not in pcs_file: continue if 'url1' in pcs_file['thumbs']: key = 'url1' elif 'url2' in pcs_file['thumbs']: key = 'url2' elif 'url3' in pcs_file['thumbs']: key = 'url3' else: continue fs_id = pcs_file['fs_id'] url = pcs_file['thumbs'][key] filepath = os.path.join(dir_name, '{0}.jpg'.format(fs_id)) if os.path.exists(filepath) and os.path.getsize(filepath): GLib.idle_add(update_image, filepath, tree_iter) elif not url or len(url) < 10: logger.warn('update_liststore_image(), failed to get url') else: status = dump_image(url, filepath) if status: GLib.idle_add(update_image, filepath, tree_iter)
[ "def", "update_liststore_image", "(", "liststore", ",", "tree_iters", ",", "col", ",", "pcs_files", ",", "dir_name", ",", "icon_size", "=", "96", ")", ":", "def", "update_image", "(", "filepath", ",", "tree_iter", ")", ":", "try", ":", "pix", "=", "GdkPixbuf", ".", "Pixbuf", ".", "new_from_file_at_size", "(", "filepath", ",", "icon_size", ",", "icon_size", ")", "tree_path", "=", "liststore", ".", "get_path", "(", "tree_iter", ")", "if", "tree_path", "is", "None", ":", "return", "liststore", "[", "tree_path", "]", "[", "col", "]", "=", "pix", "except", "GLib", ".", "GError", ":", "logger", ".", "error", "(", "traceback", ".", "format_exc", "(", ")", ")", "def", "dump_image", "(", "url", ",", "filepath", ")", ":", "req", "=", "net", ".", "urlopen", "(", "url", ")", "if", "not", "req", "or", "not", "req", ".", "data", ":", "logger", ".", "warn", "(", "'update_liststore_image(), failed to request %s'", "%", "url", ")", "return", "False", "with", "open", "(", "filepath", ",", "'wb'", ")", "as", "fh", ":", "fh", ".", "write", "(", "req", ".", "data", ")", "return", "True", "for", "tree_iter", ",", "pcs_file", "in", "zip", "(", "tree_iters", ",", "pcs_files", ")", ":", "if", "'thumbs'", "not", "in", "pcs_file", ":", "continue", "if", "'url1'", "in", "pcs_file", "[", "'thumbs'", "]", ":", "key", "=", "'url1'", "elif", "'url2'", "in", "pcs_file", "[", "'thumbs'", "]", ":", "key", "=", "'url2'", "elif", "'url3'", "in", "pcs_file", "[", "'thumbs'", "]", ":", "key", "=", "'url3'", "else", ":", "continue", "fs_id", "=", "pcs_file", "[", "'fs_id'", "]", "url", "=", "pcs_file", "[", "'thumbs'", "]", "[", "key", "]", "filepath", "=", "os", ".", "path", ".", "join", "(", "dir_name", ",", "'{0}.jpg'", ".", "format", "(", "fs_id", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "filepath", ")", "and", "os", ".", "path", ".", "getsize", "(", "filepath", ")", ":", "GLib", ".", "idle_add", "(", "update_image", ",", "filepath", ",", "tree_iter", ")", "elif", "not", "url", "or", 
"len", "(", "url", ")", "<", "10", ":", "logger", ".", "warn", "(", "'update_liststore_image(), failed to get url'", ")", "else", ":", "status", "=", "dump_image", "(", "url", ",", "filepath", ")", "if", "status", ":", "GLib", ".", "idle_add", "(", "update_image", ",", "filepath", ",", "tree_iter", ")" ]
36.68
16.08
def disable_by_count(self): """ Disable the profiler if the number of disable requests matches the number of enable requests. """ if self.enable_count > 0: self.enable_count -= 1 if self.enable_count == 0: self.disable()
[ "def", "disable_by_count", "(", "self", ")", ":", "if", "self", ".", "enable_count", ">", "0", ":", "self", ".", "enable_count", "-=", "1", "if", "self", ".", "enable_count", "==", "0", ":", "self", ".", "disable", "(", ")" ]
35.625
5.5
def load(self, dataset_keys, previous_datasets=None): """Load `dataset_keys`. If `previous_datasets` is provided, do not reload those.""" all_datasets = previous_datasets or DatasetDict() datasets = DatasetDict() # Include coordinates in the list of datasets to load dsids = [self.get_dataset_key(ds_key) for ds_key in dataset_keys] coordinates = self._get_coordinates_for_dataset_keys(dsids) all_dsids = list(set().union(*coordinates.values())) + dsids for dsid in all_dsids: if dsid in all_datasets: continue coords = [all_datasets.get(cid, None) for cid in coordinates.get(dsid, [])] ds = self._load_dataset_with_area(dsid, coords) if ds is not None: all_datasets[dsid] = ds if dsid in dsids: datasets[dsid] = ds self._load_ancillary_variables(all_datasets) return datasets
[ "def", "load", "(", "self", ",", "dataset_keys", ",", "previous_datasets", "=", "None", ")", ":", "all_datasets", "=", "previous_datasets", "or", "DatasetDict", "(", ")", "datasets", "=", "DatasetDict", "(", ")", "# Include coordinates in the list of datasets to load", "dsids", "=", "[", "self", ".", "get_dataset_key", "(", "ds_key", ")", "for", "ds_key", "in", "dataset_keys", "]", "coordinates", "=", "self", ".", "_get_coordinates_for_dataset_keys", "(", "dsids", ")", "all_dsids", "=", "list", "(", "set", "(", ")", ".", "union", "(", "*", "coordinates", ".", "values", "(", ")", ")", ")", "+", "dsids", "for", "dsid", "in", "all_dsids", ":", "if", "dsid", "in", "all_datasets", ":", "continue", "coords", "=", "[", "all_datasets", ".", "get", "(", "cid", ",", "None", ")", "for", "cid", "in", "coordinates", ".", "get", "(", "dsid", ",", "[", "]", ")", "]", "ds", "=", "self", ".", "_load_dataset_with_area", "(", "dsid", ",", "coords", ")", "if", "ds", "is", "not", "None", ":", "all_datasets", "[", "dsid", "]", "=", "ds", "if", "dsid", "in", "dsids", ":", "datasets", "[", "dsid", "]", "=", "ds", "self", ".", "_load_ancillary_variables", "(", "all_datasets", ")", "return", "datasets" ]
39.28
17.28
def set_color(self, group, color, pct=1): """ Sets brightness of LEDs in the given group to the values specified in color tuple. When percentage is specified, brightness of each LED is reduced proportionally. Example:: my_leds = Leds() my_leds.set_color('LEFT', 'AMBER') With a custom color:: my_leds = Leds() my_leds.set_color('LEFT', (0.5, 0.3)) """ # If this is a platform without LEDs there is nothing to do if not self.leds: return color_tuple = color if isinstance(color, str): assert color in self.led_colors, \ "%s is an invalid LED color, valid choices are %s" % \ (color, ', '.join(self.led_colors.keys())) color_tuple = self.led_colors[color] assert group in self.led_groups, \ "%s is an invalid LED group, valid choices are %s" % \ (group, ', '.join(self.led_groups.keys())) for led, value in zip(self.led_groups[group], color_tuple): led.brightness_pct = value * pct
[ "def", "set_color", "(", "self", ",", "group", ",", "color", ",", "pct", "=", "1", ")", ":", "# If this is a platform without LEDs there is nothing to do", "if", "not", "self", ".", "leds", ":", "return", "color_tuple", "=", "color", "if", "isinstance", "(", "color", ",", "str", ")", ":", "assert", "color", "in", "self", ".", "led_colors", ",", "\"%s is an invalid LED color, valid choices are %s\"", "%", "(", "color", ",", "', '", ".", "join", "(", "self", ".", "led_colors", ".", "keys", "(", ")", ")", ")", "color_tuple", "=", "self", ".", "led_colors", "[", "color", "]", "assert", "group", "in", "self", ".", "led_groups", ",", "\"%s is an invalid LED group, valid choices are %s\"", "%", "(", "group", ",", "', '", ".", "join", "(", "self", ".", "led_groups", ".", "keys", "(", ")", ")", ")", "for", "led", ",", "value", "in", "zip", "(", "self", ".", "led_groups", "[", "group", "]", ",", "color_tuple", ")", ":", "led", ".", "brightness_pct", "=", "value", "*", "pct" ]
33.636364
19.818182
def multi_evaluate(self, x, out=None): """Evaluate log of the density to propose ``x``, namely log(q(x)) for each row in x. :param x: Matrix-like array; the proposed points. Expect i-th accessible as ``x[i]``. :param out: Vector-like array, length==``len(x)``, optional; If provided, the output is written into this array. """ if out is None: out = _np.empty(len(x)) else: assert len(out) == len(x) for i, point in enumerate(x): out[i] = self.evaluate(point) return out
[ "def", "multi_evaluate", "(", "self", ",", "x", ",", "out", "=", "None", ")", ":", "if", "out", "is", "None", ":", "out", "=", "_np", ".", "empty", "(", "len", "(", "x", ")", ")", "else", ":", "assert", "len", "(", "out", ")", "==", "len", "(", "x", ")", "for", "i", ",", "point", "in", "enumerate", "(", "x", ")", ":", "out", "[", "i", "]", "=", "self", ".", "evaluate", "(", "point", ")", "return", "out" ]
25.375
21.291667
def request(self, persist_id=None): """Cancel an ongoing confirmed commit. Depends on the `:candidate` and `:confirmed-commit` capabilities. *persist-id* value must be equal to the value given in the <persist> parameter to the previous <commit> operation. """ node = new_ele("cancel-commit") if persist_id is not None: sub_ele(node, "persist-id").text = persist_id return self._request(node)
[ "def", "request", "(", "self", ",", "persist_id", "=", "None", ")", ":", "node", "=", "new_ele", "(", "\"cancel-commit\"", ")", "if", "persist_id", "is", "not", "None", ":", "sub_ele", "(", "node", ",", "\"persist-id\"", ")", ".", "text", "=", "persist_id", "return", "self", ".", "_request", "(", "node", ")" ]
44.4
19.7
def to_representation(self, instance): """ Object instance -> Dict of primitive datatypes. """ ret = OrderedDict() readable_fields = [ field for field in self.fields.values() if not field.write_only ] for field in readable_fields: try: field_representation = self._get_field_representation(field, instance) ret[field.field_name] = field_representation except SkipField: continue return ret
[ "def", "to_representation", "(", "self", ",", "instance", ")", ":", "ret", "=", "OrderedDict", "(", ")", "readable_fields", "=", "[", "field", "for", "field", "in", "self", ".", "fields", ".", "values", "(", ")", "if", "not", "field", ".", "write_only", "]", "for", "field", "in", "readable_fields", ":", "try", ":", "field_representation", "=", "self", ".", "_get_field_representation", "(", "field", ",", "instance", ")", "ret", "[", "field", ".", "field_name", "]", "=", "field_representation", "except", "SkipField", ":", "continue", "return", "ret" ]
29.666667
17.333333
def write_chunk(outfile, tag, data=b''): """ Write a PNG chunk to the output file, including length and checksum. """ data = bytes(data) # http://www.w3.org/TR/PNG/#5Chunk-layout outfile.write(struct.pack("!I", len(data))) outfile.write(tag) outfile.write(data) checksum = zlib.crc32(tag) checksum = zlib.crc32(data, checksum) checksum &= 2 ** 32 - 1 outfile.write(struct.pack("!I", checksum))
[ "def", "write_chunk", "(", "outfile", ",", "tag", ",", "data", "=", "b''", ")", ":", "data", "=", "bytes", "(", "data", ")", "# http://www.w3.org/TR/PNG/#5Chunk-layout", "outfile", ".", "write", "(", "struct", ".", "pack", "(", "\"!I\"", ",", "len", "(", "data", ")", ")", ")", "outfile", ".", "write", "(", "tag", ")", "outfile", ".", "write", "(", "data", ")", "checksum", "=", "zlib", ".", "crc32", "(", "tag", ")", "checksum", "=", "zlib", ".", "crc32", "(", "data", ",", "checksum", ")", "checksum", "&=", "2", "**", "32", "-", "1", "outfile", ".", "write", "(", "struct", ".", "pack", "(", "\"!I\"", ",", "checksum", ")", ")" ]
28.8
12.266667
def _slice(self, slicer): """ Return a slice of myself. For internal compatibility with numpy arrays. """ # only allow 1 dimensional slicing, but can # in a 2-d case be passd (slice(None),....) if isinstance(slicer, tuple) and len(slicer) == 2: if not com.is_null_slice(slicer[0]): raise AssertionError("invalid slicing for a 1-ndim " "categorical") slicer = slicer[1] codes = self._codes[slicer] return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
[ "def", "_slice", "(", "self", ",", "slicer", ")", ":", "# only allow 1 dimensional slicing, but can", "# in a 2-d case be passd (slice(None),....)", "if", "isinstance", "(", "slicer", ",", "tuple", ")", "and", "len", "(", "slicer", ")", "==", "2", ":", "if", "not", "com", ".", "is_null_slice", "(", "slicer", "[", "0", "]", ")", ":", "raise", "AssertionError", "(", "\"invalid slicing for a 1-ndim \"", "\"categorical\"", ")", "slicer", "=", "slicer", "[", "1", "]", "codes", "=", "self", ".", "_codes", "[", "slicer", "]", "return", "self", ".", "_constructor", "(", "values", "=", "codes", ",", "dtype", "=", "self", ".", "dtype", ",", "fastpath", "=", "True", ")" ]
35.529412
17.411765
def bbox_to_ybox(bbox): """Convert from corner bounding box to center/shape""" return [ (bbox[1] + bbox[3]) / 2, (bbox[0] + bbox[2]) / 2, (bbox[3] - bbox[1]), (bbox[2] - bbox[0]), ]
[ "def", "bbox_to_ybox", "(", "bbox", ")", ":", "return", "[", "(", "bbox", "[", "1", "]", "+", "bbox", "[", "3", "]", ")", "/", "2", ",", "(", "bbox", "[", "0", "]", "+", "bbox", "[", "2", "]", ")", "/", "2", ",", "(", "bbox", "[", "3", "]", "-", "bbox", "[", "1", "]", ")", ",", "(", "bbox", "[", "2", "]", "-", "bbox", "[", "0", "]", ")", ",", "]" ]
29.25
13
def post(self, request): """Log out the user.""" logout_url = self.redirect_url if is_authenticated(request.user): # Check if a method exists to build the URL to log out the user # from the OP. logout_from_op = self.get_settings('OIDC_OP_LOGOUT_URL_METHOD', '') if logout_from_op: logout_url = import_string(logout_from_op)(request) # Log out the Django user if they were logged in. auth.logout(request) return HttpResponseRedirect(logout_url)
[ "def", "post", "(", "self", ",", "request", ")", ":", "logout_url", "=", "self", ".", "redirect_url", "if", "is_authenticated", "(", "request", ".", "user", ")", ":", "# Check if a method exists to build the URL to log out the user", "# from the OP.", "logout_from_op", "=", "self", ".", "get_settings", "(", "'OIDC_OP_LOGOUT_URL_METHOD'", ",", "''", ")", "if", "logout_from_op", ":", "logout_url", "=", "import_string", "(", "logout_from_op", ")", "(", "request", ")", "# Log out the Django user if they were logged in.", "auth", ".", "logout", "(", "request", ")", "return", "HttpResponseRedirect", "(", "logout_url", ")" ]
36.8
20.066667
def solveAndNotify(self, request): """Notifies the owner of the current request (so, the user doing the exercise) that they've solved the exercise, and mark it as solved in the database. """ remote = request.transport.remote withThisIdentifier = Exercise.identifier == self.exerciseIdentifier exercise = self.store.findUnique(Exercise, withThisIdentifier) solveAndNotify(remote, exercise)
[ "def", "solveAndNotify", "(", "self", ",", "request", ")", ":", "remote", "=", "request", ".", "transport", ".", "remote", "withThisIdentifier", "=", "Exercise", ".", "identifier", "==", "self", ".", "exerciseIdentifier", "exercise", "=", "self", ".", "store", ".", "findUnique", "(", "Exercise", ",", "withThisIdentifier", ")", "solveAndNotify", "(", "remote", ",", "exercise", ")" ]
44.4
14.7
def Modified(self): """Also updates the state of the containing oneof in the parent message.""" try: self._parent_message_weakref._UpdateOneofState(self._field) super(_OneofListener, self).Modified() except ReferenceError: pass
[ "def", "Modified", "(", "self", ")", ":", "try", ":", "self", ".", "_parent_message_weakref", ".", "_UpdateOneofState", "(", "self", ".", "_field", ")", "super", "(", "_OneofListener", ",", "self", ")", ".", "Modified", "(", ")", "except", "ReferenceError", ":", "pass" ]
35.857143
18
def upsert_all(engine, table, data): """ Update data by primary key columns. If not able to update, do insert. Example:: # suppose in database we already have {"id": 1, "name": "Alice"} >>> data = [ ... {"id": 1, "name": "Bob"}, # this will be updated ... {"id": 2, "name": "Cathy"}, # this will be added ... ] >>> upsert_all(engine, table_user, data) >>> engine.execute(select([table_user])).fetchall() [{"id": 1, "name": "Bob"}, {"id": 2, "name": "Cathy"}] **中文文档** 批量更新文档. 如果该表格定义了Primary Key, 则用Primary Key约束where语句. 对于 where语句无法找到的行, 自动进行批量bulk insert. """ update_all(engine, table, data, upsert=True)
[ "def", "upsert_all", "(", "engine", ",", "table", ",", "data", ")", ":", "update_all", "(", "engine", ",", "table", ",", "data", ",", "upsert", "=", "True", ")" ]
33.047619
21.809524
def query(self): """A MultiDictProxy representing parsed query parameters in decoded representation. Empty value if URL has no query part. """ ret = MultiDict(parse_qsl(self.raw_query_string, keep_blank_values=True)) return MultiDictProxy(ret)
[ "def", "query", "(", "self", ")", ":", "ret", "=", "MultiDict", "(", "parse_qsl", "(", "self", ".", "raw_query_string", ",", "keep_blank_values", "=", "True", ")", ")", "return", "MultiDictProxy", "(", "ret", ")" ]
31.666667
19.222222
def _send_content(self, content, connection): """ Send a content array from the connection """ if connection: if connection.async: callback = connection.callbacks['remote'] if callback: callback(self, self.parent_object, content) self.current_connection.reset() self.current_connection = None else: return (self, self.parent_object, content)
[ "def", "_send_content", "(", "self", ",", "content", ",", "connection", ")", ":", "if", "connection", ":", "if", "connection", ".", "async", ":", "callback", "=", "connection", ".", "callbacks", "[", "'remote'", "]", "if", "callback", ":", "callback", "(", "self", ",", "self", ".", "parent_object", ",", "content", ")", "self", ".", "current_connection", ".", "reset", "(", ")", "self", ".", "current_connection", "=", "None", "else", ":", "return", "(", "self", ",", "self", ".", "parent_object", ",", "content", ")" ]
31.933333
20.333333
def vol_tehrahedron(poly): """volume of a irregular tetrahedron""" p_a = np.array(poly[0]) p_b = np.array(poly[1]) p_c = np.array(poly[2]) p_d = np.array(poly[3]) return abs(np.dot( np.subtract(p_a, p_d), np.cross( np.subtract(p_b, p_d), np.subtract(p_c, p_d))) / 6)
[ "def", "vol_tehrahedron", "(", "poly", ")", ":", "p_a", "=", "np", ".", "array", "(", "poly", "[", "0", "]", ")", "p_b", "=", "np", ".", "array", "(", "poly", "[", "1", "]", ")", "p_c", "=", "np", ".", "array", "(", "poly", "[", "2", "]", ")", "p_d", "=", "np", ".", "array", "(", "poly", "[", "3", "]", ")", "return", "abs", "(", "np", ".", "dot", "(", "np", ".", "subtract", "(", "p_a", ",", "p_d", ")", ",", "np", ".", "cross", "(", "np", ".", "subtract", "(", "p_b", ",", "p_d", ")", ",", "np", ".", "subtract", "(", "p_c", ",", "p_d", ")", ")", ")", "/", "6", ")" ]
29.090909
11.181818
def permute(self, qubits: Qubits) -> 'QubitVector': """Permute the order of the qubits""" if qubits == self.qubits: return self N = self.qubit_nb assert len(qubits) == N # Will raise a value error if qubits don't match indices = [self.qubits.index(q) for q in qubits] # type: ignore perm: List[int] = [] for rr in range(0, self.rank): perm += [rr * N + idx for idx in indices] tensor = bk.transpose(self.tensor, perm) return QubitVector(tensor, qubits)
[ "def", "permute", "(", "self", ",", "qubits", ":", "Qubits", ")", "->", "'QubitVector'", ":", "if", "qubits", "==", "self", ".", "qubits", ":", "return", "self", "N", "=", "self", ".", "qubit_nb", "assert", "len", "(", "qubits", ")", "==", "N", "# Will raise a value error if qubits don't match", "indices", "=", "[", "self", ".", "qubits", ".", "index", "(", "q", ")", "for", "q", "in", "qubits", "]", "# type: ignore", "perm", ":", "List", "[", "int", "]", "=", "[", "]", "for", "rr", "in", "range", "(", "0", ",", "self", ".", "rank", ")", ":", "perm", "+=", "[", "rr", "*", "N", "+", "idx", "for", "idx", "in", "indices", "]", "tensor", "=", "bk", ".", "transpose", "(", "self", ".", "tensor", ",", "perm", ")", "return", "QubitVector", "(", "tensor", ",", "qubits", ")" ]
32.058824
17.882353
def update_doc(input, **params): """ Updates document with value from another document :param input: :param params: :return: """ PARAM_SOURCE = 'source' SOURCE_COL = 'src.col' SOURCE_FIELD = 'src.field' PARAM_RESULT = 'result' res = input[params.get(PARAM_RESULT)] for src in params.get(PARAM_SOURCE): res[src[SOURCE_FIELD]] = input[src[SOURCE_COL]][src[SOURCE_FIELD]] return res
[ "def", "update_doc", "(", "input", ",", "*", "*", "params", ")", ":", "PARAM_SOURCE", "=", "'source'", "SOURCE_COL", "=", "'src.col'", "SOURCE_FIELD", "=", "'src.field'", "PARAM_RESULT", "=", "'result'", "res", "=", "input", "[", "params", ".", "get", "(", "PARAM_RESULT", ")", "]", "for", "src", "in", "params", ".", "get", "(", "PARAM_SOURCE", ")", ":", "res", "[", "src", "[", "SOURCE_FIELD", "]", "]", "=", "input", "[", "src", "[", "SOURCE_COL", "]", "]", "[", "src", "[", "SOURCE_FIELD", "]", "]", "return", "res" ]
26.5625
15.3125
def rename_notes_folder(self, title, folderid): """Rename a folder :param title: New title of the folder :param folderid: The UUID of the folder to rename """ if self.standard_grant_type is not "authorization_code": raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.") response = self._req('/notes/folders/rename/{}'.format(folderid), post_data={ 'title' : title }) return response
[ "def", "rename_notes_folder", "(", "self", ",", "title", ",", "folderid", ")", ":", "if", "self", ".", "standard_grant_type", "is", "not", "\"authorization_code\"", ":", "raise", "DeviantartError", "(", "\"Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.\"", ")", "response", "=", "self", ".", "_req", "(", "'/notes/folders/rename/{}'", ".", "format", "(", "folderid", ")", ",", "post_data", "=", "{", "'title'", ":", "title", "}", ")", "return", "response" ]
33.5
28.6875
def resize_bytes(self, size): """ Resize the buffer (in-place, deferred operation) Parameters ---------- size : integer New buffer size in bytes Notes ----- This clears any pending operations. """ Buffer.resize_bytes(self, size) self._size = size // self.itemsize
[ "def", "resize_bytes", "(", "self", ",", "size", ")", ":", "Buffer", ".", "resize_bytes", "(", "self", ",", "size", ")", "self", ".", "_size", "=", "size", "//", "self", ".", "itemsize" ]
24.571429
15.5
def disk_clone(hypervisor, identifier, storage_pool, configuration, image, logger): """Disk image cloning. Given an original disk image it clones it into a new one, the clone will be created within the storage pool. The following values are set into the disk XML configuration: * name * target/path * target/permission/label * backingStore/path if copy on write is enabled """ cow = configuration.get('copy_on_write', False) try: volume = hypervisor.storageVolLookupByPath(image) except libvirt.libvirtError: if os.path.exists(image): pool_path = os.path.dirname(image) logger.info("LibVirt pool does not exist, creating {} pool".format( pool_path.replace('/', '_'))) pool = hypervisor.storagePoolDefineXML(BASE_POOL_CONFIG.format( pool_path.replace('/', '_'), pool_path)) pool.setAutostart(True) pool.create() pool.refresh() volume = hypervisor.storageVolLookupByPath(image) else: raise RuntimeError( "%s disk does not exist." % image) xml = disk_xml(identifier, storage_pool.XMLDesc(0), volume.XMLDesc(0), cow) if cow: storage_pool.createXML(xml, 0) else: storage_pool.createXMLFrom(xml, volume, 0)
[ "def", "disk_clone", "(", "hypervisor", ",", "identifier", ",", "storage_pool", ",", "configuration", ",", "image", ",", "logger", ")", ":", "cow", "=", "configuration", ".", "get", "(", "'copy_on_write'", ",", "False", ")", "try", ":", "volume", "=", "hypervisor", ".", "storageVolLookupByPath", "(", "image", ")", "except", "libvirt", ".", "libvirtError", ":", "if", "os", ".", "path", ".", "exists", "(", "image", ")", ":", "pool_path", "=", "os", ".", "path", ".", "dirname", "(", "image", ")", "logger", ".", "info", "(", "\"LibVirt pool does not exist, creating {} pool\"", ".", "format", "(", "pool_path", ".", "replace", "(", "'/'", ",", "'_'", ")", ")", ")", "pool", "=", "hypervisor", ".", "storagePoolDefineXML", "(", "BASE_POOL_CONFIG", ".", "format", "(", "pool_path", ".", "replace", "(", "'/'", ",", "'_'", ")", ",", "pool_path", ")", ")", "pool", ".", "setAutostart", "(", "True", ")", "pool", ".", "create", "(", ")", "pool", ".", "refresh", "(", ")", "volume", "=", "hypervisor", ".", "storageVolLookupByPath", "(", "image", ")", "else", ":", "raise", "RuntimeError", "(", "\"%s disk does not exist.\"", "%", "image", ")", "xml", "=", "disk_xml", "(", "identifier", ",", "storage_pool", ".", "XMLDesc", "(", "0", ")", ",", "volume", ".", "XMLDesc", "(", "0", ")", ",", "cow", ")", "if", "cow", ":", "storage_pool", ".", "createXML", "(", "xml", ",", "0", ")", "else", ":", "storage_pool", ".", "createXMLFrom", "(", "xml", ",", "volume", ",", "0", ")" ]
34.684211
23.131579
def get_doc(node): """ Return a node's documentation as a string, pulling from annotations or constructing a simple fake as needed. """ res = " ".join(get_doc_annotations(node)) if not res: res = "(%s)" % node.__class__.__name__.lower() return res
[ "def", "get_doc", "(", "node", ")", ":", "res", "=", "\" \"", ".", "join", "(", "get_doc_annotations", "(", "node", ")", ")", "if", "not", "res", ":", "res", "=", "\"(%s)\"", "%", "node", ".", "__class__", ".", "__name__", ".", "lower", "(", ")", "return", "res" ]
30.555556
14.111111
def join_swarm(self, remote_addrs, join_token, listen_addr='0.0.0.0:2377', advertise_addr=None): """ Make this Engine join a swarm that has already been created. Args: remote_addrs (:py:class:`list`): Addresses of one or more manager nodes already participating in the Swarm to join. join_token (string): Secret token for joining this Swarm. listen_addr (string): Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). Default: ``'0.0.0.0:2377`` advertise_addr (string): Externally reachable address advertised to other nodes. This can either be an address/port combination in the form ``192.168.1.1:4567``, or an interface followed by a port number, like ``eth0:4567``. If the port number is omitted, the port number from the listen address is used. If AdvertiseAddr is not specified, it will be automatically detected when possible. Default: ``None`` Returns: ``True`` if the request went through. Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ data = { "RemoteAddrs": remote_addrs, "ListenAddr": listen_addr, "JoinToken": join_token, "AdvertiseAddr": advertise_addr, } url = self._url('/swarm/join') response = self._post_json(url, data=data) self._raise_for_status(response) return True
[ "def", "join_swarm", "(", "self", ",", "remote_addrs", ",", "join_token", ",", "listen_addr", "=", "'0.0.0.0:2377'", ",", "advertise_addr", "=", "None", ")", ":", "data", "=", "{", "\"RemoteAddrs\"", ":", "remote_addrs", ",", "\"ListenAddr\"", ":", "listen_addr", ",", "\"JoinToken\"", ":", "join_token", ",", "\"AdvertiseAddr\"", ":", "advertise_addr", ",", "}", "url", "=", "self", ".", "_url", "(", "'/swarm/join'", ")", "response", "=", "self", ".", "_post_json", "(", "url", ",", "data", "=", "data", ")", "self", ".", "_raise_for_status", "(", "response", ")", "return", "True" ]
45.394737
21.657895
def tree_node_to_xml(self, parent, item): """Converts a treeview item and children to xml nodes""" tree = self.treeview data = self.treedata[item] node = data.to_xml_node() children = tree.get_children(item) for child in children: cnode = ET.Element('child') cwidget = self.tree_node_to_xml(item, child) cnode.append(cwidget) node.append(cnode) return node
[ "def", "tree_node_to_xml", "(", "self", ",", "parent", ",", "item", ")", ":", "tree", "=", "self", ".", "treeview", "data", "=", "self", ".", "treedata", "[", "item", "]", "node", "=", "data", ".", "to_xml_node", "(", ")", "children", "=", "tree", ".", "get_children", "(", "item", ")", "for", "child", "in", "children", ":", "cnode", "=", "ET", ".", "Element", "(", "'child'", ")", "cwidget", "=", "self", ".", "tree_node_to_xml", "(", "item", ",", "child", ")", "cnode", ".", "append", "(", "cwidget", ")", "node", ".", "append", "(", "cnode", ")", "return", "node" ]
29.933333
14.2
async def sort(self, request, reverse=False): """Sort collection.""" return sorted( self.collection, key=lambda o: getattr(o, self.columns_sort, 0), reverse=reverse)
[ "async", "def", "sort", "(", "self", ",", "request", ",", "reverse", "=", "False", ")", ":", "return", "sorted", "(", "self", ".", "collection", ",", "key", "=", "lambda", "o", ":", "getattr", "(", "o", ",", "self", ".", "columns_sort", ",", "0", ")", ",", "reverse", "=", "reverse", ")" ]
47.5
19
def stripboxplot(x, y, data, ax=None, significant=None, **kwargs): """ Overlay a stripplot on top of a boxplot. """ ax = sb.boxplot( x=x, y=y, data=data, ax=ax, fliersize=0, **kwargs ) plot = sb.stripplot( x=x, y=y, data=data, ax=ax, jitter=kwargs.pop("jitter", 0.05), color=kwargs.pop("color", "0.3"), **kwargs ) if data[y].min() >= 0: hide_negative_y_ticks(plot) if significant is not None: add_significance_indicator(plot=plot, significant=significant) return plot
[ "def", "stripboxplot", "(", "x", ",", "y", ",", "data", ",", "ax", "=", "None", ",", "significant", "=", "None", ",", "*", "*", "kwargs", ")", ":", "ax", "=", "sb", ".", "boxplot", "(", "x", "=", "x", ",", "y", "=", "y", ",", "data", "=", "data", ",", "ax", "=", "ax", ",", "fliersize", "=", "0", ",", "*", "*", "kwargs", ")", "plot", "=", "sb", ".", "stripplot", "(", "x", "=", "x", ",", "y", "=", "y", ",", "data", "=", "data", ",", "ax", "=", "ax", ",", "jitter", "=", "kwargs", ".", "pop", "(", "\"jitter\"", ",", "0.05", ")", ",", "color", "=", "kwargs", ".", "pop", "(", "\"color\"", ",", "\"0.3\"", ")", ",", "*", "*", "kwargs", ")", "if", "data", "[", "y", "]", ".", "min", "(", ")", ">=", "0", ":", "hide_negative_y_ticks", "(", "plot", ")", "if", "significant", "is", "not", "None", ":", "add_significance_indicator", "(", "plot", "=", "plot", ",", "significant", "=", "significant", ")", "return", "plot" ]
20.758621
21.310345
def asserts(self, *args, **kwargs): """Wraps match method and places under an assertion. Override this for higher-level control, such as returning a custom object for additional validation (e.g. expect().to.change()) """ result = self.match(*args, **kwargs) self.expect(result) return result
[ "def", "asserts", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "result", "=", "self", ".", "match", "(", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "expect", "(", "result", ")", "return", "result" ]
48.428571
13.714286
def check_AP_phase(abf,n=10): """X""" timePoints=get_AP_timepoints(abf)[:10] #first 10 if len(timePoints)==0: return swhlab.plot.new(abf,True,title="AP phase (n=%d)"%n,xlabel="mV",ylabel="V/S") Ys=abf.get_data_around(timePoints,msDeriv=.1,padding=.005) Xs=abf.get_data_around(timePoints,padding=.005) for i in range(1,len(Ys)): pylab.plot(Xs[i],Ys[i],alpha=.2,color='b') pylab.plot(Xs[0],Ys[0],alpha=.4,color='r',lw=1) pylab.margins(.1,.1)
[ "def", "check_AP_phase", "(", "abf", ",", "n", "=", "10", ")", ":", "timePoints", "=", "get_AP_timepoints", "(", "abf", ")", "[", ":", "10", "]", "#first 10", "if", "len", "(", "timePoints", ")", "==", "0", ":", "return", "swhlab", ".", "plot", ".", "new", "(", "abf", ",", "True", ",", "title", "=", "\"AP phase (n=%d)\"", "%", "n", ",", "xlabel", "=", "\"mV\"", ",", "ylabel", "=", "\"V/S\"", ")", "Ys", "=", "abf", ".", "get_data_around", "(", "timePoints", ",", "msDeriv", "=", ".1", ",", "padding", "=", ".005", ")", "Xs", "=", "abf", ".", "get_data_around", "(", "timePoints", ",", "padding", "=", ".005", ")", "for", "i", "in", "range", "(", "1", ",", "len", "(", "Ys", ")", ")", ":", "pylab", ".", "plot", "(", "Xs", "[", "i", "]", ",", "Ys", "[", "i", "]", ",", "alpha", "=", ".2", ",", "color", "=", "'b'", ")", "pylab", ".", "plot", "(", "Xs", "[", "0", "]", ",", "Ys", "[", "0", "]", ",", "alpha", "=", ".4", ",", "color", "=", "'r'", ",", "lw", "=", "1", ")", "pylab", ".", "margins", "(", ".1", ",", ".1", ")" ]
40
15.25
def ref_build_and_muscle_chunk(data, sample): """ 1. Run bedtools to get all overlapping regions 2. Parse out reads from regions using pysam and dump into chunk files. We measure it out to create 10 chunk files per sample. 3. If we really wanted to speed this up, though it is pretty fast already, we could parallelize it since we can easily break the regions into a list of chunks. """ ## get regions using bedtools regions = bedtools_merge(data, sample).strip().split("\n") nregions = len(regions) chunksize = (nregions / 10) + (nregions % 10) LOGGER.debug("nregions {} chunksize {}".format(nregions, chunksize)) ## create an output file to write clusters to idx = 0 tmpfile = os.path.join(data.tmpdir, sample.name+"_chunk_{}.ali") ## remove old files if they exist to avoid append errors for i in range(11): if os.path.exists(tmpfile.format(i)): os.remove(tmpfile.format(i)) fopen = open ## If reference+denovo we drop the reads back into clust.gz ## and let the muscle_chunker do it's thing back in cluster_within if data.paramsdict["assembly_method"] == "denovo+reference": tmpfile = os.path.join(data.dirs.clusts, sample.name+".clust.gz") fopen = gzip.open ## build clusters for aligning with muscle from the sorted bam file samfile = pysam.AlignmentFile(sample.files.mapped_reads, 'rb') #"./tortas_refmapping/PZ70-mapped-sorted.bam", "rb") ## fill clusts list and dump periodically clusts = [] nclusts = 0 for region in regions: chrom, pos1, pos2 = region.split() try: ## fetches pairs quickly but then goes slow to merge them. 
if "pair" in data.paramsdict["datatype"]: clust = fetch_cluster_pairs(data, samfile, chrom, int(pos1), int(pos2)) ## fetch but no need to merge else: clust = fetch_cluster_se(data, samfile, chrom, int(pos1), int(pos2)) except IndexError as inst: LOGGER.error("Bad region chrom:start-end {}:{}-{}".format(chrom, pos1, pos2)) continue if clust: clusts.append("\n".join(clust)) nclusts += 1 if nclusts == chunksize: ## write to file tmphandle = tmpfile.format(idx) with fopen(tmphandle, 'a') as tmp: #LOGGER.debug("Writing tmpfile - {}".format(tmpfile.format(idx))) #if data.paramsdict["assembly_method"] == "denovo+reference": # ## This is dumb, but for this method you need to prepend the # ## separator to maintain proper formatting of clust.gz tmp.write("\n//\n//\n".join(clusts)+"\n//\n//\n") idx += 1 nclusts = 0 clusts = [] if clusts: ## write remaining to file with fopen(tmpfile.format(idx), 'a') as tmp: #tmp.write("\n//\n//\n" + ("\n//\n//\n".join(clusts))) tmp.write("\n//\n//\n".join(clusts)+"\n//\n//\n") clusts = [] if not data.paramsdict["assembly_method"] == "denovo+reference": chunkfiles = glob.glob(os.path.join(data.tmpdir, sample.name+"_chunk_*.ali")) LOGGER.info("created chunks %s", chunkfiles) ## cleanup samfile.close()
[ "def", "ref_build_and_muscle_chunk", "(", "data", ",", "sample", ")", ":", "## get regions using bedtools", "regions", "=", "bedtools_merge", "(", "data", ",", "sample", ")", ".", "strip", "(", ")", ".", "split", "(", "\"\\n\"", ")", "nregions", "=", "len", "(", "regions", ")", "chunksize", "=", "(", "nregions", "/", "10", ")", "+", "(", "nregions", "%", "10", ")", "LOGGER", ".", "debug", "(", "\"nregions {} chunksize {}\"", ".", "format", "(", "nregions", ",", "chunksize", ")", ")", "## create an output file to write clusters to", "idx", "=", "0", "tmpfile", "=", "os", ".", "path", ".", "join", "(", "data", ".", "tmpdir", ",", "sample", ".", "name", "+", "\"_chunk_{}.ali\"", ")", "## remove old files if they exist to avoid append errors", "for", "i", "in", "range", "(", "11", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "tmpfile", ".", "format", "(", "i", ")", ")", ":", "os", ".", "remove", "(", "tmpfile", ".", "format", "(", "i", ")", ")", "fopen", "=", "open", "## If reference+denovo we drop the reads back into clust.gz", "## and let the muscle_chunker do it's thing back in cluster_within", "if", "data", ".", "paramsdict", "[", "\"assembly_method\"", "]", "==", "\"denovo+reference\"", ":", "tmpfile", "=", "os", ".", "path", ".", "join", "(", "data", ".", "dirs", ".", "clusts", ",", "sample", ".", "name", "+", "\".clust.gz\"", ")", "fopen", "=", "gzip", ".", "open", "## build clusters for aligning with muscle from the sorted bam file", "samfile", "=", "pysam", ".", "AlignmentFile", "(", "sample", ".", "files", ".", "mapped_reads", ",", "'rb'", ")", "#\"./tortas_refmapping/PZ70-mapped-sorted.bam\", \"rb\")", "## fill clusts list and dump periodically", "clusts", "=", "[", "]", "nclusts", "=", "0", "for", "region", "in", "regions", ":", "chrom", ",", "pos1", ",", "pos2", "=", "region", ".", "split", "(", ")", "try", ":", "## fetches pairs quickly but then goes slow to merge them.", "if", "\"pair\"", "in", "data", ".", "paramsdict", "[", "\"datatype\"", 
"]", ":", "clust", "=", "fetch_cluster_pairs", "(", "data", ",", "samfile", ",", "chrom", ",", "int", "(", "pos1", ")", ",", "int", "(", "pos2", ")", ")", "## fetch but no need to merge", "else", ":", "clust", "=", "fetch_cluster_se", "(", "data", ",", "samfile", ",", "chrom", ",", "int", "(", "pos1", ")", ",", "int", "(", "pos2", ")", ")", "except", "IndexError", "as", "inst", ":", "LOGGER", ".", "error", "(", "\"Bad region chrom:start-end {}:{}-{}\"", ".", "format", "(", "chrom", ",", "pos1", ",", "pos2", ")", ")", "continue", "if", "clust", ":", "clusts", ".", "append", "(", "\"\\n\"", ".", "join", "(", "clust", ")", ")", "nclusts", "+=", "1", "if", "nclusts", "==", "chunksize", ":", "## write to file", "tmphandle", "=", "tmpfile", ".", "format", "(", "idx", ")", "with", "fopen", "(", "tmphandle", ",", "'a'", ")", "as", "tmp", ":", "#LOGGER.debug(\"Writing tmpfile - {}\".format(tmpfile.format(idx)))", "#if data.paramsdict[\"assembly_method\"] == \"denovo+reference\":", "# ## This is dumb, but for this method you need to prepend the", "# ## separator to maintain proper formatting of clust.gz", "tmp", ".", "write", "(", "\"\\n//\\n//\\n\"", ".", "join", "(", "clusts", ")", "+", "\"\\n//\\n//\\n\"", ")", "idx", "+=", "1", "nclusts", "=", "0", "clusts", "=", "[", "]", "if", "clusts", ":", "## write remaining to file", "with", "fopen", "(", "tmpfile", ".", "format", "(", "idx", ")", ",", "'a'", ")", "as", "tmp", ":", "#tmp.write(\"\\n//\\n//\\n\" + (\"\\n//\\n//\\n\".join(clusts)))", "tmp", ".", "write", "(", "\"\\n//\\n//\\n\"", ".", "join", "(", "clusts", ")", "+", "\"\\n//\\n//\\n\"", ")", "clusts", "=", "[", "]", "if", "not", "data", ".", "paramsdict", "[", "\"assembly_method\"", "]", "==", "\"denovo+reference\"", ":", "chunkfiles", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "data", ".", "tmpdir", ",", "sample", ".", "name", "+", "\"_chunk_*.ali\"", ")", ")", "LOGGER", ".", "info", "(", "\"created chunks %s\"", ",", "chunkfiles", ")", "## 
cleanup", "samfile", ".", "close", "(", ")" ]
41.7
22.6625
def _pci_seq(ipmicmd): """Get output of ipmiraw command and the ordinal numbers. :param ipmicmd: IPMI command object. :returns: List of tuple contain ordinal number and output of ipmiraw command. """ for i in range(1, 0xff + 1): try: res = _send_raw_command(ipmicmd, GET_PCI % hex(i)) yield i, res except ipmi_exception.IpmiException as e: raise IPMIFailure( "IPMI operation '%(operation)s' failed: %(error)s" % {'operation': "GET PCI device quantity", 'error': e})
[ "def", "_pci_seq", "(", "ipmicmd", ")", ":", "for", "i", "in", "range", "(", "1", ",", "0xff", "+", "1", ")", ":", "try", ":", "res", "=", "_send_raw_command", "(", "ipmicmd", ",", "GET_PCI", "%", "hex", "(", "i", ")", ")", "yield", "i", ",", "res", "except", "ipmi_exception", ".", "IpmiException", "as", "e", ":", "raise", "IPMIFailure", "(", "\"IPMI operation '%(operation)s' failed: %(error)s\"", "%", "{", "'operation'", ":", "\"GET PCI device quantity\"", ",", "'error'", ":", "e", "}", ")" ]
37.333333
17.866667
def process_input(self, question): """ takes a question and returns the best answer based on known skills """ ans = '' if self.status == 'EXIT': print('bye') sys.exit() if '?' in question: ans = self.info.find_answer(question) elif question.startswith(':LIST'): ans = 'List of Raw Input\n' for i in self.info.raw_input: ans += str(i) + '\n' else: #ans = 'I dont'' know' ans = 'Adding info..' self.info.raw_input.append(question) self.lg.record_process('aggie.py', 'Question > ' + question) self.lg.record_process('aggie.py', 'Answer > ' + ans) return ans
[ "def", "process_input", "(", "self", ",", "question", ")", ":", "ans", "=", "''", "if", "self", ".", "status", "==", "'EXIT'", ":", "print", "(", "'bye'", ")", "sys", ".", "exit", "(", ")", "if", "'?'", "in", "question", ":", "ans", "=", "self", ".", "info", ".", "find_answer", "(", "question", ")", "elif", "question", ".", "startswith", "(", "':LIST'", ")", ":", "ans", "=", "'List of Raw Input\\n'", "for", "i", "in", "self", ".", "info", ".", "raw_input", ":", "ans", "+=", "str", "(", "i", ")", "+", "'\\n'", "else", ":", "#ans = 'I dont'' know'", "ans", "=", "'Adding info..'", "self", ".", "info", ".", "raw_input", ".", "append", "(", "question", ")", "self", ".", "lg", ".", "record_process", "(", "'aggie.py'", ",", "'Question > '", "+", "question", ")", "self", ".", "lg", ".", "record_process", "(", "'aggie.py'", ",", "'Answer > '", "+", "ans", ")", "return", "ans" ]
31.958333
14.291667
def load(settings=None, namespace=None, prefix=None): """ Call this guy to init pyaas stuffs :param settings: Alternative name of ini file to load :param namespace: Namespace is used to derive paths, pass '' for an empty namespace :param prefix: The root path of the app :return: None """ parent = pyaas.util.getParent() script_name = os.path.basename(parent) script_name = script_name.rsplit('.', 1)[0] if prefix is None: # get the filename of the caller # get the directory name of the file prefix = os.path.dirname(parent) if prefix.endswith(os.path.sep + 'bin'): prefix = os.path.join(prefix, '..') prefix = os.path.abspath(prefix) prefix = os.path.abspath(prefix) if pyaas.prefix != prefix: pyaas.prefix = prefix logging.debug('Setting prefix to "%s"', pyaas.prefix) if namespace is None: namespace = script_name if namespace != pyaas.namespace: pyaas.namespace = namespace logging.debug('Setting namespace to "%s"', pyaas.namespace) # if settings is not passed in use the supplied or derived namespace settings = settings or namespace pyaas.args = pyaas.argparser.parse_args() pyaas.config = configparser.SafeConfigParser(dict_type=collections.OrderedDict) pyaas.config.optionxform = str ini_files = [ pyaas.paths('etc', settings + '.ini'), pyaas.paths('etc', settings + '.ini.local') ] if pyaas.args.ini: ini_files.append(pyaas.args.ini) try: ok = pyaas.config.read(ini_files) except configparser.ParsingError as e: raise pyaas.error('Unable to parse file: %s', e) if not ok: raise pyaas.error('Unable to read config file(s): %s', ini_files) # setup file log file_name = '%s_%s.log' % (script_name, time.strftime('%Y%m%d_%H%M%S')) # hack back in single log file option without breaking existing code if pyaas.config.has_section('logging'): if pyaas.config.has_option('logging', 'append'): append = pyaas.config.getboolean('logging', 'append') if append: file_name = script_name + '.log' full_path = pyaas.paths('var', file_name) logfile = logging.FileHandler(full_path) 
logfile.setLevel(logging.INFO) logfile.setFormatter( logging.Formatter( fmt = '%(asctime)s %(levelname)-8s %(message)s', datefmt = '%Y-%m-%d %H:%M:%S', ) ) # add the handlers to the logger root = logging.getLogger() root.addHandler(logfile) if pyaas.args.debug: root.setLevel(logging.DEBUG) logfile.setLevel(logging.DEBUG) # call this here if there is no daemon option if not hasattr(pyaas.args, 'daemon'): pyaas.module.load() return
[ "def", "load", "(", "settings", "=", "None", ",", "namespace", "=", "None", ",", "prefix", "=", "None", ")", ":", "parent", "=", "pyaas", ".", "util", ".", "getParent", "(", ")", "script_name", "=", "os", ".", "path", ".", "basename", "(", "parent", ")", "script_name", "=", "script_name", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "0", "]", "if", "prefix", "is", "None", ":", "# get the filename of the caller", "# get the directory name of the file", "prefix", "=", "os", ".", "path", ".", "dirname", "(", "parent", ")", "if", "prefix", ".", "endswith", "(", "os", ".", "path", ".", "sep", "+", "'bin'", ")", ":", "prefix", "=", "os", ".", "path", ".", "join", "(", "prefix", ",", "'..'", ")", "prefix", "=", "os", ".", "path", ".", "abspath", "(", "prefix", ")", "prefix", "=", "os", ".", "path", ".", "abspath", "(", "prefix", ")", "if", "pyaas", ".", "prefix", "!=", "prefix", ":", "pyaas", ".", "prefix", "=", "prefix", "logging", ".", "debug", "(", "'Setting prefix to \"%s\"'", ",", "pyaas", ".", "prefix", ")", "if", "namespace", "is", "None", ":", "namespace", "=", "script_name", "if", "namespace", "!=", "pyaas", ".", "namespace", ":", "pyaas", ".", "namespace", "=", "namespace", "logging", ".", "debug", "(", "'Setting namespace to \"%s\"'", ",", "pyaas", ".", "namespace", ")", "# if settings is not passed in use the supplied or derived namespace", "settings", "=", "settings", "or", "namespace", "pyaas", ".", "args", "=", "pyaas", ".", "argparser", ".", "parse_args", "(", ")", "pyaas", ".", "config", "=", "configparser", ".", "SafeConfigParser", "(", "dict_type", "=", "collections", ".", "OrderedDict", ")", "pyaas", ".", "config", ".", "optionxform", "=", "str", "ini_files", "=", "[", "pyaas", ".", "paths", "(", "'etc'", ",", "settings", "+", "'.ini'", ")", ",", "pyaas", ".", "paths", "(", "'etc'", ",", "settings", "+", "'.ini.local'", ")", "]", "if", "pyaas", ".", "args", ".", "ini", ":", "ini_files", ".", "append", "(", "pyaas", ".", "args", ".", "ini", ")", 
"try", ":", "ok", "=", "pyaas", ".", "config", ".", "read", "(", "ini_files", ")", "except", "configparser", ".", "ParsingError", "as", "e", ":", "raise", "pyaas", ".", "error", "(", "'Unable to parse file: %s'", ",", "e", ")", "if", "not", "ok", ":", "raise", "pyaas", ".", "error", "(", "'Unable to read config file(s): %s'", ",", "ini_files", ")", "# setup file log", "file_name", "=", "'%s_%s.log'", "%", "(", "script_name", ",", "time", ".", "strftime", "(", "'%Y%m%d_%H%M%S'", ")", ")", "# hack back in single log file option without breaking existing code", "if", "pyaas", ".", "config", ".", "has_section", "(", "'logging'", ")", ":", "if", "pyaas", ".", "config", ".", "has_option", "(", "'logging'", ",", "'append'", ")", ":", "append", "=", "pyaas", ".", "config", ".", "getboolean", "(", "'logging'", ",", "'append'", ")", "if", "append", ":", "file_name", "=", "script_name", "+", "'.log'", "full_path", "=", "pyaas", ".", "paths", "(", "'var'", ",", "file_name", ")", "logfile", "=", "logging", ".", "FileHandler", "(", "full_path", ")", "logfile", ".", "setLevel", "(", "logging", ".", "INFO", ")", "logfile", ".", "setFormatter", "(", "logging", ".", "Formatter", "(", "fmt", "=", "'%(asctime)s %(levelname)-8s %(message)s'", ",", "datefmt", "=", "'%Y-%m-%d %H:%M:%S'", ",", ")", ")", "# add the handlers to the logger", "root", "=", "logging", ".", "getLogger", "(", ")", "root", ".", "addHandler", "(", "logfile", ")", "if", "pyaas", ".", "args", ".", "debug", ":", "root", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "logfile", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "# call this here if there is no daemon option", "if", "not", "hasattr", "(", "pyaas", ".", "args", ",", "'daemon'", ")", ":", "pyaas", ".", "module", ".", "load", "(", ")", "return" ]
30.130435
19.347826
def is_literal_or_name(value): """Return True if value is a literal or a name.""" try: ast.literal_eval(value) return True except (SyntaxError, ValueError): pass if value.strip() in ['dict()', 'list()', 'set()']: return True # Support removal of variables on the right side. But make sure # there are no dots, which could mean an access of a property. return re.match(r'^\w+\s*$', value)
[ "def", "is_literal_or_name", "(", "value", ")", ":", "try", ":", "ast", ".", "literal_eval", "(", "value", ")", "return", "True", "except", "(", "SyntaxError", ",", "ValueError", ")", ":", "pass", "if", "value", ".", "strip", "(", ")", "in", "[", "'dict()'", ",", "'list()'", ",", "'set()'", "]", ":", "return", "True", "# Support removal of variables on the right side. But make sure", "# there are no dots, which could mean an access of a property.", "return", "re", ".", "match", "(", "r'^\\w+\\s*$'", ",", "value", ")" ]
31.142857
19.428571
def start(name): ''' Start the specified service CLI Example: .. code-block:: bash salt '*' service.start <service name> ''' cmd = '/usr/sbin/svcadm enable -s -t {0}'.format(name) retcode = __salt__['cmd.retcode'](cmd, python_shell=False) if not retcode: return True if retcode == 3: # Return code 3 means there was a problem with the service # A common case is being in the 'maintenance' state # Attempt a clear and try one more time clear_cmd = '/usr/sbin/svcadm clear {0}'.format(name) __salt__['cmd.retcode'](clear_cmd, python_shell=False) return not __salt__['cmd.retcode'](cmd, python_shell=False) return False
[ "def", "start", "(", "name", ")", ":", "cmd", "=", "'/usr/sbin/svcadm enable -s -t {0}'", ".", "format", "(", "name", ")", "retcode", "=", "__salt__", "[", "'cmd.retcode'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")", "if", "not", "retcode", ":", "return", "True", "if", "retcode", "==", "3", ":", "# Return code 3 means there was a problem with the service", "# A common case is being in the 'maintenance' state", "# Attempt a clear and try one more time", "clear_cmd", "=", "'/usr/sbin/svcadm clear {0}'", ".", "format", "(", "name", ")", "__salt__", "[", "'cmd.retcode'", "]", "(", "clear_cmd", ",", "python_shell", "=", "False", ")", "return", "not", "__salt__", "[", "'cmd.retcode'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")", "return", "False" ]
31.909091
23.272727
def strain_in_plane(self, **kwargs): ''' Returns the in-plane strain assuming no lattice relaxation, which is positive for tensile strain and negative for compressive strain. ''' if self._strain_out_of_plane is not None: return ((self._strain_out_of_plane / -2.) * (self.unstrained.c11(**kwargs) / self.unstrained.c12(**kwargs) ) ) else: return 1 - self.unstrained.a(**kwargs) / self.substrate.a(**kwargs)
[ "def", "strain_in_plane", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_strain_out_of_plane", "is", "not", "None", ":", "return", "(", "(", "self", ".", "_strain_out_of_plane", "/", "-", "2.", ")", "*", "(", "self", ".", "unstrained", ".", "c11", "(", "*", "*", "kwargs", ")", "/", "self", ".", "unstrained", ".", "c12", "(", "*", "*", "kwargs", ")", ")", ")", "else", ":", "return", "1", "-", "self", ".", "unstrained", ".", "a", "(", "*", "*", "kwargs", ")", "/", "self", ".", "substrate", ".", "a", "(", "*", "*", "kwargs", ")" ]
46.363636
22.545455
def get_sql(self, debug=False, use_cache=True): """ Generates the sql for this query window and returns the sql as a string. :type debug: bool :param debug: If True, the sql will be returned in a format that is easier to read and debug. Defaults to False :type use_cache: bool :param use_cache: If True, the query will returned the cached sql if it exists rather then generating the sql again. If False, the sql will be generated again. Defaults to True. :rtype: str :return: The generated sql for this query window """ # TODO: implement caching and debug sql = '' sql += self.build_partition_by_fields() sql += self.build_order_by(use_alias=False) sql += self.build_limit() sql = sql.strip() sql = 'OVER ({0})'.format(sql) self.sql = sql return self.sql
[ "def", "get_sql", "(", "self", ",", "debug", "=", "False", ",", "use_cache", "=", "True", ")", ":", "# TODO: implement caching and debug", "sql", "=", "''", "sql", "+=", "self", ".", "build_partition_by_fields", "(", ")", "sql", "+=", "self", ".", "build_order_by", "(", "use_alias", "=", "False", ")", "sql", "+=", "self", ".", "build_limit", "(", ")", "sql", "=", "sql", ".", "strip", "(", ")", "sql", "=", "'OVER ({0})'", ".", "format", "(", "sql", ")", "self", ".", "sql", "=", "sql", "return", "self", ".", "sql" ]
36.08
22.48
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'name') and self.name is not None: _dict['name'] = self.name if hasattr(self, 'dbpedia_resource') and self.dbpedia_resource is not None: _dict['dbpedia_resource'] = self.dbpedia_resource if hasattr(self, 'subtype') and self.subtype is not None: _dict['subtype'] = self.subtype return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'name'", ")", "and", "self", ".", "name", "is", "not", "None", ":", "_dict", "[", "'name'", "]", "=", "self", ".", "name", "if", "hasattr", "(", "self", ",", "'dbpedia_resource'", ")", "and", "self", ".", "dbpedia_resource", "is", "not", "None", ":", "_dict", "[", "'dbpedia_resource'", "]", "=", "self", ".", "dbpedia_resource", "if", "hasattr", "(", "self", ",", "'subtype'", ")", "and", "self", ".", "subtype", "is", "not", "None", ":", "_dict", "[", "'subtype'", "]", "=", "self", ".", "subtype", "return", "_dict" ]
44.181818
17
def _parse_file(self, file_obj): """Directly read from file handler. Note that this will move the file pointer. """ byte_data = file_obj.read(self.size) self._parse_byte_data(byte_data)
[ "def", "_parse_file", "(", "self", ",", "file_obj", ")", ":", "byte_data", "=", "file_obj", ".", "read", "(", "self", ".", "size", ")", "self", ".", "_parse_byte_data", "(", "byte_data", ")" ]
27.5
12.75
def update_user(self, auth, username, update): """ Updates the user with username ``username`` according to ``update``. :param auth.Authentication auth: authentication object, must be admin-level :param str username: username of user to update :param GogsUserUpdate update: a ``GogsUserUpdate`` object describing the requested update :return: the updated user :rtype: GogsUser :raises NetworkFailure: if there is an error communicating with the server :raises ApiFailure: if the request cannot be serviced """ path = "/admin/users/{}".format(username) response = self.patch(path, auth=auth, data=update.as_dict()) return GogsUser.from_json(response.json())
[ "def", "update_user", "(", "self", ",", "auth", ",", "username", ",", "update", ")", ":", "path", "=", "\"/admin/users/{}\"", ".", "format", "(", "username", ")", "response", "=", "self", ".", "patch", "(", "path", ",", "auth", "=", "auth", ",", "data", "=", "update", ".", "as_dict", "(", ")", ")", "return", "GogsUser", ".", "from_json", "(", "response", ".", "json", "(", ")", ")" ]
49.8
22.066667
def locations_for(self, city_name, country=None, matching='nocase'): """ Returns a list of Location objects corresponding to the int IDs and relative toponyms and 2-chars country of the cities matching the provided city name. The rule for identifying matchings is according to the provided `matching` parameter value. If `country` is provided, the search is restricted to the cities of the specified country. :param country: two character str representing the country where to search for the city. Defaults to `None`, which means: search in all countries. :param matching: str among `exact` (literal, case-sensitive matching), `nocase` (literal, case-insensitive matching) and `like` (matches cities whose name contains as a substring the string fed to the function, no matter the case). Defaults to `nocase`. :raises ValueError if the value for `matching` is unknown :return: list of `weatherapi25.location.Location` objects """ if not city_name: return [] if matching not in self.MATCHINGS: raise ValueError("Unknown type of matching: " "allowed values are %s" % ", ".join(self.MATCHINGS)) if country is not None and len(country) != 2: raise ValueError("Country must be a 2-char string") splits = self._filter_matching_lines(city_name, country, matching) return [Location(item[0], float(item[3]), float(item[2]), int(item[1]), item[4]) for item in splits]
[ "def", "locations_for", "(", "self", ",", "city_name", ",", "country", "=", "None", ",", "matching", "=", "'nocase'", ")", ":", "if", "not", "city_name", ":", "return", "[", "]", "if", "matching", "not", "in", "self", ".", "MATCHINGS", ":", "raise", "ValueError", "(", "\"Unknown type of matching: \"", "\"allowed values are %s\"", "%", "\", \"", ".", "join", "(", "self", ".", "MATCHINGS", ")", ")", "if", "country", "is", "not", "None", "and", "len", "(", "country", ")", "!=", "2", ":", "raise", "ValueError", "(", "\"Country must be a 2-char string\"", ")", "splits", "=", "self", ".", "_filter_matching_lines", "(", "city_name", ",", "country", ",", "matching", ")", "return", "[", "Location", "(", "item", "[", "0", "]", ",", "float", "(", "item", "[", "3", "]", ")", ",", "float", "(", "item", "[", "2", "]", ")", ",", "int", "(", "item", "[", "1", "]", ")", ",", "item", "[", "4", "]", ")", "for", "item", "in", "splits", "]" ]
55.275862
22.172414
def file_md5(f, size=8192): "Calculates the MD5 of a file." md5 = hashlib.md5() while True: data = f.read(size) if not data: break md5.update(data) return md5.hexdigest()
[ "def", "file_md5", "(", "f", ",", "size", "=", "8192", ")", ":", "md5", "=", "hashlib", ".", "md5", "(", ")", "while", "True", ":", "data", "=", "f", ".", "read", "(", "size", ")", "if", "not", "data", ":", "break", "md5", ".", "update", "(", "data", ")", "return", "md5", ".", "hexdigest", "(", ")" ]
23.777778
16.222222
def _load_init(self, data, ctx): """(Re)initializes by loading from data.""" if self.shape: for self_dim, data_dim in zip(self.shape, data.shape): assert self_dim in (0, data_dim), \ "Failed loading Parameter '%s' from saved params: " \ "shape incompatible expected %s vs saved %s"%( self.name, str(self.shape), str(data.shape)) self.shape = tuple(i if i != 0 else j for i, j in zip(self.shape, data.shape)) if self.dtype: assert np.dtype(self.dtype).type == data.dtype, \ "Failed loading Parameter '%s' from saved params: " \ "dtype incompatible expected %s vs saved %s"%( self.name, str(self.dtype), str(data.dtype)) if self._stype != data.stype: data = data.tostype(self._stype) if isinstance(ctx, Context): ctx = [ctx] if self._data is None: if self._deferred_init: assert ctx is None or set(ctx) == set(self._deferred_init[1]), \ "Failed to load Parameter '%s' on %s because it was " \ "previous initialized on %s."%( self.name, str(ctx), str(self.list_ctx())) ctx = self._deferred_init[1] elif ctx is None: ctx = [cpu()] self._init_impl(data, ctx) else: assert ctx is None or set(ctx) == set(self.list_ctx()), \ "Failed to load Parameter '%s' on %s because it was " \ "previous initialized on %s."%( self.name, str(ctx), str(self.list_ctx())) self.set_data(data) self._deferred_init = ()
[ "def", "_load_init", "(", "self", ",", "data", ",", "ctx", ")", ":", "if", "self", ".", "shape", ":", "for", "self_dim", ",", "data_dim", "in", "zip", "(", "self", ".", "shape", ",", "data", ".", "shape", ")", ":", "assert", "self_dim", "in", "(", "0", ",", "data_dim", ")", ",", "\"Failed loading Parameter '%s' from saved params: \"", "\"shape incompatible expected %s vs saved %s\"", "%", "(", "self", ".", "name", ",", "str", "(", "self", ".", "shape", ")", ",", "str", "(", "data", ".", "shape", ")", ")", "self", ".", "shape", "=", "tuple", "(", "i", "if", "i", "!=", "0", "else", "j", "for", "i", ",", "j", "in", "zip", "(", "self", ".", "shape", ",", "data", ".", "shape", ")", ")", "if", "self", ".", "dtype", ":", "assert", "np", ".", "dtype", "(", "self", ".", "dtype", ")", ".", "type", "==", "data", ".", "dtype", ",", "\"Failed loading Parameter '%s' from saved params: \"", "\"dtype incompatible expected %s vs saved %s\"", "%", "(", "self", ".", "name", ",", "str", "(", "self", ".", "dtype", ")", ",", "str", "(", "data", ".", "dtype", ")", ")", "if", "self", ".", "_stype", "!=", "data", ".", "stype", ":", "data", "=", "data", ".", "tostype", "(", "self", ".", "_stype", ")", "if", "isinstance", "(", "ctx", ",", "Context", ")", ":", "ctx", "=", "[", "ctx", "]", "if", "self", ".", "_data", "is", "None", ":", "if", "self", ".", "_deferred_init", ":", "assert", "ctx", "is", "None", "or", "set", "(", "ctx", ")", "==", "set", "(", "self", ".", "_deferred_init", "[", "1", "]", ")", ",", "\"Failed to load Parameter '%s' on %s because it was \"", "\"previous initialized on %s.\"", "%", "(", "self", ".", "name", ",", "str", "(", "ctx", ")", ",", "str", "(", "self", ".", "list_ctx", "(", ")", ")", ")", "ctx", "=", "self", ".", "_deferred_init", "[", "1", "]", "elif", "ctx", "is", "None", ":", "ctx", "=", "[", "cpu", "(", ")", "]", "self", ".", "_init_impl", "(", "data", ",", "ctx", ")", "else", ":", "assert", "ctx", "is", "None", "or", "set", "(", "ctx", ")", "==", "set", 
"(", "self", ".", "list_ctx", "(", ")", ")", ",", "\"Failed to load Parameter '%s' on %s because it was \"", "\"previous initialized on %s.\"", "%", "(", "self", ".", "name", ",", "str", "(", "ctx", ")", ",", "str", "(", "self", ".", "list_ctx", "(", ")", ")", ")", "self", ".", "set_data", "(", "data", ")", "self", ".", "_deferred_init", "=", "(", ")" ]
49.685714
18