text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def gff3_to_dataframe(path, attributes=None, region=None, score_fill=-1,
                      phase_fill=-1, attributes_fill='.', tabix='tabix',
                      **kwargs):
    """Load data from a GFF3 into a pandas DataFrame.

    Parameters
    ----------
    path : string
        Path to input file.
    attributes : list of strings, optional
        List of columns to extract from the "attributes" field.
    region : string, optional
        Genome region to extract. If given, file must be position
        sorted, bgzipped and tabix indexed. Tabix must also be installed
        and on the system path.
    score_fill : int, optional
        Value to use where score field has a missing value.
    phase_fill : int, optional
        Value to use where phase field has a missing value.
    attributes_fill : object or list of objects, optional
        Value(s) to use where attribute field(s) have a missing value.
    tabix : string, optional
        Tabix command.

    Returns
    -------
    pandas.DataFrame

    """
    import pandas

    # Materialise all records up front; iter_gff3 handles the parsing
    # and any tabix-based region extraction.
    record_iterator = iter_gff3(path, attributes=attributes, region=region,
                                score_fill=score_fill, phase_fill=phase_fill,
                                attributes_fill=attributes_fill, tabix=tabix)
    records = list(record_iterator)

    # Fixed GFF3 columns, optionally followed by the requested attributes.
    column_names = ['seqid', 'source', 'type', 'start', 'end', 'score',
                    'strand', 'phase']
    if attributes:
        column_names.extend(attributes)

    return pandas.DataFrame.from_records(records, columns=column_names,
                                         **kwargs)
[ "def", "gff3_to_dataframe", "(", "path", ",", "attributes", "=", "None", ",", "region", "=", "None", ",", "score_fill", "=", "-", "1", ",", "phase_fill", "=", "-", "1", ",", "attributes_fill", "=", "'.'", ",", "tabix", "=", "'tabix'", ",", "*", "*", "kwargs", ")", ":", "import", "pandas", "# read records", "recs", "=", "list", "(", "iter_gff3", "(", "path", ",", "attributes", "=", "attributes", ",", "region", "=", "region", ",", "score_fill", "=", "score_fill", ",", "phase_fill", "=", "phase_fill", ",", "attributes_fill", "=", "attributes_fill", ",", "tabix", "=", "tabix", ")", ")", "# load into pandas", "columns", "=", "[", "'seqid'", ",", "'source'", ",", "'type'", ",", "'start'", ",", "'end'", ",", "'score'", ",", "'strand'", ",", "'phase'", "]", "if", "attributes", ":", "columns", "+=", "list", "(", "attributes", ")", "df", "=", "pandas", ".", "DataFrame", ".", "from_records", "(", "recs", ",", "columns", "=", "columns", ",", "*", "*", "kwargs", ")", "return", "df" ]
34.348837
24.255814
def to_gexf(graph, output_path):
    """Writes graph to `GEXF <http://gexf.net>`_.

    Uses the NetworkX method
    `write_gexf <http://networkx.lanl.gov/reference/generated/networkx.readwrite.gexf.write_gexf.html>`_.

    .. deprecated:: 0.8

    Parameters
    ----------
    graph : networkx.Graph
        The Graph to be exported to GEXF.
    output_path : str
        Full path, including filename (without suffix).
        e.g. using "./graphFolder/graphFile" will result in a GEXF file at
        ./graphFolder/graphFile.gexf.
    """
    warnings.warn("Removed in 0.8.", DeprecationWarning)
    # List-valued attributes are not representable in GEXF; drop them first.
    stripped = _strip_list_attributes(graph)
    nx.write_gexf(stripped, output_path + ".gexf")
[ "def", "to_gexf", "(", "graph", ",", "output_path", ")", ":", "warnings", ".", "warn", "(", "\"Removed in 0.8.\"", ",", "DeprecationWarning", ")", "graph", "=", "_strip_list_attributes", "(", "graph", ")", "nx", ".", "write_gexf", "(", "graph", ",", "output_path", "+", "\".gexf\"", ")" ]
32.35
20.35
def keypair_add(self, name, pubfile=None, pubkey=None):
    '''
    Add a keypair
    '''
    compute = self.compute_conn
    # A public-key file, when supplied, takes precedence over an inline key.
    if pubfile:
        with salt.utils.files.fopen(pubfile, 'r') as handle:
            pubkey = salt.utils.stringutils.to_unicode(handle.read())
    if not pubkey:
        # Nothing to register without a public key.
        return False
    compute.keypairs.create(name, public_key=pubkey)
    return {'name': name, 'pubkey': pubkey}
[ "def", "keypair_add", "(", "self", ",", "name", ",", "pubfile", "=", "None", ",", "pubkey", "=", "None", ")", ":", "nt_ks", "=", "self", ".", "compute_conn", "if", "pubfile", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "pubfile", ",", "'r'", ")", "as", "fp_", ":", "pubkey", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "fp_", ".", "read", "(", ")", ")", "if", "not", "pubkey", ":", "return", "False", "nt_ks", ".", "keypairs", ".", "create", "(", "name", ",", "public_key", "=", "pubkey", ")", "ret", "=", "{", "'name'", ":", "name", ",", "'pubkey'", ":", "pubkey", "}", "return", "ret" ]
34.230769
19
def validate(cls, mapper_spec):
    """Validate mapper specification.

    Args:
      mapper_spec: an instance of model.MapperSpec

    Raises:
      BadReaderParamsError: if the specification is invalid for any reason such
        as missing the bucket name or providing an invalid bucket name.
    """
    reader_spec = cls.get_params(mapper_spec, allow_old=False)

    # Bucket Name is required
    if cls.BUCKET_NAME_PARAM not in reader_spec:
        raise errors.BadReaderParamsError(
            "%s is required for Google Cloud Storage" %
            cls.BUCKET_NAME_PARAM)
    try:
        cloudstorage.validate_bucket_name(
            reader_spec[cls.BUCKET_NAME_PARAM])
    # BUGFIX: "except ValueError, error" is Python-2-only syntax that is a
    # SyntaxError on Python 3; the "as" form works on Python 2.6+ and 3.x.
    except ValueError as error:
        raise errors.BadReaderParamsError("Bad bucket name, %s" % (error))

    # Object Name(s) are required
    if cls.OBJECT_NAMES_PARAM not in reader_spec:
        raise errors.BadReaderParamsError(
            "%s is required for Google Cloud Storage" %
            cls.OBJECT_NAMES_PARAM)
    filenames = reader_spec[cls.OBJECT_NAMES_PARAM]
    if not isinstance(filenames, list):
        raise errors.BadReaderParamsError(
            "Object name list is not a list but a %s" %
            filenames.__class__.__name__)
    for filename in filenames:
        # NOTE: basestring keeps Python 2 compatibility; on Python 3 this
        # name does not exist (use str there).
        if not isinstance(filename, basestring):
            raise errors.BadReaderParamsError(
                "Object name is not a string but a %s" %
                filename.__class__.__name__)
    if cls.DELIMITER_PARAM in reader_spec:
        delimiter = reader_spec[cls.DELIMITER_PARAM]
        if not isinstance(delimiter, basestring):
            raise errors.BadReaderParamsError(
                "%s is not a string but a %s" %
                (cls.DELIMITER_PARAM, type(delimiter)))
[ "def", "validate", "(", "cls", ",", "mapper_spec", ")", ":", "reader_spec", "=", "cls", ".", "get_params", "(", "mapper_spec", ",", "allow_old", "=", "False", ")", "# Bucket Name is required", "if", "cls", ".", "BUCKET_NAME_PARAM", "not", "in", "reader_spec", ":", "raise", "errors", ".", "BadReaderParamsError", "(", "\"%s is required for Google Cloud Storage\"", "%", "cls", ".", "BUCKET_NAME_PARAM", ")", "try", ":", "cloudstorage", ".", "validate_bucket_name", "(", "reader_spec", "[", "cls", ".", "BUCKET_NAME_PARAM", "]", ")", "except", "ValueError", ",", "error", ":", "raise", "errors", ".", "BadReaderParamsError", "(", "\"Bad bucket name, %s\"", "%", "(", "error", ")", ")", "# Object Name(s) are required", "if", "cls", ".", "OBJECT_NAMES_PARAM", "not", "in", "reader_spec", ":", "raise", "errors", ".", "BadReaderParamsError", "(", "\"%s is required for Google Cloud Storage\"", "%", "cls", ".", "OBJECT_NAMES_PARAM", ")", "filenames", "=", "reader_spec", "[", "cls", ".", "OBJECT_NAMES_PARAM", "]", "if", "not", "isinstance", "(", "filenames", ",", "list", ")", ":", "raise", "errors", ".", "BadReaderParamsError", "(", "\"Object name list is not a list but a %s\"", "%", "filenames", ".", "__class__", ".", "__name__", ")", "for", "filename", "in", "filenames", ":", "if", "not", "isinstance", "(", "filename", ",", "basestring", ")", ":", "raise", "errors", ".", "BadReaderParamsError", "(", "\"Object name is not a string but a %s\"", "%", "filename", ".", "__class__", ".", "__name__", ")", "if", "cls", ".", "DELIMITER_PARAM", "in", "reader_spec", ":", "delimiter", "=", "reader_spec", "[", "cls", ".", "DELIMITER_PARAM", "]", "if", "not", "isinstance", "(", "delimiter", ",", "basestring", ")", ":", "raise", "errors", ".", "BadReaderParamsError", "(", "\"%s is not a string but a %s\"", "%", "(", "cls", ".", "DELIMITER_PARAM", ",", "type", "(", "delimiter", ")", ")", ")" ]
37.909091
13.136364
def setBendLength(self, x):
    """Assign a new bend length and, if it changed, flag for refresh.

    :param x: new bend length to be assigned, [m]
    :return: None
    """
    if self.bend_length == x:
        # Unchanged value: nothing to update, no refresh needed.
        return
    self.bend_length = x
    self.refresh = True
[ "def", "setBendLength", "(", "self", ",", "x", ")", ":", "if", "x", "!=", "self", ".", "bend_length", ":", "self", ".", "bend_length", "=", "x", "self", ".", "refresh", "=", "True" ]
26.111111
12.111111
def linear_reaction_coefficients(model, reactions=None):
    """Coefficient for the reactions in a linear objective.

    Parameters
    ----------
    model : cobra model
        the model object that defined the objective
    reactions : list
        an optional list for the reactions to get the coefficients for. All
        reactions if left missing.

    Returns
    -------
    dict
        A dictionary where the key is the reaction object and the value is
        the corresponding coefficient. Empty dictionary if there are no
        linear terms in the objective.
    """
    coefficients_by_reaction = {}
    if not reactions:
        reactions = model.reactions
    try:
        expression = model.solver.objective.expression
        all_coefficients = expression.as_coefficients_dict()
    except AttributeError:
        # Objective has no symbolic expression -> no linear terms.
        return coefficients_by_reaction
    for reaction in reactions:
        fwd = all_coefficients.get(reaction.forward_variable, 0)
        rev = all_coefficients.get(reaction.reverse_variable, 0)
        # Only a term whose forward and reverse coefficients mirror each
        # other is a genuinely linear contribution of this reaction.
        if fwd != 0 and fwd == -rev:
            coefficients_by_reaction[reaction] = float(fwd)
    return coefficients_by_reaction
[ "def", "linear_reaction_coefficients", "(", "model", ",", "reactions", "=", "None", ")", ":", "linear_coefficients", "=", "{", "}", "reactions", "=", "model", ".", "reactions", "if", "not", "reactions", "else", "reactions", "try", ":", "objective_expression", "=", "model", ".", "solver", ".", "objective", ".", "expression", "coefficients", "=", "objective_expression", ".", "as_coefficients_dict", "(", ")", "except", "AttributeError", ":", "return", "linear_coefficients", "for", "rxn", "in", "reactions", ":", "forward_coefficient", "=", "coefficients", ".", "get", "(", "rxn", ".", "forward_variable", ",", "0", ")", "reverse_coefficient", "=", "coefficients", ".", "get", "(", "rxn", ".", "reverse_variable", ",", "0", ")", "if", "forward_coefficient", "!=", "0", ":", "if", "forward_coefficient", "==", "-", "reverse_coefficient", ":", "linear_coefficients", "[", "rxn", "]", "=", "float", "(", "forward_coefficient", ")", "return", "linear_coefficients" ]
38
20.9375
def _rand_sparse(m, n, density, format='csr'): """Construct base function for sprand, sprandn.""" nnz = max(min(int(m*n*density), m*n), 0) row = np.random.randint(low=0, high=m-1, size=nnz) col = np.random.randint(low=0, high=n-1, size=nnz) data = np.ones(nnz, dtype=float) # duplicate (i,j) entries will be summed together return sp.sparse.csr_matrix((data, (row, col)), shape=(m, n))
[ "def", "_rand_sparse", "(", "m", ",", "n", ",", "density", ",", "format", "=", "'csr'", ")", ":", "nnz", "=", "max", "(", "min", "(", "int", "(", "m", "*", "n", "*", "density", ")", ",", "m", "*", "n", ")", ",", "0", ")", "row", "=", "np", ".", "random", ".", "randint", "(", "low", "=", "0", ",", "high", "=", "m", "-", "1", ",", "size", "=", "nnz", ")", "col", "=", "np", ".", "random", ".", "randint", "(", "low", "=", "0", ",", "high", "=", "n", "-", "1", ",", "size", "=", "nnz", ")", "data", "=", "np", ".", "ones", "(", "nnz", ",", "dtype", "=", "float", ")", "# duplicate (i,j) entries will be summed together", "return", "sp", ".", "sparse", ".", "csr_matrix", "(", "(", "data", ",", "(", "row", ",", "col", ")", ")", ",", "shape", "=", "(", "m", ",", "n", ")", ")" ]
40.6
16
def __set_config_value(self, key, value):
    """Sets a value for a room config"""
    # Only the room owner may change configuration.
    self.check_owner()
    payload = {"room": self.room_id, "config": to_json({key: value})}
    response = self.conn.make_api_call("setRoomConfig", payload)
    if "error" not in response:
        return response
    raise RuntimeError(f"{response['error'].get('message') or response['error']}")
[ "def", "__set_config_value", "(", "self", ",", "key", ",", "value", ")", ":", "self", ".", "check_owner", "(", ")", "params", "=", "{", "\"room\"", ":", "self", ".", "room_id", ",", "\"config\"", ":", "to_json", "(", "{", "key", ":", "value", "}", ")", "}", "resp", "=", "self", ".", "conn", ".", "make_api_call", "(", "\"setRoomConfig\"", ",", "params", ")", "if", "\"error\"", "in", "resp", ":", "raise", "RuntimeError", "(", "f\"{resp['error'].get('message') or resp['error']}\"", ")", "return", "resp" ]
41.555556
20.666667
def append_search_summary(xmldoc, process, shared_object = "standalone", lalwrapper_cvs_tag = "", lal_cvs_tag = "", comment = None, ifos = None, inseg = None, outseg = None, nevents = 0, nnodes = 1):
    """
    Append search summary information associated with the given process to
    the search summary table in xmldoc.  Returns the newly-created
    search_summary table row.
    """
    row = lsctables.SearchSummary()
    row.process_id = process.process_id
    row.shared_object = shared_object
    row.lalwrapper_cvs_tag = lalwrapper_cvs_tag
    row.lal_cvs_tag = lal_cvs_tag
    # Fall back to the process metadata when no explicit value is given.
    row.comment = comment if comment else process.comment
    row.instruments = process.instruments if ifos is None else ifos
    row.in_segment = inseg
    row.out_segment = outseg
    row.nevents = nevents
    row.nnodes = nnodes
    # Reuse an existing search_summary table, creating one if absent.
    try:
        tbl = lsctables.SearchSummaryTable.get_table(xmldoc)
    except ValueError:
        tbl = xmldoc.childNodes[0].appendChild(
            lsctables.New(lsctables.SearchSummaryTable))
    tbl.append(row)
    return row
[ "def", "append_search_summary", "(", "xmldoc", ",", "process", ",", "shared_object", "=", "\"standalone\"", ",", "lalwrapper_cvs_tag", "=", "\"\"", ",", "lal_cvs_tag", "=", "\"\"", ",", "comment", "=", "None", ",", "ifos", "=", "None", ",", "inseg", "=", "None", ",", "outseg", "=", "None", ",", "nevents", "=", "0", ",", "nnodes", "=", "1", ")", ":", "row", "=", "lsctables", ".", "SearchSummary", "(", ")", "row", ".", "process_id", "=", "process", ".", "process_id", "row", ".", "shared_object", "=", "shared_object", "row", ".", "lalwrapper_cvs_tag", "=", "lalwrapper_cvs_tag", "row", ".", "lal_cvs_tag", "=", "lal_cvs_tag", "row", ".", "comment", "=", "comment", "or", "process", ".", "comment", "row", ".", "instruments", "=", "ifos", "if", "ifos", "is", "not", "None", "else", "process", ".", "instruments", "row", ".", "in_segment", "=", "inseg", "row", ".", "out_segment", "=", "outseg", "row", ".", "nevents", "=", "nevents", "row", ".", "nnodes", "=", "nnodes", "try", ":", "tbl", "=", "lsctables", ".", "SearchSummaryTable", ".", "get_table", "(", "xmldoc", ")", "except", "ValueError", ":", "tbl", "=", "xmldoc", ".", "childNodes", "[", "0", "]", ".", "appendChild", "(", "lsctables", ".", "New", "(", "lsctables", ".", "SearchSummaryTable", ")", ")", "tbl", ".", "append", "(", "row", ")", "return", "row" ]
37.28
24.24
def _get_chat(self) -> Dict: """ As Telegram changes where the chat object is located in the response, this method tries to be smart about finding it in the right place. """ if 'callback_query' in self._update: query = self._update['callback_query'] if 'message' in query: return query['message']['chat'] else: return {'id': query['chat_instance']} elif 'inline_query' in self._update: return patch_dict( self._update['inline_query']['from'], is_inline_query=True, ) elif 'message' in self._update: return self._update['message']['chat']
[ "def", "_get_chat", "(", "self", ")", "->", "Dict", ":", "if", "'callback_query'", "in", "self", ".", "_update", ":", "query", "=", "self", ".", "_update", "[", "'callback_query'", "]", "if", "'message'", "in", "query", ":", "return", "query", "[", "'message'", "]", "[", "'chat'", "]", "else", ":", "return", "{", "'id'", ":", "query", "[", "'chat_instance'", "]", "}", "elif", "'inline_query'", "in", "self", ".", "_update", ":", "return", "patch_dict", "(", "self", ".", "_update", "[", "'inline_query'", "]", "[", "'from'", "]", ",", "is_inline_query", "=", "True", ",", ")", "elif", "'message'", "in", "self", ".", "_update", ":", "return", "self", ".", "_update", "[", "'message'", "]", "[", "'chat'", "]" ]
37.473684
13.368421
def load_dict_from_yaml(path):
    """
    Loads a dictionary from a yaml file

    :param path: the absolute path of the target yaml file
    :return: the parsed content (typically a dict)
    """
    # BUGFIX: the original used the Python-2-only ``file()`` builtin and
    # leaked the handle if parsing raised; ``with open(...)`` fixes both.
    # ``yaml.safe_load`` avoids the arbitrary-object-construction risk of
    # the legacy ``yaml.load`` default loader.
    # NOTE(review): if callers rely on custom YAML tags, switch to
    # ``yaml.load(f, Loader=yaml.FullLoader)`` instead -- confirm usage.
    with open(path, 'r') as f:
        return yaml.safe_load(f)
[ "def", "load_dict_from_yaml", "(", "path", ")", ":", "f", "=", "file", "(", "path", ",", "'r'", ")", "dictionary", "=", "yaml", ".", "load", "(", "f", ")", "f", ".", "close", "(", ")", "return", "dictionary" ]
23.9
13.1
def tokenize(self):
    """
    Tokenizes the string stored in the parser object
    into a list of tokens.

    Reads ``self.parse_string``, writes the result to
    ``self.token_list``.  Token classes: identifiers
    (letters/digits/underscore), numbers (with optional decimal point and
    signed exponent), dotted names, and single-character operators.  A
    ``-`` in prefix position is rewritten to ``~`` (unary negation).
    """
    self.token_list = []
    ps = self.parse_string.strip()
    i = 0
    last_token = None
    # Skip leading whitespace.
    while i < len(ps) and ps[i].isspace():
        i += 1
    while i < len(ps):
        token = ''
        if ps[i].isalpha():
            # Identifier: letters, digits and underscores.
            while i < len(ps) and (ps[i].isalnum() or ps[i] == '_'):
                token += ps[i]
                i += 1
        elif ps[i].isdigit():
            # Number: digits, '.', and an exponent whose sign is only
            # accepted immediately after 'e'/'E'.
            while i < len(ps) and (ps[i].isdigit() or ps[i] == '.' or
                                   ps[i] == 'e' or ps[i] == 'E' or
                                   (ps[i] == '+' and (ps[i-1] == 'e' or ps[i-1] == 'E')) or
                                   (ps[i] == '-' and (ps[i-1] == 'e' or ps[i-1] == 'E'))):
                token += ps[i]
                i += 1
        elif ps[i] == '.':
            # BUGFIX: guard the i+1 lookahead so a trailing '.' no longer
            # raises IndexError.
            if i + 1 < len(ps) and ps[i+1].isdigit():
                # Number beginning with a decimal point.
                while i < len(ps) and (ps[i].isdigit() or ps[i] == '.'):
                    token += ps[i]
                    i += 1
            else:
                # Dotted name fragment.
                while i < len(ps) and (ps[i].isalpha() or ps[i] == '.'):
                    token += ps[i]
                    i += 1
        else:
            # Single-character operator/punctuation.
            token += ps[i]
            i += 1
        # A '-' at the start, after '(' or after an operator is unary
        # negation; represent it as '~'.
        if token == '-' and \
                (last_token is None or last_token == '(' or
                 self.is_op(last_token)):
            token = '~'
        self.token_list += [token]
        last_token = token
        # Skip whitespace between tokens.
        while i < len(ps) and ps[i].isspace():
            i += 1
32.735849
19.716981
def end_output (self, **kwargs):
    """Write edges and end of checking info as gml comment."""
    self.write_edges()
    self.end_graph()
    # The outro part is optional; emit it only when configured.
    wants_outro = self.has_part("outro")
    if wants_outro:
        self.write_outro()
    self.close_fileoutput()
[ "def", "end_output", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "write_edges", "(", ")", "self", ".", "end_graph", "(", ")", "if", "self", ".", "has_part", "(", "\"outro\"", ")", ":", "self", ".", "write_outro", "(", ")", "self", ".", "close_fileoutput", "(", ")" ]
34.714286
9
def viewport(self):
    """Rect: The drawing area for rendering on the current target."""
    result = rect.Rect(0, 0, 0, 0)
    # SDL fills the rect in place; check_int_err raises on a non-zero code.
    check_int_err(lib.SDL_RenderGetViewport(self._ptr, result._ptr))
    return result
[ "def", "viewport", "(", "self", ")", ":", "viewport", "=", "rect", ".", "Rect", "(", "0", ",", "0", ",", "0", ",", "0", ")", "check_int_err", "(", "lib", ".", "SDL_RenderGetViewport", "(", "self", ".", "_ptr", ",", "viewport", ".", "_ptr", ")", ")", "return", "viewport" ]
45.8
14.4
def _mapping(self):
    """Fetch the entire mapping for the specified index.

    Returns:
        dict: The full mapping for the index.
    """
    endpoint = "/unstable/index/{}/mapping".format(
        mdf_toolbox.translate_index(self.index))
    response = self.__search_client.get(endpoint)
    return response["mappings"]
[ "def", "_mapping", "(", "self", ")", ":", "return", "(", "self", ".", "__search_client", ".", "get", "(", "\"/unstable/index/{}/mapping\"", ".", "format", "(", "mdf_toolbox", ".", "translate_index", "(", "self", ".", "index", ")", ")", ")", "[", "\"mappings\"", "]", ")" ]
35.777778
18.111111
def match_color_index(self, color):
    """Takes an "R,G,B" string or wx.Color and returns a matching xlwt
    color.

    An int is assumed to already be an xlwt color index and is returned
    unchanged; a falsy color returns None (original behavior).
    """
    from jcvi.utils.webcolors import color_diff

    if isinstance(color, int):
        return color

    if color:
        if isinstance(color, six.string_types):
            # BUGFIX: materialize the map() result -- on Python 3, map()
            # returns a one-shot iterator that would be exhausted after the
            # first color_diff call, corrupting every later comparison.
            rgb = list(map(int, color.split(',')))
        else:
            rgb = color.Get()
        # Silence debug chatter from color_diff while scanning the palette.
        logging.disable(logging.DEBUG)
        distances = [color_diff(rgb, x) for x in self.xlwt_colors]
        logging.disable(logging.NOTSET)
        result = distances.index(min(distances))
        self.unused_colors.discard(self.xlwt_colors[result])
        return result
[ "def", "match_color_index", "(", "self", ",", "color", ")", ":", "from", "jcvi", ".", "utils", ".", "webcolors", "import", "color_diff", "if", "isinstance", "(", "color", ",", "int", ")", ":", "return", "color", "if", "color", ":", "if", "isinstance", "(", "color", ",", "six", ".", "string_types", ")", ":", "rgb", "=", "map", "(", "int", ",", "color", ".", "split", "(", "','", ")", ")", "else", ":", "rgb", "=", "color", ".", "Get", "(", ")", "logging", ".", "disable", "(", "logging", ".", "DEBUG", ")", "distances", "=", "[", "color_diff", "(", "rgb", ",", "x", ")", "for", "x", "in", "self", ".", "xlwt_colors", "]", "logging", ".", "disable", "(", "logging", ".", "NOTSET", ")", "result", "=", "distances", ".", "index", "(", "min", "(", "distances", ")", ")", "self", ".", "unused_colors", ".", "discard", "(", "self", ".", "xlwt_colors", "[", "result", "]", ")", "return", "result" ]
39.166667
12.333333
def bind_super(self, opr):
    """Grant the super administrator access to every registered route."""
    for route in self.routes.values():
        route['oprs'].append(opr)
[ "def", "bind_super", "(", "self", ",", "opr", ")", ":", "for", "path", "in", "self", ".", "routes", ":", "route", "=", "self", ".", "routes", ".", "get", "(", "path", ")", "route", "[", "'oprs'", "]", ".", "append", "(", "opr", ")" ]
28.5
4.333333
def _get_member_file_data(member_data, id_filename=False):
    """
    Helper function to get file data of member of a project.

    Keeps only the most recently created file (by 'created' timestamp)
    for each basename.

    :param member_data: This field is data related to member in a project.
    """
    latest = {}
    for datafile in member_data['data']:
        if id_filename:
            # Prefix with the file id to disambiguate identical basenames.
            key = '{}.{}'.format(datafile['id'], datafile['basename'])
        else:
            key = datafile['basename']
        known = latest.get(key)
        if known is None or arrow.get(datafile['created']) > arrow.get(known['created']):
            latest[key] = datafile
    return latest
[ "def", "_get_member_file_data", "(", "member_data", ",", "id_filename", "=", "False", ")", ":", "file_data", "=", "{", "}", "for", "datafile", "in", "member_data", "[", "'data'", "]", ":", "if", "id_filename", ":", "basename", "=", "'{}.{}'", ".", "format", "(", "datafile", "[", "'id'", "]", ",", "datafile", "[", "'basename'", "]", ")", "else", ":", "basename", "=", "datafile", "[", "'basename'", "]", "if", "(", "basename", "not", "in", "file_data", "or", "arrow", ".", "get", "(", "datafile", "[", "'created'", "]", ")", ">", "arrow", ".", "get", "(", "file_data", "[", "basename", "]", "[", "'created'", "]", ")", ")", ":", "file_data", "[", "basename", "]", "=", "datafile", "return", "file_data" ]
40.411765
16.764706
def set_border(self, thickness, color="black"):
    """
    Sets the border thickness and color.

    :param int thickness: The thickness of the border.
    :param str color: The color of the border.
    """
    self._set_tk_config("highlightthickness", thickness)
    border_color = utils.convert_color(color)
    self._set_tk_config("highlightbackground", border_color)
[ "def", "set_border", "(", "self", ",", "thickness", ",", "color", "=", "\"black\"", ")", ":", "self", ".", "_set_tk_config", "(", "\"highlightthickness\"", ",", "thickness", ")", "self", ".", "_set_tk_config", "(", "\"highlightbackground\"", ",", "utils", ".", "convert_color", "(", "color", ")", ")" ]
31.75
14.916667
def _rainbow_lines( self, text, freq=0.1, spread=3.0, offset=0, movefactor=0, rgb_mode=False, **colorargs): """ Create rainbow text, using the same offset for each line. Arguments: text : String to colorize. freq : Frequency/"tightness" of colors in the rainbow. Best results when in the range 0.0-1.0. Default: 0.1 spread : Spread/width of colors. Default: 3.0 offset : Offset for start of rainbow. Default: 0 movefactor : Factor for offset increase on each new line. Default: 0 rgb_mode : If truthy, use RGB escape codes instead of extended 256 and approximate hex match. Keyword Arguments: fore, back, style : Other args for the color() function. """ if not movefactor: def factor(i): return offset else: # Increase the offset for each line. def factor(i): return offset + (i * movefactor) return '\n'.join( self._rainbow_line( line, freq=freq, spread=spread, offset=factor(i), rgb_mode=rgb_mode, **colorargs) for i, line in enumerate(text.splitlines()))
[ "def", "_rainbow_lines", "(", "self", ",", "text", ",", "freq", "=", "0.1", ",", "spread", "=", "3.0", ",", "offset", "=", "0", ",", "movefactor", "=", "0", ",", "rgb_mode", "=", "False", ",", "*", "*", "colorargs", ")", ":", "if", "not", "movefactor", ":", "def", "factor", "(", "i", ")", ":", "return", "offset", "else", ":", "# Increase the offset for each line.", "def", "factor", "(", "i", ")", ":", "return", "offset", "+", "(", "i", "*", "movefactor", ")", "return", "'\\n'", ".", "join", "(", "self", ".", "_rainbow_line", "(", "line", ",", "freq", "=", "freq", ",", "spread", "=", "spread", ",", "offset", "=", "factor", "(", "i", ")", ",", "rgb_mode", "=", "rgb_mode", ",", "*", "*", "colorargs", ")", "for", "i", ",", "line", "in", "enumerate", "(", "text", ".", "splitlines", "(", ")", ")", ")" ]
40.72973
14.945946
def destroy_vm(self, vm, logger):
    """
    destroy the given vm
    :param vm: virtual machine pyvmomi object
    :param logger:
    """
    # The VM must be powered down before it can be destroyed.
    self.power_off_before_destroy(logger, vm)
    logger.info("Destroying VM {0}".format(vm.name))
    destroy_task = vm.Destroy_Task()
    return self.task_waiter.wait_for_task(task=destroy_task,
                                          logger=logger,
                                          action_name="Destroy VM")
[ "def", "destroy_vm", "(", "self", ",", "vm", ",", "logger", ")", ":", "self", ".", "power_off_before_destroy", "(", "logger", ",", "vm", ")", "logger", ".", "info", "(", "(", "\"Destroying VM {0}\"", ".", "format", "(", "vm", ".", "name", ")", ")", ")", "task", "=", "vm", ".", "Destroy_Task", "(", ")", "return", "self", ".", "task_waiter", ".", "wait_for_task", "(", "task", "=", "task", ",", "logger", "=", "logger", ",", "action_name", "=", "\"Destroy VM\"", ")" ]
30.230769
19.692308
def FoldByteStream(self, mapped_value, **unused_kwargs):  # pylint: disable=redundant-returns-doc
    """Folds the data type into a byte stream.

    Args:
      mapped_value (object): mapped value.

    Returns:
      bytes: byte stream.

    Raises:
      FoldingError: if the data type definition cannot be folded into
          the byte stream.
    """
    # Base implementation: folding is not supported for this data type.
    message = 'Unable to fold {0:s} data type into byte stream'.format(
        self._data_type_definition.TYPE_INDICATOR)
    raise errors.FoldingError(message)
[ "def", "FoldByteStream", "(", "self", ",", "mapped_value", ",", "*", "*", "unused_kwargs", ")", ":", "# pylint: disable=redundant-returns-doc", "raise", "errors", ".", "FoldingError", "(", "'Unable to fold {0:s} data type into byte stream'", ".", "format", "(", "self", ".", "_data_type_definition", ".", "TYPE_INDICATOR", ")", ")" ]
30.875
23.4375
def is_correct(self):
    """Check if the Daterange is correct : weekdays are valid

    :return: True if weekdays are valid, False otherwise
    :rtype: bool
    """
    valid = self.swday in range(7)
    if not valid:
        logger.error("Error: %s is not a valid day", self.swday)
    # NOTE: once swday is invalid the ewday message fires regardless of
    # ewday's value (inherited behavior of the cumulative check).
    valid = valid and self.ewday in range(7)
    if not valid:
        logger.error("Error: %s is not a valid day", self.ewday)
    return valid
[ "def", "is_correct", "(", "self", ")", ":", "valid", "=", "True", "valid", "&=", "self", ".", "swday", "in", "range", "(", "7", ")", "if", "not", "valid", ":", "logger", ".", "error", "(", "\"Error: %s is not a valid day\"", ",", "self", ".", "swday", ")", "valid", "&=", "self", ".", "ewday", "in", "range", "(", "7", ")", "if", "not", "valid", ":", "logger", ".", "error", "(", "\"Error: %s is not a valid day\"", ",", "self", ".", "ewday", ")", "return", "valid" ]
29.5625
19.6875
def _build_command(self): """ Command to start the Dynamips hypervisor process. (to be passed to subprocess.Popen()) """ command = [self._path] command.extend(["-N1"]) # use instance IDs for filenames command.extend(["-l", "dynamips_i{}_log.txt".format(self._id)]) # log file # Dynamips cannot listen for hypervisor commands and for console connections on # 2 different IP addresses. # See https://github.com/GNS3/dynamips/issues/62 if self._console_host != "0.0.0.0" and self._console_host != "::": command.extend(["-H", "{}:{}".format(self._host, self._port)]) else: command.extend(["-H", str(self._port)]) return command
[ "def", "_build_command", "(", "self", ")", ":", "command", "=", "[", "self", ".", "_path", "]", "command", ".", "extend", "(", "[", "\"-N1\"", "]", ")", "# use instance IDs for filenames", "command", ".", "extend", "(", "[", "\"-l\"", ",", "\"dynamips_i{}_log.txt\"", ".", "format", "(", "self", ".", "_id", ")", "]", ")", "# log file", "# Dynamips cannot listen for hypervisor commands and for console connections on", "# 2 different IP addresses.", "# See https://github.com/GNS3/dynamips/issues/62", "if", "self", ".", "_console_host", "!=", "\"0.0.0.0\"", "and", "self", ".", "_console_host", "!=", "\"::\"", ":", "command", ".", "extend", "(", "[", "\"-H\"", ",", "\"{}:{}\"", ".", "format", "(", "self", ".", "_host", ",", "self", ".", "_port", ")", "]", ")", "else", ":", "command", ".", "extend", "(", "[", "\"-H\"", ",", "str", "(", "self", ".", "_port", ")", "]", ")", "return", "command" ]
43.411765
20.352941
def _extract_log_probs(num_states, dist):
    """Tabulate log probabilities from a batch of distributions."""
    # Shape [num_states, 1, ..., 1] so the state ids broadcast against the
    # distribution's batch shape.
    batch_ones = tf.ones_like(dist.batch_shape_tensor())
    states_shape = tf.concat([[num_states], batch_ones], axis=0)
    states = tf.reshape(tf.range(num_states), states_shape)
    # Move the states axis from the front to the last dimension.
    return distribution_util.move_dimension(dist.log_prob(states), 0, -1)
[ "def", "_extract_log_probs", "(", "num_states", ",", "dist", ")", ":", "states", "=", "tf", ".", "reshape", "(", "tf", ".", "range", "(", "num_states", ")", ",", "tf", ".", "concat", "(", "[", "[", "num_states", "]", ",", "tf", ".", "ones_like", "(", "dist", ".", "batch_shape_tensor", "(", ")", ")", "]", ",", "axis", "=", "0", ")", ")", "return", "distribution_util", ".", "move_dimension", "(", "dist", ".", "log_prob", "(", "states", ")", ",", "0", ",", "-", "1", ")" ]
47.5
14.375
def image(self, height=1, module_width=1, add_quiet_zone=True): """Get the barcode as PIL.Image. By default the image is one pixel high and the number of modules pixels wide, with 10 empty modules added to each side to act as the quiet zone. The size can be modified by setting height and module_width, but if used in a web page it might be a good idea to do the scaling on client side. :param height: Height of the image in number of pixels. :param module_width: A multiplier for the width. :param add_quiet_zone: Whether to add 10 empty modules to each side of the barcode. :rtype: PIL.Image :return: A monochromatic image containing the barcode as black bars on white background. """ if Image is None: raise Code128.MissingDependencyError("PIL module is required to use image method.") modules = list(self.modules) if add_quiet_zone: # Add ten space modules to each side of the barcode. modules = [1] * self.quiet_zone + modules + [1] * self.quiet_zone width = len(modules) img = Image.new(mode='1', size=(width, 1)) img.putdata(modules) if height == 1 and module_width == 1: return img else: new_size = (width * module_width, height) return img.resize(new_size, resample=Image.NEAREST)
[ "def", "image", "(", "self", ",", "height", "=", "1", ",", "module_width", "=", "1", ",", "add_quiet_zone", "=", "True", ")", ":", "if", "Image", "is", "None", ":", "raise", "Code128", ".", "MissingDependencyError", "(", "\"PIL module is required to use image method.\"", ")", "modules", "=", "list", "(", "self", ".", "modules", ")", "if", "add_quiet_zone", ":", "# Add ten space modules to each side of the barcode.", "modules", "=", "[", "1", "]", "*", "self", ".", "quiet_zone", "+", "modules", "+", "[", "1", "]", "*", "self", ".", "quiet_zone", "width", "=", "len", "(", "modules", ")", "img", "=", "Image", ".", "new", "(", "mode", "=", "'1'", ",", "size", "=", "(", "width", ",", "1", ")", ")", "img", ".", "putdata", "(", "modules", ")", "if", "height", "==", "1", "and", "module_width", "==", "1", ":", "return", "img", "else", ":", "new_size", "=", "(", "width", "*", "module_width", ",", "height", ")", "return", "img", ".", "resize", "(", "new_size", ",", "resample", "=", "Image", ".", "NEAREST", ")" ]
44.548387
28.516129
def main(): '''Calculate the distance of an object in centimeters using a HCSR04 sensor and a Raspberry Pi. This script allows for a quicker reading by decreasing the number of samples and forcing the readings to be taken at quicker intervals.''' trig_pin = 17 echo_pin = 27 # Create a distance reading with the hcsr04 sensor module value = sensor.Measurement(trig_pin, echo_pin) # The default sample_size is 11 and sample_wait is 0.1 # Increase speed by lowering the sample_size and sample_wait # The effect of reducing sample_size is larger variance in readings # The effect of lowering sample_wait is higher cpu usage and sensor # instability if you push it with too fast of a value. # These two options have been added to allow you to tweak a # more optimal setting for your application. # e.g. raw_measurement = value.raw_distance(sample_size=5, sample_wait=0.03) # Calculate the distance in centimeters metric_distance = value.distance_metric(raw_measurement) print("The Distance = {} centimeters".format(metric_distance))
[ "def", "main", "(", ")", ":", "trig_pin", "=", "17", "echo_pin", "=", "27", "# Create a distance reading with the hcsr04 sensor module", "value", "=", "sensor", ".", "Measurement", "(", "trig_pin", ",", "echo_pin", ")", "# The default sample_size is 11 and sample_wait is 0.1", "# Increase speed by lowering the sample_size and sample_wait", "# The effect of reducing sample_size is larger variance in readings", "# The effect of lowering sample_wait is higher cpu usage and sensor", "# instability if you push it with too fast of a value.", "# These two options have been added to allow you to tweak a ", "# more optimal setting for your application.", "# e.g.", "raw_measurement", "=", "value", ".", "raw_distance", "(", "sample_size", "=", "5", ",", "sample_wait", "=", "0.03", ")", "# Calculate the distance in centimeters", "metric_distance", "=", "value", ".", "distance_metric", "(", "raw_measurement", ")", "print", "(", "\"The Distance = {} centimeters\"", ".", "format", "(", "metric_distance", ")", ")" ]
42.269231
25.961538
def resource_of_node(resources, node): """ Returns resource of node. """ for resource in resources: model = getattr(resource, 'model', None) if type(node) == model: return resource return BasePageResource
[ "def", "resource_of_node", "(", "resources", ",", "node", ")", ":", "for", "resource", "in", "resources", ":", "model", "=", "getattr", "(", "resource", ",", "'model'", ",", "None", ")", "if", "type", "(", "node", ")", "==", "model", ":", "return", "resource", "return", "BasePageResource" ]
30.125
6.875
def update_membercard(self, code, card_id, **kwargs): """ 更新会员信息 详情请参见 https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1451025283 注意事项: 1.开发者可以同时传入add_bonus和bonus解决由于同步失败带来的幂等性问题。同时传入add_bonus和bonus时 add_bonus作为积分变动消息中的变量值,而bonus作为卡面上的总积分额度显示。余额变动同理。 2.开发者可以传入is_notify_bonus控制特殊的积分对账变动不发送消息,余额变动同理。 参数示例: { "code": "179011264953", "card_id": "p1Pj9jr90_SQRaVqYI239Ka1erkI", "background_pic_url": "https://mmbiz.qlogo.cn/mmbiz/0?wx_fmt=jpeg", "record_bonus": "消费30元,获得3积分", "bonus": 3000, "add_bonus": 30, "balance": 3000, "add_balance": -30, "record_balance": "购买焦糖玛琪朵一杯,扣除金额30元。", "custom_field_value1": "xxxxx", "custom_field_value2": "xxxxx", "notify_optional": { "is_notify_bonus": true, "is_notify_balance": true, "is_notify_custom_field1":true } } 返回示例: { "errcode": 0, "errmsg": "ok", "result_bonus": 100, "result_balance": 200, "openid": "oFS7Fjl0WsZ9AMZqrI80nbIq8xrA" } :param code: 必填,卡券Code码 :param card_id: 必填,卡券ID :param kwargs: 其他非必填字段,包含则更新对应字段。详情参见微信文档 “7 更新会员信息” 部分 :return: 参见返回示例 """ kwargs.update({ 'code': code, 'card_id': card_id, }) return self._post( 'card/membercard/updateuser', data=kwargs )
[ "def", "update_membercard", "(", "self", ",", "code", ",", "card_id", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "{", "'code'", ":", "code", ",", "'card_id'", ":", "card_id", ",", "}", ")", "return", "self", ".", "_post", "(", "'card/membercard/updateuser'", ",", "data", "=", "kwargs", ")" ]
29.773585
17.849057
def process(self): """ This method handles the actual processing of Modules and Transforms """ self.modules.sort(key=lambda x: x.priority) for module in self.modules: transforms = module.transform(self.data) transforms.sort(key=lambda x: x.linenum, reverse=True) for transform in transforms: linenum = transform.linenum if isinstance(transform.data, basestring): transform.data = [transform.data] if transform.oper == "prepend": self.data[linenum:linenum] = transform.data elif transform.oper == "append": self.data[linenum+1:linenum+1] = transform.data elif transform.oper == "swap": self.data[linenum:linenum+1] = transform.data elif transform.oper == "drop": self.data[linenum:linenum+1] = [] elif transform.oper == "noop": pass
[ "def", "process", "(", "self", ")", ":", "self", ".", "modules", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", ".", "priority", ")", "for", "module", "in", "self", ".", "modules", ":", "transforms", "=", "module", ".", "transform", "(", "self", ".", "data", ")", "transforms", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", ".", "linenum", ",", "reverse", "=", "True", ")", "for", "transform", "in", "transforms", ":", "linenum", "=", "transform", ".", "linenum", "if", "isinstance", "(", "transform", ".", "data", ",", "basestring", ")", ":", "transform", ".", "data", "=", "[", "transform", ".", "data", "]", "if", "transform", ".", "oper", "==", "\"prepend\"", ":", "self", ".", "data", "[", "linenum", ":", "linenum", "]", "=", "transform", ".", "data", "elif", "transform", ".", "oper", "==", "\"append\"", ":", "self", ".", "data", "[", "linenum", "+", "1", ":", "linenum", "+", "1", "]", "=", "transform", ".", "data", "elif", "transform", ".", "oper", "==", "\"swap\"", ":", "self", ".", "data", "[", "linenum", ":", "linenum", "+", "1", "]", "=", "transform", ".", "data", "elif", "transform", ".", "oper", "==", "\"drop\"", ":", "self", ".", "data", "[", "linenum", ":", "linenum", "+", "1", "]", "=", "[", "]", "elif", "transform", ".", "oper", "==", "\"noop\"", ":", "pass" ]
33.933333
20.066667
def file_change_history(self, branch='master', limit=None, days=None, ignore_globs=None, include_globs=None): """ Returns a DataFrame of all file changes (via the commit history) for the specified branch. This is similar to the commit history DataFrame, but is one row per file edit rather than one row per commit (which may encapsulate many file changes). Included in the DataFrame will be the columns: * date (index) * author * committer * message * filename * insertions * deletions :param branch: the branch to return commits for :param limit: (optional, default=None) a maximum number of commits to return, None for no limit :param days: (optional, default=None) number of days to return if limit is None :param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing :param include_globs: (optinal, default=None) a list of globs to include, default of None includes everything. :return: DataFrame """ # setup the dataset of commits if limit is None: if days is None: ds = [[ x.author.name, x.committer.name, x.committed_date, x.message, x.name_rev.split()[0], self.__check_extension(x.stats.files, ignore_globs=ignore_globs, include_globs=include_globs) ] for x in self.repo.iter_commits(branch, max_count=sys.maxsize)] else: ds = [] c_date = time.time() commits = self.repo.iter_commits(branch, max_count=sys.maxsize) dlim = time.time() - days * 24 * 3600 while c_date > dlim: try: if sys.version_info.major == 2: x = commits.next() else: x = commits.__next__() except StopIteration: break c_date = x.committed_date if c_date > dlim: ds.append([ x.author.name, x.committer.name, x.committed_date, x.message, x.name_rev.split()[0], self.__check_extension(x.stats.files, ignore_globs=ignore_globs, include_globs=include_globs) ]) else: ds = [[ x.author.name, x.committer.name, x.committed_date, x.message, x.name_rev.split()[0], self.__check_extension(x.stats.files, ignore_globs=ignore_globs, include_globs=include_globs) ] for x in 
self.repo.iter_commits(branch, max_count=limit)] ds = [x[:-1] + [fn, x[-1][fn]['insertions'], x[-1][fn]['deletions']] for x in ds for fn in x[-1].keys() if len(x[-1].keys()) > 0] # make it a pandas dataframe df = DataFrame(ds, columns=['author', 'committer', 'date', 'message', 'rev', 'filename', 'insertions', 'deletions']) # format the date col and make it the index df['date'] = to_datetime(df['date'].map(datetime.datetime.fromtimestamp)) df.set_index(keys=['date'], drop=True, inplace=True) return df
[ "def", "file_change_history", "(", "self", ",", "branch", "=", "'master'", ",", "limit", "=", "None", ",", "days", "=", "None", ",", "ignore_globs", "=", "None", ",", "include_globs", "=", "None", ")", ":", "# setup the dataset of commits", "if", "limit", "is", "None", ":", "if", "days", "is", "None", ":", "ds", "=", "[", "[", "x", ".", "author", ".", "name", ",", "x", ".", "committer", ".", "name", ",", "x", ".", "committed_date", ",", "x", ".", "message", ",", "x", ".", "name_rev", ".", "split", "(", ")", "[", "0", "]", ",", "self", ".", "__check_extension", "(", "x", ".", "stats", ".", "files", ",", "ignore_globs", "=", "ignore_globs", ",", "include_globs", "=", "include_globs", ")", "]", "for", "x", "in", "self", ".", "repo", ".", "iter_commits", "(", "branch", ",", "max_count", "=", "sys", ".", "maxsize", ")", "]", "else", ":", "ds", "=", "[", "]", "c_date", "=", "time", ".", "time", "(", ")", "commits", "=", "self", ".", "repo", ".", "iter_commits", "(", "branch", ",", "max_count", "=", "sys", ".", "maxsize", ")", "dlim", "=", "time", ".", "time", "(", ")", "-", "days", "*", "24", "*", "3600", "while", "c_date", ">", "dlim", ":", "try", ":", "if", "sys", ".", "version_info", ".", "major", "==", "2", ":", "x", "=", "commits", ".", "next", "(", ")", "else", ":", "x", "=", "commits", ".", "__next__", "(", ")", "except", "StopIteration", ":", "break", "c_date", "=", "x", ".", "committed_date", "if", "c_date", ">", "dlim", ":", "ds", ".", "append", "(", "[", "x", ".", "author", ".", "name", ",", "x", ".", "committer", ".", "name", ",", "x", ".", "committed_date", ",", "x", ".", "message", ",", "x", ".", "name_rev", ".", "split", "(", ")", "[", "0", "]", ",", "self", ".", "__check_extension", "(", "x", ".", "stats", ".", "files", ",", "ignore_globs", "=", "ignore_globs", ",", "include_globs", "=", "include_globs", ")", "]", ")", "else", ":", "ds", "=", "[", "[", "x", ".", "author", ".", "name", ",", "x", ".", "committer", ".", "name", ",", "x", ".", 
"committed_date", ",", "x", ".", "message", ",", "x", ".", "name_rev", ".", "split", "(", ")", "[", "0", "]", ",", "self", ".", "__check_extension", "(", "x", ".", "stats", ".", "files", ",", "ignore_globs", "=", "ignore_globs", ",", "include_globs", "=", "include_globs", ")", "]", "for", "x", "in", "self", ".", "repo", ".", "iter_commits", "(", "branch", ",", "max_count", "=", "limit", ")", "]", "ds", "=", "[", "x", "[", ":", "-", "1", "]", "+", "[", "fn", ",", "x", "[", "-", "1", "]", "[", "fn", "]", "[", "'insertions'", "]", ",", "x", "[", "-", "1", "]", "[", "fn", "]", "[", "'deletions'", "]", "]", "for", "x", "in", "ds", "for", "fn", "in", "x", "[", "-", "1", "]", ".", "keys", "(", ")", "if", "len", "(", "x", "[", "-", "1", "]", ".", "keys", "(", ")", ")", ">", "0", "]", "# make it a pandas dataframe", "df", "=", "DataFrame", "(", "ds", ",", "columns", "=", "[", "'author'", ",", "'committer'", ",", "'date'", ",", "'message'", ",", "'rev'", ",", "'filename'", ",", "'insertions'", ",", "'deletions'", "]", ")", "# format the date col and make it the index", "df", "[", "'date'", "]", "=", "to_datetime", "(", "df", "[", "'date'", "]", ".", "map", "(", "datetime", ".", "datetime", ".", "fromtimestamp", ")", ")", "df", ".", "set_index", "(", "keys", "=", "[", "'date'", "]", ",", "drop", "=", "True", ",", "inplace", "=", "True", ")", "return", "df" ]
44.654321
24.703704
def create(self, recording_status_callback_event=values.unset, recording_status_callback=values.unset, recording_status_callback_method=values.unset, trim=values.unset, recording_channels=values.unset): """ Create a new RecordingInstance :param unicode recording_status_callback_event: The recording status changes that should generate a callback :param unicode recording_status_callback: The callback URL on each selected recording event :param unicode recording_status_callback_method: The HTTP method we should use to call `recording_status_callback` :param unicode trim: Whether to trim the silence in the recording :param unicode recording_channels: The number of channels that the output recording will be configured with :returns: Newly created RecordingInstance :rtype: twilio.rest.api.v2010.account.call.recording.RecordingInstance """ data = values.of({ 'RecordingStatusCallbackEvent': serialize.map(recording_status_callback_event, lambda e: e), 'RecordingStatusCallback': recording_status_callback, 'RecordingStatusCallbackMethod': recording_status_callback_method, 'Trim': trim, 'RecordingChannels': recording_channels, }) payload = self._version.create( 'POST', self._uri, data=data, ) return RecordingInstance( self._version, payload, account_sid=self._solution['account_sid'], call_sid=self._solution['call_sid'], )
[ "def", "create", "(", "self", ",", "recording_status_callback_event", "=", "values", ".", "unset", ",", "recording_status_callback", "=", "values", ".", "unset", ",", "recording_status_callback_method", "=", "values", ".", "unset", ",", "trim", "=", "values", ".", "unset", ",", "recording_channels", "=", "values", ".", "unset", ")", ":", "data", "=", "values", ".", "of", "(", "{", "'RecordingStatusCallbackEvent'", ":", "serialize", ".", "map", "(", "recording_status_callback_event", ",", "lambda", "e", ":", "e", ")", ",", "'RecordingStatusCallback'", ":", "recording_status_callback", ",", "'RecordingStatusCallbackMethod'", ":", "recording_status_callback_method", ",", "'Trim'", ":", "trim", ",", "'RecordingChannels'", ":", "recording_channels", ",", "}", ")", "payload", "=", "self", ".", "_version", ".", "create", "(", "'POST'", ",", "self", ".", "_uri", ",", "data", "=", "data", ",", ")", "return", "RecordingInstance", "(", "self", ".", "_version", ",", "payload", ",", "account_sid", "=", "self", ".", "_solution", "[", "'account_sid'", "]", ",", "call_sid", "=", "self", ".", "_solution", "[", "'call_sid'", "]", ",", ")" ]
44.916667
27.75
def process_schema(doc, resource, df): """Add schema entiries to a metatab doc from a dataframe""" from rowgenerators import SourceError from requests.exceptions import ConnectionError from metapack.cli.core import extract_path_name, alt_col_name, type_map from tableintuit import TypeIntuiter from rowgenerators.generator.python import PandasDataframeSource from appurl import parse_app_url try: doc['Schema'] except KeyError: doc.new_section('Schema', ['DataType', 'Altname', 'Description']) schema_name = resource.get_value('schema', resource.get_value('name')) schema_term = doc.find_first(term='Table', value=schema_name, section='Schema') if schema_term: logger.info("Found table for '{}'; skipping".format(schema_name)) return path, name = extract_path_name(resource.url) logger.info("Processing {}".format(resource.url)) si = PandasDataframeSource(parse_app_url(resource.url), df, cache=doc._cache, ) try: ti = TypeIntuiter().run(si) except SourceError as e: logger.warn("Failed to process '{}'; {}".format(path, e)) return except ConnectionError as e: logger.warn("Failed to download '{}'; {}".format(path, e)) return table = doc['Schema'].new_term('Table', schema_name) logger.info("Adding table '{}' to metatab schema".format(schema_name)) for i, c in enumerate(ti.to_rows()): raw_alt_name = alt_col_name(c['header'], i) alt_name = raw_alt_name if raw_alt_name != c['header'] else '' t = table.new_child('Column', c['header'], datatype=type_map.get(c['resolved_type'], c['resolved_type']), altname=alt_name, description=df[c['header']].description \ if hasattr(df, 'description') and df[c['header']].description else '' ) return table
[ "def", "process_schema", "(", "doc", ",", "resource", ",", "df", ")", ":", "from", "rowgenerators", "import", "SourceError", "from", "requests", ".", "exceptions", "import", "ConnectionError", "from", "metapack", ".", "cli", ".", "core", "import", "extract_path_name", ",", "alt_col_name", ",", "type_map", "from", "tableintuit", "import", "TypeIntuiter", "from", "rowgenerators", ".", "generator", ".", "python", "import", "PandasDataframeSource", "from", "appurl", "import", "parse_app_url", "try", ":", "doc", "[", "'Schema'", "]", "except", "KeyError", ":", "doc", ".", "new_section", "(", "'Schema'", ",", "[", "'DataType'", ",", "'Altname'", ",", "'Description'", "]", ")", "schema_name", "=", "resource", ".", "get_value", "(", "'schema'", ",", "resource", ".", "get_value", "(", "'name'", ")", ")", "schema_term", "=", "doc", ".", "find_first", "(", "term", "=", "'Table'", ",", "value", "=", "schema_name", ",", "section", "=", "'Schema'", ")", "if", "schema_term", ":", "logger", ".", "info", "(", "\"Found table for '{}'; skipping\"", ".", "format", "(", "schema_name", ")", ")", "return", "path", ",", "name", "=", "extract_path_name", "(", "resource", ".", "url", ")", "logger", ".", "info", "(", "\"Processing {}\"", ".", "format", "(", "resource", ".", "url", ")", ")", "si", "=", "PandasDataframeSource", "(", "parse_app_url", "(", "resource", ".", "url", ")", ",", "df", ",", "cache", "=", "doc", ".", "_cache", ",", ")", "try", ":", "ti", "=", "TypeIntuiter", "(", ")", ".", "run", "(", "si", ")", "except", "SourceError", "as", "e", ":", "logger", ".", "warn", "(", "\"Failed to process '{}'; {}\"", ".", "format", "(", "path", ",", "e", ")", ")", "return", "except", "ConnectionError", "as", "e", ":", "logger", ".", "warn", "(", "\"Failed to download '{}'; {}\"", ".", "format", "(", "path", ",", "e", ")", ")", "return", "table", "=", "doc", "[", "'Schema'", "]", ".", "new_term", "(", "'Table'", ",", "schema_name", ")", "logger", ".", "info", "(", "\"Adding table '{}' to 
metatab schema\"", ".", "format", "(", "schema_name", ")", ")", "for", "i", ",", "c", "in", "enumerate", "(", "ti", ".", "to_rows", "(", ")", ")", ":", "raw_alt_name", "=", "alt_col_name", "(", "c", "[", "'header'", "]", ",", "i", ")", "alt_name", "=", "raw_alt_name", "if", "raw_alt_name", "!=", "c", "[", "'header'", "]", "else", "''", "t", "=", "table", ".", "new_child", "(", "'Column'", ",", "c", "[", "'header'", "]", ",", "datatype", "=", "type_map", ".", "get", "(", "c", "[", "'resolved_type'", "]", ",", "c", "[", "'resolved_type'", "]", ")", ",", "altname", "=", "alt_name", ",", "description", "=", "df", "[", "c", "[", "'header'", "]", "]", ".", "description", "if", "hasattr", "(", "df", ",", "'description'", ")", "and", "df", "[", "c", "[", "'header'", "]", "]", ".", "description", "else", "''", ")", "return", "table" ]
35.962963
26.203704
def find(self, *args, **kwargs): " new query builder on current db" return Query(*args, db=self, schema=self.schema)
[ "def", "find", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "Query", "(", "*", "args", ",", "db", "=", "self", ",", "schema", "=", "self", ".", "schema", ")" ]
40.666667
7.333333
def draw(title, subtitle, author, cover_width=400, cover_height=600): """ Main drawing function, which generates a cover of the given dimension and renders title, author, and graphics. """ # Based on some initial constants and the title+author strings, generate a base # background color and a shape color to draw onto the background. Try to keep # these two colors somewhat compatible with each other by varying only their hue. def processColors(): base_saturation = 100 base_brightness = 90 color_distance = 100 invert = True counts = len(title) + len(author) color_seed = int(_map(_clip(counts, 2, 80), 2, 80, 10, 360)) shape_color = Image.colorHSB(color_seed, base_saturation, base_brightness-(counts % 20)) base_color = Image.colorHSB(( color_seed + color_distance) % 360, base_saturation, base_brightness ) if invert: shape_color, base_color = base_color, shape_color if (counts % 10) == 0: shape_color, base_color = base_color, shape_color return shape_color, base_color # Fill the background of the image with white. def drawBackground(): fill = Image.colorRGB(255, 255, 255) cover_image.rect(0, 0, cover_width, cover_height, fill) # Draw the actual artwork for the cover. Given the length of the title string, # generate an appropriate sized grid and draw C64 PETSCII into each of the cells. 
# https://www.c64-wiki.com/index.php/PETSCII # https://en.wikipedia.org/wiki/PETSCII#/media/File:PET_Keyboard.svg def drawArtwork(): artwork_start_x = 0 artwork_start_y = cover_height - cover_width grid_count, grid_total, grid_size = breakGrid() cover_image.rect(0, 0, cover_width, cover_height * cover_margin / 100, base_color) cover_image.rect(0, 0 + artwork_start_y, cover_width, cover_width, base_color) c64_title = c64Convert() for c, i in zip(itertools.cycle(c64_title), range(0, grid_total)): grid_x = int(i % grid_count) grid_y = int(i / grid_count) x = grid_x * grid_size + artwork_start_x y = grid_y * grid_size + artwork_start_y drawShape(c, x, y, grid_size) # Compute the graphics grid size based on the length of the book title. def breakGrid(): min_title = 2 max_title = 60 length = _clip(len(title), min_title, max_title) grid_count = int(_map(length, min_title, max_title, 2, 11)) grid_total = grid_count * grid_count grid_size = cover_width / grid_count return grid_count, grid_total, grid_size # Given the title of the book, filter through its characters and ensure # that only a certain range is used for the title; characters outside of # that range are replaced with a somewhat random character. def c64Convert(): c64_letters = " qQwWeErRtTyYuUiIoOpPaAsSdDfFgGhHjJkKlL:zZxXcCvVbBnNmM,;?<>@[]1234567890.=-+*/" c64_title = "" for c in title: if c in c64_letters: c64_title += c else: c64_title += c64_letters[ord(c) % len(c64_letters)] return c64_title # Given an alphabetic character from the book's title string and the x, y # coordinates and size of the cell within the cover grid, draw a PETSCII # shape into that cell. 
def drawShape(c, x, y, s): shape_thickness = 10 thick = int(s * shape_thickness / 100) if c in "qQ": cover_image.ellipse(x, y, s, s, shape_color) elif c in "wW": cover_image.ellipse(x, y, s, s, shape_color) cover_image.ellipse(x+thick, y+thick, s-(thick*2), s-(thick*2), base_color) elif c in "eE": cover_image.rect(x, y+thick, s, thick, shape_color) elif c in "rR": cover_image.rect(x, y+s-(thick*2), s, thick, shape_color) elif c in "tT": cover_image.rect(x+thick, y, thick, s, shape_color) elif c in "yY": cover_image.rect(x+s-(thick*2), y, thick, s, shape_color) elif c in "uU": cover_image.arc(x, y, 2*s, 2*s, 180, 270, shape_color, thick) elif c in "iI": cover_image.arc(x-s, y, 2*s, 2*s, 270, 360, shape_color, thick) elif c in "oO": cover_image.rect(x, y, s, thick, shape_color) cover_image.rect(x, y, thick, s, shape_color) elif c in "pP": cover_image.rect(x, y, s, thick, shape_color) cover_image.rect(x+s-thick, y, thick, s, shape_color) elif c in "aA": cover_image.triangle(x, y+s, x+(s/2), y, x+s, y+s, shape_color) elif c in "sS": cover_image.triangle(x, y, x+(s/2), y+s, x+s, y, shape_color) elif c in "dD": cover_image.rect(x, y+(thick*2), s, thick, shape_color) elif c in "fF": cover_image.rect(x, y+s-(thick*3), s, thick, shape_color) elif c in "gG": cover_image.rect(x+(thick*2), y, thick, s, shape_color) elif c in "hH": cover_image.rect(x+s-(thick*3), y, thick, s, shape_color) elif c in "jJ": cover_image.arc(x, y-s, 2*s, 2*s, 90, 180, shape_color, thick) elif c in "kK": cover_image.arc(x-s, y-s, 2*s, 2*s, 0, 90, shape_color, thick) elif c in "lL": cover_image.rect(x, y, thick, s, shape_color) cover_image.rect(x, y+s-thick, s, thick, shape_color) elif c == ":": cover_image.rect(x+s-thick, y, thick, s, shape_color) cover_image.rect(x, y+s-thick, s, thick, shape_color) elif c in "zZ": cover_image.triangle(x, y+(s/2), x+(s/2), y, x+s, y+(s/2), shape_color) cover_image.triangle(x, y+(s/2), x+(s/2), y+s, x+s, y+(s/2), shape_color) elif c in "xX": 
cover_image.ellipse(x+(s/2), y+(s/3), thick*2, thick*2, shape_color) cover_image.ellipse(x+(s/3), y+s-(s/3), thick*2, thick*2, shape_color) cover_image.ellipse(x+s-(s/3), y+s-(s/3), thick*2, thick*2, shape_color) elif c in "cC": cover_image.rect(x, y+(thick*3), s, thick, shape_color) elif c in "vV": cover_image.rect(x, y, s, s, shape_color) cover_image.triangle(x+thick, y, x+(s/2), y+(s/2)-thick, x+s-thick, y, base_color) cover_image.triangle(x, y+thick, x+(s/2)-thick, y+(s/2), x, y+s-thick, base_color) cover_image.triangle(x+thick, y+s, x+(s/2), y+(s/2)+thick, x+s-thick, y+s, base_color) cover_image.triangle(x+s, y+thick, x+s, y+s-thick, x+(s/2)+thick, y+(s/2), base_color) elif c in "bB": cover_image.rect(x+(thick*3), y, thick, s, shape_color) elif c in "nN": cover_image.rect(x, y, s, s, shape_color) cover_image.triangle(x, y, x+s-thick, y, x, y+s-thick, base_color) cover_image.triangle(x+thick, y+s, x+s, y+s, x+s, y+thick, base_color) elif c in "mM": cover_image.rect(x, y, s, s, shape_color) cover_image.triangle(x+thick, y, x+s, y, x+s, y+s-thick, base_color) cover_image.triangle(x, y+thick, x, y+s, x+s-thick, y + s, base_color) elif c == ",": cover_image.rect(x+(s/2), y+(s/2), s/2, s/2, shape_color) elif c == ";": cover_image.rect(x, y+(s/2), s/2, s/2, shape_color) elif c == "?": cover_image.rect(x, y, s/2, s/2, shape_color) cover_image.rect(x+(s/2), y+(s/2), s/2, s/2, shape_color) elif c == "<": cover_image.rect(x+(s/2), y, s/2, s/2, shape_color) elif c == ">": cover_image.rect(x, y, s/2, s/2, shape_color) elif c == "@": cover_image.rect(x, y+(s/2)-(thick/2), s, thick, shape_color) elif c == "[": cover_image.rect(x+(s/2)-(thick/2), y, thick, s, shape_color) elif c == "]": cover_image.rect(x, y+(s/2)-(thick/2), s, thick, shape_color) cover_image.rect(x+(s/2)-(thick/2), y, thick, s, shape_color) elif c == "0": cover_image.rect(x+(s/2)-(thick/2), y+(s/2)-(thick/2), thick, s/2+thick/2, shape_color) cover_image.rect(x+(s/2)-(thick/2), y+(s/2)-(thick/2), s/2+thick/2, 
thick, shape_color) elif c == "1": cover_image.rect(x, y+(s/2)-(thick/2), s, thick, shape_color) cover_image.rect(x+(s/2)-(thick/2), y, thick, s/2+thick/2, shape_color) elif c == "2": cover_image.rect(x, y+(s/2)-(thick/2), s, thick, shape_color) cover_image.rect(x+(s/2)-(thick/2), y+(s/2)-(thick/2), thick, s/2+thick/2, shape_color) elif c == "3": cover_image.rect(x, y+(s/2)-(thick/2), s/2+thick/2, thick, shape_color) cover_image.rect(x+(s/2)-(thick/2), y, thick, s, shape_color) elif c == "4": cover_image.rect(x, y, thick*2, s, shape_color) elif c == "5": cover_image.rect(x, y, thick*3, s, shape_color) elif c == "6": cover_image.rect(x+s-(thick*3), y, thick*3, s, shape_color) elif c == "7": cover_image.rect(x, y, s, thick*2, shape_color) elif c == "8": cover_image.rect(x, y, s, thick*3, shape_color) elif c == "9": cover_image.rect(x, y+s-(thick*3), s, thick*3, shape_color) elif c == ".": cover_image.rect(x+(s/2)-(thick/2), y+(s/2)-(thick/2), thick, s/2+thick/2, shape_color) cover_image.rect(x, y+(s/2)-(thick/2), s/2+thick/2, thick, shape_color) elif c == "=": cover_image.rect(x+(s/2)-(thick/2), y, thick, s/2+thick/2, shape_color) cover_image.rect(x, y+(s/2)-(thick/2), s/2, thick, shape_color) elif c == "-": cover_image.rect(x+(s/2)-(thick/2), y, thick, s/2+thick/2, shape_color) cover_image.rect(x+(s/2)-(thick/2), y+(s/2)-(thick/2), s/2+thick/2, thick, shape_color) elif c == "+": cover_image.rect(x+(s/2)-(thick/2), y+(s/2)-(thick/2), s/2+thick/2, thick, shape_color) cover_image.rect(x+(s/2)-(thick/2), y, thick, s, shape_color) elif c == "*": cover_image.rect(x+s-(thick*2), y, thick*2, s, shape_color) elif c == "/": cover_image.rect(x, y+s-(thick*2), s, thick*2, shape_color) elif c == " ": cover_image.rect(x, y, s, s, base_color) else: assert not "Implement." # If the text is long, use a smaller font size. 
def scale_font(text, font_name, font_properties): (font_size, font_slant, font_weight) = font_properties w = len(text) * font_size if w > cover_width * 3: #This is an empirical, unintelligent, heuristic. return (font_size * 0.8, font_slant, font_weight) elif w < cover_width : return (font_size * 1.2, font_slant, font_weight) else: return font_properties # return a font appropriate for the text. Uses Noto CJK if text contains CJK, otherwise # Noto Sans. def select_font(text): for char in text: if ord(char) >= 0x4E00: return 'Noto Sans CJK SC' return 'Noto Sans' # Allocate fonts for the title and the author, and draw the text. def drawText(): fill = Image.colorRGB(50, 50, 50) title_font_size = cover_width * 0.08 subtitle_font_size = cover_width * 0.05 title_font_properties = (title_font_size, cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_BOLD) subtitle_font_properties = (subtitle_font_size, cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL) title_font_family = select_font(title) subtitle_font_family = select_font(subtitle) title_font_properties = scale_font(title, title_font_family, title_font_properties) subtitle_font_properties = scale_font( subtitle, subtitle_font_family, subtitle_font_properties ) title_font = cover_image.font(title_font_family, title_font_properties) subtitle_font = cover_image.font(subtitle_font_family, subtitle_font_properties) title_height = (cover_height - cover_width - (cover_height * cover_margin / 100)) * 0.75 x = cover_height * cover_margin / 100 y = cover_height * cover_margin / 100 * 2 width = cover_width - (2 * cover_height * cover_margin / 100) height = title_height title_lines, font_height = cover_image.text(title, x, y, width, height, fill, title_font) if subtitle: y = min( y + font_height * title_lines * cover_height, title_height - subtitle_font_properties[0] ) cover_image.text(subtitle, x, y, width, height, fill, subtitle_font) author_font_size = cover_width * 0.07 author_font_properties = ( author_font_size, 
cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL ) author_font = cover_image.font(select_font(author), author_font_properties) author_height = (cover_height - cover_width - (cover_height * cover_margin / 100)) * 0.25 x = cover_height * cover_margin / 100 y = title_height width = cover_width - (2 * cover_height * cover_margin / 100) height = author_height cover_image.text(author, x, y, width, height, fill, author_font) # Create the new cover image. cover_margin = 2 cover_image = Image(cover_width, cover_height) # Draw the book cover. shape_color, base_color = processColors() drawBackground() drawArtwork() drawText() # Return the cover Image instance. return cover_image
[ "def", "draw", "(", "title", ",", "subtitle", ",", "author", ",", "cover_width", "=", "400", ",", "cover_height", "=", "600", ")", ":", "# Based on some initial constants and the title+author strings, generate a base", "# background color and a shape color to draw onto the background. Try to keep", "# these two colors somewhat compatible with each other by varying only their hue.", "def", "processColors", "(", ")", ":", "base_saturation", "=", "100", "base_brightness", "=", "90", "color_distance", "=", "100", "invert", "=", "True", "counts", "=", "len", "(", "title", ")", "+", "len", "(", "author", ")", "color_seed", "=", "int", "(", "_map", "(", "_clip", "(", "counts", ",", "2", ",", "80", ")", ",", "2", ",", "80", ",", "10", ",", "360", ")", ")", "shape_color", "=", "Image", ".", "colorHSB", "(", "color_seed", ",", "base_saturation", ",", "base_brightness", "-", "(", "counts", "%", "20", ")", ")", "base_color", "=", "Image", ".", "colorHSB", "(", "(", "color_seed", "+", "color_distance", ")", "%", "360", ",", "base_saturation", ",", "base_brightness", ")", "if", "invert", ":", "shape_color", ",", "base_color", "=", "base_color", ",", "shape_color", "if", "(", "counts", "%", "10", ")", "==", "0", ":", "shape_color", ",", "base_color", "=", "base_color", ",", "shape_color", "return", "shape_color", ",", "base_color", "# Fill the background of the image with white.", "def", "drawBackground", "(", ")", ":", "fill", "=", "Image", ".", "colorRGB", "(", "255", ",", "255", ",", "255", ")", "cover_image", ".", "rect", "(", "0", ",", "0", ",", "cover_width", ",", "cover_height", ",", "fill", ")", "# Draw the actual artwork for the cover. 
Given the length of the title string,", "# generate an appropriate sized grid and draw C64 PETSCII into each of the cells.", "# https://www.c64-wiki.com/index.php/PETSCII", "# https://en.wikipedia.org/wiki/PETSCII#/media/File:PET_Keyboard.svg", "def", "drawArtwork", "(", ")", ":", "artwork_start_x", "=", "0", "artwork_start_y", "=", "cover_height", "-", "cover_width", "grid_count", ",", "grid_total", ",", "grid_size", "=", "breakGrid", "(", ")", "cover_image", ".", "rect", "(", "0", ",", "0", ",", "cover_width", ",", "cover_height", "*", "cover_margin", "/", "100", ",", "base_color", ")", "cover_image", ".", "rect", "(", "0", ",", "0", "+", "artwork_start_y", ",", "cover_width", ",", "cover_width", ",", "base_color", ")", "c64_title", "=", "c64Convert", "(", ")", "for", "c", ",", "i", "in", "zip", "(", "itertools", ".", "cycle", "(", "c64_title", ")", ",", "range", "(", "0", ",", "grid_total", ")", ")", ":", "grid_x", "=", "int", "(", "i", "%", "grid_count", ")", "grid_y", "=", "int", "(", "i", "/", "grid_count", ")", "x", "=", "grid_x", "*", "grid_size", "+", "artwork_start_x", "y", "=", "grid_y", "*", "grid_size", "+", "artwork_start_y", "drawShape", "(", "c", ",", "x", ",", "y", ",", "grid_size", ")", "# Compute the graphics grid size based on the length of the book title.", "def", "breakGrid", "(", ")", ":", "min_title", "=", "2", "max_title", "=", "60", "length", "=", "_clip", "(", "len", "(", "title", ")", ",", "min_title", ",", "max_title", ")", "grid_count", "=", "int", "(", "_map", "(", "length", ",", "min_title", ",", "max_title", ",", "2", ",", "11", ")", ")", "grid_total", "=", "grid_count", "*", "grid_count", "grid_size", "=", "cover_width", "/", "grid_count", "return", "grid_count", ",", "grid_total", ",", "grid_size", "# Given the title of the book, filter through its characters and ensure", "# that only a certain range is used for the title; characters outside of", "# that range are replaced with a somewhat random character.", "def", "c64Convert", 
"(", ")", ":", "c64_letters", "=", "\" qQwWeErRtTyYuUiIoOpPaAsSdDfFgGhHjJkKlL:zZxXcCvVbBnNmM,;?<>@[]1234567890.=-+*/\"", "c64_title", "=", "\"\"", "for", "c", "in", "title", ":", "if", "c", "in", "c64_letters", ":", "c64_title", "+=", "c", "else", ":", "c64_title", "+=", "c64_letters", "[", "ord", "(", "c", ")", "%", "len", "(", "c64_letters", ")", "]", "return", "c64_title", "# Given an alphabetic character from the book's title string and the x, y", "# coordinates and size of the cell within the cover grid, draw a PETSCII", "# shape into that cell.", "def", "drawShape", "(", "c", ",", "x", ",", "y", ",", "s", ")", ":", "shape_thickness", "=", "10", "thick", "=", "int", "(", "s", "*", "shape_thickness", "/", "100", ")", "if", "c", "in", "\"qQ\"", ":", "cover_image", ".", "ellipse", "(", "x", ",", "y", ",", "s", ",", "s", ",", "shape_color", ")", "elif", "c", "in", "\"wW\"", ":", "cover_image", ".", "ellipse", "(", "x", ",", "y", ",", "s", ",", "s", ",", "shape_color", ")", "cover_image", ".", "ellipse", "(", "x", "+", "thick", ",", "y", "+", "thick", ",", "s", "-", "(", "thick", "*", "2", ")", ",", "s", "-", "(", "thick", "*", "2", ")", ",", "base_color", ")", "elif", "c", "in", "\"eE\"", ":", "cover_image", ".", "rect", "(", "x", ",", "y", "+", "thick", ",", "s", ",", "thick", ",", "shape_color", ")", "elif", "c", "in", "\"rR\"", ":", "cover_image", ".", "rect", "(", "x", ",", "y", "+", "s", "-", "(", "thick", "*", "2", ")", ",", "s", ",", "thick", ",", "shape_color", ")", "elif", "c", "in", "\"tT\"", ":", "cover_image", ".", "rect", "(", "x", "+", "thick", ",", "y", ",", "thick", ",", "s", ",", "shape_color", ")", "elif", "c", "in", "\"yY\"", ":", "cover_image", ".", "rect", "(", "x", "+", "s", "-", "(", "thick", "*", "2", ")", ",", "y", ",", "thick", ",", "s", ",", "shape_color", ")", "elif", "c", "in", "\"uU\"", ":", "cover_image", ".", "arc", "(", "x", ",", "y", ",", "2", "*", "s", ",", "2", "*", "s", ",", "180", ",", "270", ",", "shape_color", ",", "thick", 
")", "elif", "c", "in", "\"iI\"", ":", "cover_image", ".", "arc", "(", "x", "-", "s", ",", "y", ",", "2", "*", "s", ",", "2", "*", "s", ",", "270", ",", "360", ",", "shape_color", ",", "thick", ")", "elif", "c", "in", "\"oO\"", ":", "cover_image", ".", "rect", "(", "x", ",", "y", ",", "s", ",", "thick", ",", "shape_color", ")", "cover_image", ".", "rect", "(", "x", ",", "y", ",", "thick", ",", "s", ",", "shape_color", ")", "elif", "c", "in", "\"pP\"", ":", "cover_image", ".", "rect", "(", "x", ",", "y", ",", "s", ",", "thick", ",", "shape_color", ")", "cover_image", ".", "rect", "(", "x", "+", "s", "-", "thick", ",", "y", ",", "thick", ",", "s", ",", "shape_color", ")", "elif", "c", "in", "\"aA\"", ":", "cover_image", ".", "triangle", "(", "x", ",", "y", "+", "s", ",", "x", "+", "(", "s", "/", "2", ")", ",", "y", ",", "x", "+", "s", ",", "y", "+", "s", ",", "shape_color", ")", "elif", "c", "in", "\"sS\"", ":", "cover_image", ".", "triangle", "(", "x", ",", "y", ",", "x", "+", "(", "s", "/", "2", ")", ",", "y", "+", "s", ",", "x", "+", "s", ",", "y", ",", "shape_color", ")", "elif", "c", "in", "\"dD\"", ":", "cover_image", ".", "rect", "(", "x", ",", "y", "+", "(", "thick", "*", "2", ")", ",", "s", ",", "thick", ",", "shape_color", ")", "elif", "c", "in", "\"fF\"", ":", "cover_image", ".", "rect", "(", "x", ",", "y", "+", "s", "-", "(", "thick", "*", "3", ")", ",", "s", ",", "thick", ",", "shape_color", ")", "elif", "c", "in", "\"gG\"", ":", "cover_image", ".", "rect", "(", "x", "+", "(", "thick", "*", "2", ")", ",", "y", ",", "thick", ",", "s", ",", "shape_color", ")", "elif", "c", "in", "\"hH\"", ":", "cover_image", ".", "rect", "(", "x", "+", "s", "-", "(", "thick", "*", "3", ")", ",", "y", ",", "thick", ",", "s", ",", "shape_color", ")", "elif", "c", "in", "\"jJ\"", ":", "cover_image", ".", "arc", "(", "x", ",", "y", "-", "s", ",", "2", "*", "s", ",", "2", "*", "s", ",", "90", ",", "180", ",", "shape_color", ",", "thick", ")", "elif", "c", "in", "\"kK\"", ":", 
"cover_image", ".", "arc", "(", "x", "-", "s", ",", "y", "-", "s", ",", "2", "*", "s", ",", "2", "*", "s", ",", "0", ",", "90", ",", "shape_color", ",", "thick", ")", "elif", "c", "in", "\"lL\"", ":", "cover_image", ".", "rect", "(", "x", ",", "y", ",", "thick", ",", "s", ",", "shape_color", ")", "cover_image", ".", "rect", "(", "x", ",", "y", "+", "s", "-", "thick", ",", "s", ",", "thick", ",", "shape_color", ")", "elif", "c", "==", "\":\"", ":", "cover_image", ".", "rect", "(", "x", "+", "s", "-", "thick", ",", "y", ",", "thick", ",", "s", ",", "shape_color", ")", "cover_image", ".", "rect", "(", "x", ",", "y", "+", "s", "-", "thick", ",", "s", ",", "thick", ",", "shape_color", ")", "elif", "c", "in", "\"zZ\"", ":", "cover_image", ".", "triangle", "(", "x", ",", "y", "+", "(", "s", "/", "2", ")", ",", "x", "+", "(", "s", "/", "2", ")", ",", "y", ",", "x", "+", "s", ",", "y", "+", "(", "s", "/", "2", ")", ",", "shape_color", ")", "cover_image", ".", "triangle", "(", "x", ",", "y", "+", "(", "s", "/", "2", ")", ",", "x", "+", "(", "s", "/", "2", ")", ",", "y", "+", "s", ",", "x", "+", "s", ",", "y", "+", "(", "s", "/", "2", ")", ",", "shape_color", ")", "elif", "c", "in", "\"xX\"", ":", "cover_image", ".", "ellipse", "(", "x", "+", "(", "s", "/", "2", ")", ",", "y", "+", "(", "s", "/", "3", ")", ",", "thick", "*", "2", ",", "thick", "*", "2", ",", "shape_color", ")", "cover_image", ".", "ellipse", "(", "x", "+", "(", "s", "/", "3", ")", ",", "y", "+", "s", "-", "(", "s", "/", "3", ")", ",", "thick", "*", "2", ",", "thick", "*", "2", ",", "shape_color", ")", "cover_image", ".", "ellipse", "(", "x", "+", "s", "-", "(", "s", "/", "3", ")", ",", "y", "+", "s", "-", "(", "s", "/", "3", ")", ",", "thick", "*", "2", ",", "thick", "*", "2", ",", "shape_color", ")", "elif", "c", "in", "\"cC\"", ":", "cover_image", ".", "rect", "(", "x", ",", "y", "+", "(", "thick", "*", "3", ")", ",", "s", ",", "thick", ",", "shape_color", ")", "elif", "c", "in", "\"vV\"", ":", 
"cover_image", ".", "rect", "(", "x", ",", "y", ",", "s", ",", "s", ",", "shape_color", ")", "cover_image", ".", "triangle", "(", "x", "+", "thick", ",", "y", ",", "x", "+", "(", "s", "/", "2", ")", ",", "y", "+", "(", "s", "/", "2", ")", "-", "thick", ",", "x", "+", "s", "-", "thick", ",", "y", ",", "base_color", ")", "cover_image", ".", "triangle", "(", "x", ",", "y", "+", "thick", ",", "x", "+", "(", "s", "/", "2", ")", "-", "thick", ",", "y", "+", "(", "s", "/", "2", ")", ",", "x", ",", "y", "+", "s", "-", "thick", ",", "base_color", ")", "cover_image", ".", "triangle", "(", "x", "+", "thick", ",", "y", "+", "s", ",", "x", "+", "(", "s", "/", "2", ")", ",", "y", "+", "(", "s", "/", "2", ")", "+", "thick", ",", "x", "+", "s", "-", "thick", ",", "y", "+", "s", ",", "base_color", ")", "cover_image", ".", "triangle", "(", "x", "+", "s", ",", "y", "+", "thick", ",", "x", "+", "s", ",", "y", "+", "s", "-", "thick", ",", "x", "+", "(", "s", "/", "2", ")", "+", "thick", ",", "y", "+", "(", "s", "/", "2", ")", ",", "base_color", ")", "elif", "c", "in", "\"bB\"", ":", "cover_image", ".", "rect", "(", "x", "+", "(", "thick", "*", "3", ")", ",", "y", ",", "thick", ",", "s", ",", "shape_color", ")", "elif", "c", "in", "\"nN\"", ":", "cover_image", ".", "rect", "(", "x", ",", "y", ",", "s", ",", "s", ",", "shape_color", ")", "cover_image", ".", "triangle", "(", "x", ",", "y", ",", "x", "+", "s", "-", "thick", ",", "y", ",", "x", ",", "y", "+", "s", "-", "thick", ",", "base_color", ")", "cover_image", ".", "triangle", "(", "x", "+", "thick", ",", "y", "+", "s", ",", "x", "+", "s", ",", "y", "+", "s", ",", "x", "+", "s", ",", "y", "+", "thick", ",", "base_color", ")", "elif", "c", "in", "\"mM\"", ":", "cover_image", ".", "rect", "(", "x", ",", "y", ",", "s", ",", "s", ",", "shape_color", ")", "cover_image", ".", "triangle", "(", "x", "+", "thick", ",", "y", ",", "x", "+", "s", ",", "y", ",", "x", "+", "s", ",", "y", "+", "s", "-", "thick", ",", "base_color", ")", 
"cover_image", ".", "triangle", "(", "x", ",", "y", "+", "thick", ",", "x", ",", "y", "+", "s", ",", "x", "+", "s", "-", "thick", ",", "y", "+", "s", ",", "base_color", ")", "elif", "c", "==", "\",\"", ":", "cover_image", ".", "rect", "(", "x", "+", "(", "s", "/", "2", ")", ",", "y", "+", "(", "s", "/", "2", ")", ",", "s", "/", "2", ",", "s", "/", "2", ",", "shape_color", ")", "elif", "c", "==", "\";\"", ":", "cover_image", ".", "rect", "(", "x", ",", "y", "+", "(", "s", "/", "2", ")", ",", "s", "/", "2", ",", "s", "/", "2", ",", "shape_color", ")", "elif", "c", "==", "\"?\"", ":", "cover_image", ".", "rect", "(", "x", ",", "y", ",", "s", "/", "2", ",", "s", "/", "2", ",", "shape_color", ")", "cover_image", ".", "rect", "(", "x", "+", "(", "s", "/", "2", ")", ",", "y", "+", "(", "s", "/", "2", ")", ",", "s", "/", "2", ",", "s", "/", "2", ",", "shape_color", ")", "elif", "c", "==", "\"<\"", ":", "cover_image", ".", "rect", "(", "x", "+", "(", "s", "/", "2", ")", ",", "y", ",", "s", "/", "2", ",", "s", "/", "2", ",", "shape_color", ")", "elif", "c", "==", "\">\"", ":", "cover_image", ".", "rect", "(", "x", ",", "y", ",", "s", "/", "2", ",", "s", "/", "2", ",", "shape_color", ")", "elif", "c", "==", "\"@\"", ":", "cover_image", ".", "rect", "(", "x", ",", "y", "+", "(", "s", "/", "2", ")", "-", "(", "thick", "/", "2", ")", ",", "s", ",", "thick", ",", "shape_color", ")", "elif", "c", "==", "\"[\"", ":", "cover_image", ".", "rect", "(", "x", "+", "(", "s", "/", "2", ")", "-", "(", "thick", "/", "2", ")", ",", "y", ",", "thick", ",", "s", ",", "shape_color", ")", "elif", "c", "==", "\"]\"", ":", "cover_image", ".", "rect", "(", "x", ",", "y", "+", "(", "s", "/", "2", ")", "-", "(", "thick", "/", "2", ")", ",", "s", ",", "thick", ",", "shape_color", ")", "cover_image", ".", "rect", "(", "x", "+", "(", "s", "/", "2", ")", "-", "(", "thick", "/", "2", ")", ",", "y", ",", "thick", ",", "s", ",", "shape_color", ")", "elif", "c", "==", "\"0\"", ":", "cover_image", ".", 
"rect", "(", "x", "+", "(", "s", "/", "2", ")", "-", "(", "thick", "/", "2", ")", ",", "y", "+", "(", "s", "/", "2", ")", "-", "(", "thick", "/", "2", ")", ",", "thick", ",", "s", "/", "2", "+", "thick", "/", "2", ",", "shape_color", ")", "cover_image", ".", "rect", "(", "x", "+", "(", "s", "/", "2", ")", "-", "(", "thick", "/", "2", ")", ",", "y", "+", "(", "s", "/", "2", ")", "-", "(", "thick", "/", "2", ")", ",", "s", "/", "2", "+", "thick", "/", "2", ",", "thick", ",", "shape_color", ")", "elif", "c", "==", "\"1\"", ":", "cover_image", ".", "rect", "(", "x", ",", "y", "+", "(", "s", "/", "2", ")", "-", "(", "thick", "/", "2", ")", ",", "s", ",", "thick", ",", "shape_color", ")", "cover_image", ".", "rect", "(", "x", "+", "(", "s", "/", "2", ")", "-", "(", "thick", "/", "2", ")", ",", "y", ",", "thick", ",", "s", "/", "2", "+", "thick", "/", "2", ",", "shape_color", ")", "elif", "c", "==", "\"2\"", ":", "cover_image", ".", "rect", "(", "x", ",", "y", "+", "(", "s", "/", "2", ")", "-", "(", "thick", "/", "2", ")", ",", "s", ",", "thick", ",", "shape_color", ")", "cover_image", ".", "rect", "(", "x", "+", "(", "s", "/", "2", ")", "-", "(", "thick", "/", "2", ")", ",", "y", "+", "(", "s", "/", "2", ")", "-", "(", "thick", "/", "2", ")", ",", "thick", ",", "s", "/", "2", "+", "thick", "/", "2", ",", "shape_color", ")", "elif", "c", "==", "\"3\"", ":", "cover_image", ".", "rect", "(", "x", ",", "y", "+", "(", "s", "/", "2", ")", "-", "(", "thick", "/", "2", ")", ",", "s", "/", "2", "+", "thick", "/", "2", ",", "thick", ",", "shape_color", ")", "cover_image", ".", "rect", "(", "x", "+", "(", "s", "/", "2", ")", "-", "(", "thick", "/", "2", ")", ",", "y", ",", "thick", ",", "s", ",", "shape_color", ")", "elif", "c", "==", "\"4\"", ":", "cover_image", ".", "rect", "(", "x", ",", "y", ",", "thick", "*", "2", ",", "s", ",", "shape_color", ")", "elif", "c", "==", "\"5\"", ":", "cover_image", ".", "rect", "(", "x", ",", "y", ",", "thick", "*", "3", ",", "s", ",", 
"shape_color", ")", "elif", "c", "==", "\"6\"", ":", "cover_image", ".", "rect", "(", "x", "+", "s", "-", "(", "thick", "*", "3", ")", ",", "y", ",", "thick", "*", "3", ",", "s", ",", "shape_color", ")", "elif", "c", "==", "\"7\"", ":", "cover_image", ".", "rect", "(", "x", ",", "y", ",", "s", ",", "thick", "*", "2", ",", "shape_color", ")", "elif", "c", "==", "\"8\"", ":", "cover_image", ".", "rect", "(", "x", ",", "y", ",", "s", ",", "thick", "*", "3", ",", "shape_color", ")", "elif", "c", "==", "\"9\"", ":", "cover_image", ".", "rect", "(", "x", ",", "y", "+", "s", "-", "(", "thick", "*", "3", ")", ",", "s", ",", "thick", "*", "3", ",", "shape_color", ")", "elif", "c", "==", "\".\"", ":", "cover_image", ".", "rect", "(", "x", "+", "(", "s", "/", "2", ")", "-", "(", "thick", "/", "2", ")", ",", "y", "+", "(", "s", "/", "2", ")", "-", "(", "thick", "/", "2", ")", ",", "thick", ",", "s", "/", "2", "+", "thick", "/", "2", ",", "shape_color", ")", "cover_image", ".", "rect", "(", "x", ",", "y", "+", "(", "s", "/", "2", ")", "-", "(", "thick", "/", "2", ")", ",", "s", "/", "2", "+", "thick", "/", "2", ",", "thick", ",", "shape_color", ")", "elif", "c", "==", "\"=\"", ":", "cover_image", ".", "rect", "(", "x", "+", "(", "s", "/", "2", ")", "-", "(", "thick", "/", "2", ")", ",", "y", ",", "thick", ",", "s", "/", "2", "+", "thick", "/", "2", ",", "shape_color", ")", "cover_image", ".", "rect", "(", "x", ",", "y", "+", "(", "s", "/", "2", ")", "-", "(", "thick", "/", "2", ")", ",", "s", "/", "2", ",", "thick", ",", "shape_color", ")", "elif", "c", "==", "\"-\"", ":", "cover_image", ".", "rect", "(", "x", "+", "(", "s", "/", "2", ")", "-", "(", "thick", "/", "2", ")", ",", "y", ",", "thick", ",", "s", "/", "2", "+", "thick", "/", "2", ",", "shape_color", ")", "cover_image", ".", "rect", "(", "x", "+", "(", "s", "/", "2", ")", "-", "(", "thick", "/", "2", ")", ",", "y", "+", "(", "s", "/", "2", ")", "-", "(", "thick", "/", "2", ")", ",", "s", "/", "2", "+", "thick", "/", 
"2", ",", "thick", ",", "shape_color", ")", "elif", "c", "==", "\"+\"", ":", "cover_image", ".", "rect", "(", "x", "+", "(", "s", "/", "2", ")", "-", "(", "thick", "/", "2", ")", ",", "y", "+", "(", "s", "/", "2", ")", "-", "(", "thick", "/", "2", ")", ",", "s", "/", "2", "+", "thick", "/", "2", ",", "thick", ",", "shape_color", ")", "cover_image", ".", "rect", "(", "x", "+", "(", "s", "/", "2", ")", "-", "(", "thick", "/", "2", ")", ",", "y", ",", "thick", ",", "s", ",", "shape_color", ")", "elif", "c", "==", "\"*\"", ":", "cover_image", ".", "rect", "(", "x", "+", "s", "-", "(", "thick", "*", "2", ")", ",", "y", ",", "thick", "*", "2", ",", "s", ",", "shape_color", ")", "elif", "c", "==", "\"/\"", ":", "cover_image", ".", "rect", "(", "x", ",", "y", "+", "s", "-", "(", "thick", "*", "2", ")", ",", "s", ",", "thick", "*", "2", ",", "shape_color", ")", "elif", "c", "==", "\" \"", ":", "cover_image", ".", "rect", "(", "x", ",", "y", ",", "s", ",", "s", ",", "base_color", ")", "else", ":", "assert", "not", "\"Implement.\"", "# If the text is long, use a smaller font size.", "def", "scale_font", "(", "text", ",", "font_name", ",", "font_properties", ")", ":", "(", "font_size", ",", "font_slant", ",", "font_weight", ")", "=", "font_properties", "w", "=", "len", "(", "text", ")", "*", "font_size", "if", "w", ">", "cover_width", "*", "3", ":", "#This is an empirical, unintelligent, heuristic.", "return", "(", "font_size", "*", "0.8", ",", "font_slant", ",", "font_weight", ")", "elif", "w", "<", "cover_width", ":", "return", "(", "font_size", "*", "1.2", ",", "font_slant", ",", "font_weight", ")", "else", ":", "return", "font_properties", "# return a font appropriate for the text. 
Uses Noto CJK if text contains CJK, otherwise", "# Noto Sans.", "def", "select_font", "(", "text", ")", ":", "for", "char", "in", "text", ":", "if", "ord", "(", "char", ")", ">=", "0x4E00", ":", "return", "'Noto Sans CJK SC'", "return", "'Noto Sans'", "# Allocate fonts for the title and the author, and draw the text.", "def", "drawText", "(", ")", ":", "fill", "=", "Image", ".", "colorRGB", "(", "50", ",", "50", ",", "50", ")", "title_font_size", "=", "cover_width", "*", "0.08", "subtitle_font_size", "=", "cover_width", "*", "0.05", "title_font_properties", "=", "(", "title_font_size", ",", "cairo", ".", "FONT_SLANT_NORMAL", ",", "cairo", ".", "FONT_WEIGHT_BOLD", ")", "subtitle_font_properties", "=", "(", "subtitle_font_size", ",", "cairo", ".", "FONT_SLANT_NORMAL", ",", "cairo", ".", "FONT_WEIGHT_NORMAL", ")", "title_font_family", "=", "select_font", "(", "title", ")", "subtitle_font_family", "=", "select_font", "(", "subtitle", ")", "title_font_properties", "=", "scale_font", "(", "title", ",", "title_font_family", ",", "title_font_properties", ")", "subtitle_font_properties", "=", "scale_font", "(", "subtitle", ",", "subtitle_font_family", ",", "subtitle_font_properties", ")", "title_font", "=", "cover_image", ".", "font", "(", "title_font_family", ",", "title_font_properties", ")", "subtitle_font", "=", "cover_image", ".", "font", "(", "subtitle_font_family", ",", "subtitle_font_properties", ")", "title_height", "=", "(", "cover_height", "-", "cover_width", "-", "(", "cover_height", "*", "cover_margin", "/", "100", ")", ")", "*", "0.75", "x", "=", "cover_height", "*", "cover_margin", "/", "100", "y", "=", "cover_height", "*", "cover_margin", "/", "100", "*", "2", "width", "=", "cover_width", "-", "(", "2", "*", "cover_height", "*", "cover_margin", "/", "100", ")", "height", "=", "title_height", "title_lines", ",", "font_height", "=", "cover_image", ".", "text", "(", "title", ",", "x", ",", "y", ",", "width", ",", "height", ",", "fill", ",", "title_font", ")", 
"if", "subtitle", ":", "y", "=", "min", "(", "y", "+", "font_height", "*", "title_lines", "*", "cover_height", ",", "title_height", "-", "subtitle_font_properties", "[", "0", "]", ")", "cover_image", ".", "text", "(", "subtitle", ",", "x", ",", "y", ",", "width", ",", "height", ",", "fill", ",", "subtitle_font", ")", "author_font_size", "=", "cover_width", "*", "0.07", "author_font_properties", "=", "(", "author_font_size", ",", "cairo", ".", "FONT_SLANT_NORMAL", ",", "cairo", ".", "FONT_WEIGHT_NORMAL", ")", "author_font", "=", "cover_image", ".", "font", "(", "select_font", "(", "author", ")", ",", "author_font_properties", ")", "author_height", "=", "(", "cover_height", "-", "cover_width", "-", "(", "cover_height", "*", "cover_margin", "/", "100", ")", ")", "*", "0.25", "x", "=", "cover_height", "*", "cover_margin", "/", "100", "y", "=", "title_height", "width", "=", "cover_width", "-", "(", "2", "*", "cover_height", "*", "cover_margin", "/", "100", ")", "height", "=", "author_height", "cover_image", ".", "text", "(", "author", ",", "x", ",", "y", ",", "width", ",", "height", ",", "fill", ",", "author_font", ")", "# Create the new cover image.", "cover_margin", "=", "2", "cover_image", "=", "Image", "(", "cover_width", ",", "cover_height", ")", "# Draw the book cover.", "shape_color", ",", "base_color", "=", "processColors", "(", ")", "drawBackground", "(", ")", "drawArtwork", "(", ")", "drawText", "(", ")", "# Return the cover Image instance.", "return", "cover_image" ]
45.235099
24.592715
def OIDC_UNAUTHENTICATED_SESSION_MANAGEMENT_KEY(self): """ OPTIONAL. Supply a fixed string to use as browser-state key for unauthenticated clients. """ # Memoize generated value if not self._unauthenticated_session_management_key: self._unauthenticated_session_management_key = ''.join( random.choice(string.ascii_uppercase + string.digits) for _ in range(100)) return self._unauthenticated_session_management_key
[ "def", "OIDC_UNAUTHENTICATED_SESSION_MANAGEMENT_KEY", "(", "self", ")", ":", "# Memoize generated value", "if", "not", "self", ".", "_unauthenticated_session_management_key", ":", "self", ".", "_unauthenticated_session_management_key", "=", "''", ".", "join", "(", "random", ".", "choice", "(", "string", ".", "ascii_uppercase", "+", "string", ".", "digits", ")", "for", "_", "in", "range", "(", "100", ")", ")", "return", "self", ".", "_unauthenticated_session_management_key" ]
48.1
23.3
def _varargs_checks_gen(self, decorated_function, function_spec, arg_specs): """ Generate checks for positional variable argument (varargs) testing :param decorated_function: function decorator :param function_spec: function inspect information :param arg_specs: argument specification (same as arg_specs in :meth:`.Verifier.decorate`) :return: internal structure, that is used by :meth:`.Verifier._varargs_checks_test` """ inspected_varargs = function_spec.varargs if inspected_varargs is not None and inspected_varargs in arg_specs.keys(): return self.check( arg_specs[inspected_varargs], inspected_varargs, decorated_function )
[ "def", "_varargs_checks_gen", "(", "self", ",", "decorated_function", ",", "function_spec", ",", "arg_specs", ")", ":", "inspected_varargs", "=", "function_spec", ".", "varargs", "if", "inspected_varargs", "is", "not", "None", "and", "inspected_varargs", "in", "arg_specs", ".", "keys", "(", ")", ":", "return", "self", ".", "check", "(", "arg_specs", "[", "inspected_varargs", "]", ",", "inspected_varargs", ",", "decorated_function", ")" ]
43
26.533333
def _generate_notebooks_by_category(notebook_object, dict_by_tag): """ Internal function that is used for generation of the page "Notebooks by Category". ---------- Parameters ---------- notebook_object : notebook object Object of "notebook" class where the body will be created. dict_by_tag : dict Dictionary where each key is a tag and the respective value will be a list containing the Notebooks (title and filename) that include this tag. """ # ============================ Insertion of an opening text ==================================== markdown_cell = OPEN_IMAGE # == Generation of a table that group Notebooks by category the information about each signal == category_list = list(NOTEBOOK_KEYS.keys()) tag_keys = list(dict_by_tag.keys()) markdown_cell += """\n<table id="notebook_list" width="100%"> <tr> <td width="20%" class="center_cell group_by_header_grey"> Category </td> <td width="60%" class="center_cell group_by_header"></td> <td width="20%" class="center_cell"></td> </tr>""" for i, category in enumerate(category_list): if category != "MainFiles": if category.lower() in tag_keys: if i == 0: first_border = "color1_top" else: first_border = "" nbr_notebooks = len(dict_by_tag[category.lower()]) markdown_cell += "\n\t<tr>" \ "\n\t\t<td rowspan='" + str(nbr_notebooks + 1) + "' class='center_cell open_cell_border_" + str(NOTEBOOK_KEYS[category]) + "'><span style='float:center'><img src='../../images/icons/" + category + ".png' class='icon' style='vertical-align:middle'></span> <span style='float:center' class='color" + str(NOTEBOOK_KEYS[category]) + "'>" + category + "</span></td>" \ "\n\t\t<td class='center_cell color" + str(NOTEBOOK_KEYS[category]) + "_cell " + first_border + "'><span style='float:center'>" + category + "</span></td>" \ "\n\t\t<td class='center_cell gradient_color" + str(NOTEBOOK_KEYS[category]) + "'></td>" \ "\n\t</tr>" notebook_list = dict_by_tag[category.lower()] for j, notebook_file in enumerate(notebook_list): if j == len(notebook_list) - 1: 
last_border = "class='border_cell_bottom_white'" else: last_border = "" split_path = notebook_file.replace("\\", "/").split("/") notebook_name = split_path[-1].split("&")[0] notebook_title = split_path[-1].split("&")[1] markdown_cell += "\n\t<tr " + last_border + ">" \ "\n\t\t<td class='center_cell open_cell_light'> <a href='../" + category + "/" + notebook_name + "'>" + notebook_title + "</a> </td>" \ "\n\t\t<td class='center_cell'> <a href='../" + category + "/" + notebook_name + "'><div class='file_icon'></div></a> </td>" \ "\n\t</tr>" markdown_cell += "\n</table>" # ============================ Insertion of an introductory text =============================== markdown_cell += DESCRIPTION_CATEGORY # =================== Insertion of the HTML table inside a markdown cell ======================= notebook_object["cells"].append(nb.v4.new_markdown_cell(markdown_cell))
[ "def", "_generate_notebooks_by_category", "(", "notebook_object", ",", "dict_by_tag", ")", ":", "# ============================ Insertion of an opening text ====================================", "markdown_cell", "=", "OPEN_IMAGE", "# == Generation of a table that group Notebooks by category the information about each signal ==", "category_list", "=", "list", "(", "NOTEBOOK_KEYS", ".", "keys", "(", ")", ")", "tag_keys", "=", "list", "(", "dict_by_tag", ".", "keys", "(", ")", ")", "markdown_cell", "+=", "\"\"\"\\n<table id=\"notebook_list\" width=\"100%\">\n <tr>\n <td width=\"20%\" class=\"center_cell group_by_header_grey\"> Category </td>\n <td width=\"60%\" class=\"center_cell group_by_header\"></td>\n <td width=\"20%\" class=\"center_cell\"></td>\n </tr>\"\"\"", "for", "i", ",", "category", "in", "enumerate", "(", "category_list", ")", ":", "if", "category", "!=", "\"MainFiles\"", ":", "if", "category", ".", "lower", "(", ")", "in", "tag_keys", ":", "if", "i", "==", "0", ":", "first_border", "=", "\"color1_top\"", "else", ":", "first_border", "=", "\"\"", "nbr_notebooks", "=", "len", "(", "dict_by_tag", "[", "category", ".", "lower", "(", ")", "]", ")", "markdown_cell", "+=", "\"\\n\\t<tr>\"", "\"\\n\\t\\t<td rowspan='\"", "+", "str", "(", "nbr_notebooks", "+", "1", ")", "+", "\"' class='center_cell open_cell_border_\"", "+", "str", "(", "NOTEBOOK_KEYS", "[", "category", "]", ")", "+", "\"'><span style='float:center'><img src='../../images/icons/\"", "+", "category", "+", "\".png' class='icon' style='vertical-align:middle'></span> <span style='float:center' class='color\"", "+", "str", "(", "NOTEBOOK_KEYS", "[", "category", "]", ")", "+", "\"'>\"", "+", "category", "+", "\"</span></td>\"", "\"\\n\\t\\t<td class='center_cell color\"", "+", "str", "(", "NOTEBOOK_KEYS", "[", "category", "]", ")", "+", "\"_cell \"", "+", "first_border", "+", "\"'><span style='float:center'>\"", "+", "category", "+", "\"</span></td>\"", "\"\\n\\t\\t<td class='center_cell 
gradient_color\"", "+", "str", "(", "NOTEBOOK_KEYS", "[", "category", "]", ")", "+", "\"'></td>\"", "\"\\n\\t</tr>\"", "notebook_list", "=", "dict_by_tag", "[", "category", ".", "lower", "(", ")", "]", "for", "j", ",", "notebook_file", "in", "enumerate", "(", "notebook_list", ")", ":", "if", "j", "==", "len", "(", "notebook_list", ")", "-", "1", ":", "last_border", "=", "\"class='border_cell_bottom_white'\"", "else", ":", "last_border", "=", "\"\"", "split_path", "=", "notebook_file", ".", "replace", "(", "\"\\\\\"", ",", "\"/\"", ")", ".", "split", "(", "\"/\"", ")", "notebook_name", "=", "split_path", "[", "-", "1", "]", ".", "split", "(", "\"&\"", ")", "[", "0", "]", "notebook_title", "=", "split_path", "[", "-", "1", "]", ".", "split", "(", "\"&\"", ")", "[", "1", "]", "markdown_cell", "+=", "\"\\n\\t<tr \"", "+", "last_border", "+", "\">\"", "\"\\n\\t\\t<td class='center_cell open_cell_light'> <a href='../\"", "+", "category", "+", "\"/\"", "+", "notebook_name", "+", "\"'>\"", "+", "notebook_title", "+", "\"</a> </td>\"", "\"\\n\\t\\t<td class='center_cell'> <a href='../\"", "+", "category", "+", "\"/\"", "+", "notebook_name", "+", "\"'><div class='file_icon'></div></a> </td>\"", "\"\\n\\t</tr>\"", "markdown_cell", "+=", "\"\\n</table>\"", "# ============================ Insertion of an introductory text ===============================", "markdown_cell", "+=", "DESCRIPTION_CATEGORY", "# =================== Insertion of the HTML table inside a markdown cell =======================", "notebook_object", "[", "\"cells\"", "]", ".", "append", "(", "nb", ".", "v4", ".", "new_markdown_cell", "(", "markdown_cell", ")", ")" ]
51.882353
35.397059
def _parse_adf_output(self): """ Parse the standard ADF output file. """ numerical_freq_patt = re.compile( r"\s+\*\s+F\sR\sE\sQ\sU\sE\sN\sC\sI\sE\sS\s+\*") analytic_freq_patt = re.compile( r"\s+\*\s+F\sR\sE\sQ\sU\sE\sN\sC\sY\s+A\sN\sA\sL\sY\sS\sI\sS\s+\*") freq_on_patt = re.compile(r"Vibrations\sand\sNormal\sModes\s+\*+.*\*+") freq_off_patt = re.compile(r"List\sof\sAll\sFrequencies:") mode_patt = re.compile(r"\s+(\d+)\.([A-Za-z]+)\s+(.*)") coord_patt = re.compile(r"\s+(\d+)\s+([A-Za-z]+)" + 6 * r"\s+([0-9\.-]+)") coord_on_patt = re.compile(r"\s+\*\s+R\sU\sN\s+T\sY\sP\sE\s:\sFREQUENCIES\s+\*") parse_freq = False parse_mode = False nnext = 0 nstrike = 0 sites = [] self.frequencies = [] self.normal_modes = [] if self.final_structure is None: find_structure = True parse_coord = False natoms = 0 else: find_structure = False parse_coord = False natoms = self.final_structure.num_sites with open(self.filename, "r") as f: for line in f: if self.run_type == "NumericalFreq" and find_structure: if not parse_coord: m = coord_on_patt.search(line) if m: parse_coord = True else: m = coord_patt.search(line) if m: sites.append( [m.group(2), list(map(float, m.groups()[2:5]))]) nstrike += 1 elif nstrike > 0: find_structure = False self.final_structure = self._sites_to_mol(sites) natoms = self.final_structure.num_sites elif self.freq_type is None: if numerical_freq_patt.search(line): self.freq_type = "Numerical" elif analytic_freq_patt.search(line): self.freq_type = "Analytical" self.run_type = "AnalyticalFreq" elif freq_on_patt.search(line): parse_freq = True elif parse_freq: if freq_off_patt.search(line): break el = line.strip().split() if 1 <= len(el) <= 3 and line.find(".") != -1: nnext = len(el) parse_mode = True parse_freq = False self.frequencies.extend(map(float, el)) for i in range(nnext): self.normal_modes.append([]) elif parse_mode: m = mode_patt.search(line) if m: v = list(chunks(map(float, m.group(3).split()), 3)) if len(v) != nnext: raise AdfOutputError("Odd Error!") for 
i, k in enumerate(range(-nnext, 0, 1)): self.normal_modes[k].extend(v[i]) if int(m.group(1)) == natoms: parse_freq = True parse_mode = False if isinstance(self.final_structure, list): self.final_structure = self._sites_to_mol(self.final_structure) if self.freq_type is not None: if len(self.frequencies) != len(self.normal_modes): raise AdfOutputError("The number of normal modes is wrong!") if len(self.normal_modes[0]) != natoms * 3: raise AdfOutputError("The dimensions of the modes are wrong!")
[ "def", "_parse_adf_output", "(", "self", ")", ":", "numerical_freq_patt", "=", "re", ".", "compile", "(", "r\"\\s+\\*\\s+F\\sR\\sE\\sQ\\sU\\sE\\sN\\sC\\sI\\sE\\sS\\s+\\*\"", ")", "analytic_freq_patt", "=", "re", ".", "compile", "(", "r\"\\s+\\*\\s+F\\sR\\sE\\sQ\\sU\\sE\\sN\\sC\\sY\\s+A\\sN\\sA\\sL\\sY\\sS\\sI\\sS\\s+\\*\"", ")", "freq_on_patt", "=", "re", ".", "compile", "(", "r\"Vibrations\\sand\\sNormal\\sModes\\s+\\*+.*\\*+\"", ")", "freq_off_patt", "=", "re", ".", "compile", "(", "r\"List\\sof\\sAll\\sFrequencies:\"", ")", "mode_patt", "=", "re", ".", "compile", "(", "r\"\\s+(\\d+)\\.([A-Za-z]+)\\s+(.*)\"", ")", "coord_patt", "=", "re", ".", "compile", "(", "r\"\\s+(\\d+)\\s+([A-Za-z]+)\"", "+", "6", "*", "r\"\\s+([0-9\\.-]+)\"", ")", "coord_on_patt", "=", "re", ".", "compile", "(", "r\"\\s+\\*\\s+R\\sU\\sN\\s+T\\sY\\sP\\sE\\s:\\sFREQUENCIES\\s+\\*\"", ")", "parse_freq", "=", "False", "parse_mode", "=", "False", "nnext", "=", "0", "nstrike", "=", "0", "sites", "=", "[", "]", "self", ".", "frequencies", "=", "[", "]", "self", ".", "normal_modes", "=", "[", "]", "if", "self", ".", "final_structure", "is", "None", ":", "find_structure", "=", "True", "parse_coord", "=", "False", "natoms", "=", "0", "else", ":", "find_structure", "=", "False", "parse_coord", "=", "False", "natoms", "=", "self", ".", "final_structure", ".", "num_sites", "with", "open", "(", "self", ".", "filename", ",", "\"r\"", ")", "as", "f", ":", "for", "line", "in", "f", ":", "if", "self", ".", "run_type", "==", "\"NumericalFreq\"", "and", "find_structure", ":", "if", "not", "parse_coord", ":", "m", "=", "coord_on_patt", ".", "search", "(", "line", ")", "if", "m", ":", "parse_coord", "=", "True", "else", ":", "m", "=", "coord_patt", ".", "search", "(", "line", ")", "if", "m", ":", "sites", ".", "append", "(", "[", "m", ".", "group", "(", "2", ")", ",", "list", "(", "map", "(", "float", ",", "m", ".", "groups", "(", ")", "[", "2", ":", "5", "]", ")", ")", "]", ")", "nstrike", "+=", "1", "elif", 
"nstrike", ">", "0", ":", "find_structure", "=", "False", "self", ".", "final_structure", "=", "self", ".", "_sites_to_mol", "(", "sites", ")", "natoms", "=", "self", ".", "final_structure", ".", "num_sites", "elif", "self", ".", "freq_type", "is", "None", ":", "if", "numerical_freq_patt", ".", "search", "(", "line", ")", ":", "self", ".", "freq_type", "=", "\"Numerical\"", "elif", "analytic_freq_patt", ".", "search", "(", "line", ")", ":", "self", ".", "freq_type", "=", "\"Analytical\"", "self", ".", "run_type", "=", "\"AnalyticalFreq\"", "elif", "freq_on_patt", ".", "search", "(", "line", ")", ":", "parse_freq", "=", "True", "elif", "parse_freq", ":", "if", "freq_off_patt", ".", "search", "(", "line", ")", ":", "break", "el", "=", "line", ".", "strip", "(", ")", ".", "split", "(", ")", "if", "1", "<=", "len", "(", "el", ")", "<=", "3", "and", "line", ".", "find", "(", "\".\"", ")", "!=", "-", "1", ":", "nnext", "=", "len", "(", "el", ")", "parse_mode", "=", "True", "parse_freq", "=", "False", "self", ".", "frequencies", ".", "extend", "(", "map", "(", "float", ",", "el", ")", ")", "for", "i", "in", "range", "(", "nnext", ")", ":", "self", ".", "normal_modes", ".", "append", "(", "[", "]", ")", "elif", "parse_mode", ":", "m", "=", "mode_patt", ".", "search", "(", "line", ")", "if", "m", ":", "v", "=", "list", "(", "chunks", "(", "map", "(", "float", ",", "m", ".", "group", "(", "3", ")", ".", "split", "(", ")", ")", ",", "3", ")", ")", "if", "len", "(", "v", ")", "!=", "nnext", ":", "raise", "AdfOutputError", "(", "\"Odd Error!\"", ")", "for", "i", ",", "k", "in", "enumerate", "(", "range", "(", "-", "nnext", ",", "0", ",", "1", ")", ")", ":", "self", ".", "normal_modes", "[", "k", "]", ".", "extend", "(", "v", "[", "i", "]", ")", "if", "int", "(", "m", ".", "group", "(", "1", ")", ")", "==", "natoms", ":", "parse_freq", "=", "True", "parse_mode", "=", "False", "if", "isinstance", "(", "self", ".", "final_structure", ",", "list", ")", ":", "self", ".", 
"final_structure", "=", "self", ".", "_sites_to_mol", "(", "self", ".", "final_structure", ")", "if", "self", ".", "freq_type", "is", "not", "None", ":", "if", "len", "(", "self", ".", "frequencies", ")", "!=", "len", "(", "self", ".", "normal_modes", ")", ":", "raise", "AdfOutputError", "(", "\"The number of normal modes is wrong!\"", ")", "if", "len", "(", "self", ".", "normal_modes", "[", "0", "]", ")", "!=", "natoms", "*", "3", ":", "raise", "AdfOutputError", "(", "\"The dimensions of the modes are wrong!\"", ")" ]
42.444444
16.933333
def get_win32_short_path_name(long_name): """ Gets the short path name of a given long path. References: http://stackoverflow.com/a/23598461/200291 http://stackoverflow.com/questions/23598289/get-win-short-fname-python Example: >>> # DISABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> import utool as ut # NOQA >>> # build test data >>> #long_name = unicode(normpath(ut.get_resource_dir())) >>> long_name = unicode(r'C:/Program Files (x86)') >>> #long_name = unicode(r'C:/Python27') #unicode(normpath(ut.get_resource_dir())) >>> # execute function >>> result = get_win32_short_path_name(long_name) >>> # verify results >>> print(result) C:/PROGRA~2 """ import ctypes from ctypes import wintypes _GetShortPathNameW = ctypes.windll.kernel32.GetShortPathNameW _GetShortPathNameW.argtypes = [wintypes.LPCWSTR, wintypes.LPWSTR, wintypes.DWORD] _GetShortPathNameW.restype = wintypes.DWORD output_buf_size = 0 while True: output_buf = ctypes.create_unicode_buffer(output_buf_size) needed = _GetShortPathNameW(long_name, output_buf, output_buf_size) if output_buf_size >= needed: short_name = output_buf.value break else: output_buf_size = needed return short_name
[ "def", "get_win32_short_path_name", "(", "long_name", ")", ":", "import", "ctypes", "from", "ctypes", "import", "wintypes", "_GetShortPathNameW", "=", "ctypes", ".", "windll", ".", "kernel32", ".", "GetShortPathNameW", "_GetShortPathNameW", ".", "argtypes", "=", "[", "wintypes", ".", "LPCWSTR", ",", "wintypes", ".", "LPWSTR", ",", "wintypes", ".", "DWORD", "]", "_GetShortPathNameW", ".", "restype", "=", "wintypes", ".", "DWORD", "output_buf_size", "=", "0", "while", "True", ":", "output_buf", "=", "ctypes", ".", "create_unicode_buffer", "(", "output_buf_size", ")", "needed", "=", "_GetShortPathNameW", "(", "long_name", ",", "output_buf", ",", "output_buf_size", ")", "if", "output_buf_size", ">=", "needed", ":", "short_name", "=", "output_buf", ".", "value", "break", "else", ":", "output_buf_size", "=", "needed", "return", "short_name" ]
36.131579
17.078947
def merge_required_files(dirnames, out_dir): """Merges the required files from each of the directories. :param dirnames: the list of directories to merge data from. :param out_dir: the name of the output directory. :type dirnames: list :type out_dir: str """ # The list of files to merge fn_to_merge = ("steps_summary.tex", "excluded_markers.txt", "excluded_samples.txt") # Merging the files for fn in fn_to_merge: o_fn = os.path.join(out_dir, fn) with open(o_fn, "w") as o_file: for dn in dirnames: i_fn = os.path.join(dn, fn) with open(i_fn, "r") as i_file: o_file.write(i_file.read()) # Merging the result summary file o_fn = os.path.join(out_dir, "results_summary.txt") with open(o_fn, "w") as o_file: for i, dn in enumerate(dirnames): i_fn = os.path.join(dn, "results_summary.txt") with open(i_fn, "r") as i_file: if i != 0: # We skip the first 4 lines (file descriptions) [i_file.readline() for i in range(4)] o_file.write(i_file.read()) # Merging the graphic paths file graphic_paths = set() for dn in dirnames: fn = os.path.join(dn, "graphic_paths.txt") if os.path.isfile(fn): with open(fn, "r") as i_file: graphic_paths.update({ os.path.join(dn, path) for path in i_file.read().splitlines() }) if len(graphic_paths) > 0: with open(os.path.join(out_dir, "graphic_paths.txt"), "w") as o_file: for path in sorted(graphic_paths): print >>o_file, os.path.relpath(path, out_dir)
[ "def", "merge_required_files", "(", "dirnames", ",", "out_dir", ")", ":", "# The list of files to merge", "fn_to_merge", "=", "(", "\"steps_summary.tex\"", ",", "\"excluded_markers.txt\"", ",", "\"excluded_samples.txt\"", ")", "# Merging the files", "for", "fn", "in", "fn_to_merge", ":", "o_fn", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "fn", ")", "with", "open", "(", "o_fn", ",", "\"w\"", ")", "as", "o_file", ":", "for", "dn", "in", "dirnames", ":", "i_fn", "=", "os", ".", "path", ".", "join", "(", "dn", ",", "fn", ")", "with", "open", "(", "i_fn", ",", "\"r\"", ")", "as", "i_file", ":", "o_file", ".", "write", "(", "i_file", ".", "read", "(", ")", ")", "# Merging the result summary file", "o_fn", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "\"results_summary.txt\"", ")", "with", "open", "(", "o_fn", ",", "\"w\"", ")", "as", "o_file", ":", "for", "i", ",", "dn", "in", "enumerate", "(", "dirnames", ")", ":", "i_fn", "=", "os", ".", "path", ".", "join", "(", "dn", ",", "\"results_summary.txt\"", ")", "with", "open", "(", "i_fn", ",", "\"r\"", ")", "as", "i_file", ":", "if", "i", "!=", "0", ":", "# We skip the first 4 lines (file descriptions)", "[", "i_file", ".", "readline", "(", ")", "for", "i", "in", "range", "(", "4", ")", "]", "o_file", ".", "write", "(", "i_file", ".", "read", "(", ")", ")", "# Merging the graphic paths file", "graphic_paths", "=", "set", "(", ")", "for", "dn", "in", "dirnames", ":", "fn", "=", "os", ".", "path", ".", "join", "(", "dn", ",", "\"graphic_paths.txt\"", ")", "if", "os", ".", "path", ".", "isfile", "(", "fn", ")", ":", "with", "open", "(", "fn", ",", "\"r\"", ")", "as", "i_file", ":", "graphic_paths", ".", "update", "(", "{", "os", ".", "path", ".", "join", "(", "dn", ",", "path", ")", "for", "path", "in", "i_file", ".", "read", "(", ")", ".", "splitlines", "(", ")", "}", ")", "if", "len", "(", "graphic_paths", ")", ">", "0", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "out_dir", 
",", "\"graphic_paths.txt\"", ")", ",", "\"w\"", ")", "as", "o_file", ":", "for", "path", "in", "sorted", "(", "graphic_paths", ")", ":", "print", ">>", "o_file", ",", "os", ".", "path", ".", "relpath", "(", "path", ",", "out_dir", ")" ]
36.395833
14.333333
def get_encoder(self, content_type): """ Get the encoding function for the provided content type for this bucket. :param content_type: the requested media type :type content_type: str :param content_type: Content type requested """ if content_type in self._encoders: return self._encoders[content_type] else: return self._client.get_encoder(content_type)
[ "def", "get_encoder", "(", "self", ",", "content_type", ")", ":", "if", "content_type", "in", "self", ".", "_encoders", ":", "return", "self", ".", "_encoders", "[", "content_type", "]", "else", ":", "return", "self", ".", "_client", ".", "get_encoder", "(", "content_type", ")" ]
33.769231
13.615385
def fit(self, x0=None, distribution='lognormal', n=None, **kwargs): '''Incomplete method to fit experimental values to a curve. It is very hard to get good initial guesses, which are really required for this. Differential evolution is promissing. This API is likely to change in the future. ''' dist = {'lognormal': PSDLognormal, 'GGS': PSDGatesGaudinSchuhman, 'RR': PSDRosinRammler}[distribution] if distribution == 'lognormal': if x0 is None: d_characteristic = sum([fi*di for fi, di in zip(self.fractions, self.Dis)]) s = 0.4 x0 = [d_characteristic, s] elif distribution == 'GGS': if x0 is None: d_characteristic = sum([fi*di for fi, di in zip(self.fractions, self.Dis)]) m = 1.5 x0 = [d_characteristic, m] elif distribution == 'RR': if x0 is None: x0 = [5E-6, 1e-2] from scipy.optimize import minimize return minimize(self._fit_obj_function, x0, args=(dist, n), **kwargs)
[ "def", "fit", "(", "self", ",", "x0", "=", "None", ",", "distribution", "=", "'lognormal'", ",", "n", "=", "None", ",", "*", "*", "kwargs", ")", ":", "dist", "=", "{", "'lognormal'", ":", "PSDLognormal", ",", "'GGS'", ":", "PSDGatesGaudinSchuhman", ",", "'RR'", ":", "PSDRosinRammler", "}", "[", "distribution", "]", "if", "distribution", "==", "'lognormal'", ":", "if", "x0", "is", "None", ":", "d_characteristic", "=", "sum", "(", "[", "fi", "*", "di", "for", "fi", ",", "di", "in", "zip", "(", "self", ".", "fractions", ",", "self", ".", "Dis", ")", "]", ")", "s", "=", "0.4", "x0", "=", "[", "d_characteristic", ",", "s", "]", "elif", "distribution", "==", "'GGS'", ":", "if", "x0", "is", "None", ":", "d_characteristic", "=", "sum", "(", "[", "fi", "*", "di", "for", "fi", ",", "di", "in", "zip", "(", "self", ".", "fractions", ",", "self", ".", "Dis", ")", "]", ")", "m", "=", "1.5", "x0", "=", "[", "d_characteristic", ",", "m", "]", "elif", "distribution", "==", "'RR'", ":", "if", "x0", "is", "None", ":", "x0", "=", "[", "5E-6", ",", "1e-2", "]", "from", "scipy", ".", "optimize", "import", "minimize", "return", "minimize", "(", "self", ".", "_fit_obj_function", ",", "x0", ",", "args", "=", "(", "dist", ",", "n", ")", ",", "*", "*", "kwargs", ")" ]
45.2
19.36
def get_queue_obj(session, queue_url, log_url): """Checks that all the data that is needed for submit verification is available.""" skip = False if not queue_url: logger.error("The queue url is not configured, skipping submit verification") skip = True if not session: logger.error("Missing requests session, skipping submit verification") skip = True queue = QueueSearch(session=session, queue_url=queue_url, log_url=log_url) queue.skip = skip return queue
[ "def", "get_queue_obj", "(", "session", ",", "queue_url", ",", "log_url", ")", ":", "skip", "=", "False", "if", "not", "queue_url", ":", "logger", ".", "error", "(", "\"The queue url is not configured, skipping submit verification\"", ")", "skip", "=", "True", "if", "not", "session", ":", "logger", ".", "error", "(", "\"Missing requests session, skipping submit verification\"", ")", "skip", "=", "True", "queue", "=", "QueueSearch", "(", "session", "=", "session", ",", "queue_url", "=", "queue_url", ",", "log_url", "=", "log_url", ")", "queue", ".", "skip", "=", "skip", "return", "queue" ]
38.923077
24.384615
def tar_to_bigfile(self, fname, outfile): """Convert tar of multiple FASTAs to one file.""" fnames = [] tmpdir = mkdtemp() # Extract files to temporary directory with tarfile.open(fname) as tar: tar.extractall(path=tmpdir) for root, _, files in os.walk(tmpdir): fnames += [os.path.join(root, fname) for fname in files] # Concatenate with open(outfile, "w") as out: for infile in fnames: for line in open(infile): out.write(line) os.unlink(infile) # Remove temp dir shutil.rmtree(tmpdir)
[ "def", "tar_to_bigfile", "(", "self", ",", "fname", ",", "outfile", ")", ":", "fnames", "=", "[", "]", "tmpdir", "=", "mkdtemp", "(", ")", "# Extract files to temporary directory", "with", "tarfile", ".", "open", "(", "fname", ")", "as", "tar", ":", "tar", ".", "extractall", "(", "path", "=", "tmpdir", ")", "for", "root", ",", "_", ",", "files", "in", "os", ".", "walk", "(", "tmpdir", ")", ":", "fnames", "+=", "[", "os", ".", "path", ".", "join", "(", "root", ",", "fname", ")", "for", "fname", "in", "files", "]", "# Concatenate", "with", "open", "(", "outfile", ",", "\"w\"", ")", "as", "out", ":", "for", "infile", "in", "fnames", ":", "for", "line", "in", "open", "(", "infile", ")", ":", "out", ".", "write", "(", "line", ")", "os", ".", "unlink", "(", "infile", ")", "# Remove temp dir", "shutil", ".", "rmtree", "(", "tmpdir", ")" ]
33.1
11.95
def execute_command(parser, config, ext_classes): """ Banana banana """ res = 0 cmd = config.get('command') get_private_folder = config.get('get_private_folder', False) if cmd == 'help': parser.print_help() elif cmd == 'run' or get_private_folder: # git.mk backward compat app = Application(ext_classes) try: app.parse_config(config) if get_private_folder: print(app.private_folder) return res res = app.run() except HotdocException: res = len(Logger.get_issues()) except Exception: # pylint: disable=broad-except print("An unknown error happened while building the documentation" " and hotdoc cannot recover from it. Please report " "a bug with this error message and the steps to " "reproduce it") traceback.print_exc() res = 1 finally: app.finalize() elif cmd == 'init': try: create_default_layout(config) except HotdocException: res = 1 elif cmd == 'conf': config.dump(conf_file=config.get('output_conf_file', None)) elif cmd is None: if config.get('version'): print(VERSION) elif config.get('makefile_path'): here = os.path.dirname(__file__) path = os.path.join(here, 'utils', 'hotdoc.mk') print(os.path.abspath(path)) elif config.get('get_conf_path'): key = config.get('get_conf_path') path = config.get_path(key, rel_to_cwd=True) if path is not None: print(path) elif config.get('get_conf_key'): key = config.get('get_conf_key') value = config.get(key, None) if value is not None: print(value) else: parser.print_usage() else: parser.print_usage() return res
[ "def", "execute_command", "(", "parser", ",", "config", ",", "ext_classes", ")", ":", "res", "=", "0", "cmd", "=", "config", ".", "get", "(", "'command'", ")", "get_private_folder", "=", "config", ".", "get", "(", "'get_private_folder'", ",", "False", ")", "if", "cmd", "==", "'help'", ":", "parser", ".", "print_help", "(", ")", "elif", "cmd", "==", "'run'", "or", "get_private_folder", ":", "# git.mk backward compat", "app", "=", "Application", "(", "ext_classes", ")", "try", ":", "app", ".", "parse_config", "(", "config", ")", "if", "get_private_folder", ":", "print", "(", "app", ".", "private_folder", ")", "return", "res", "res", "=", "app", ".", "run", "(", ")", "except", "HotdocException", ":", "res", "=", "len", "(", "Logger", ".", "get_issues", "(", ")", ")", "except", "Exception", ":", "# pylint: disable=broad-except", "print", "(", "\"An unknown error happened while building the documentation\"", "\" and hotdoc cannot recover from it. Please report \"", "\"a bug with this error message and the steps to \"", "\"reproduce it\"", ")", "traceback", ".", "print_exc", "(", ")", "res", "=", "1", "finally", ":", "app", ".", "finalize", "(", ")", "elif", "cmd", "==", "'init'", ":", "try", ":", "create_default_layout", "(", "config", ")", "except", "HotdocException", ":", "res", "=", "1", "elif", "cmd", "==", "'conf'", ":", "config", ".", "dump", "(", "conf_file", "=", "config", ".", "get", "(", "'output_conf_file'", ",", "None", ")", ")", "elif", "cmd", "is", "None", ":", "if", "config", ".", "get", "(", "'version'", ")", ":", "print", "(", "VERSION", ")", "elif", "config", ".", "get", "(", "'makefile_path'", ")", ":", "here", "=", "os", ".", "path", ".", "dirname", "(", "__file__", ")", "path", "=", "os", ".", "path", ".", "join", "(", "here", ",", "'utils'", ",", "'hotdoc.mk'", ")", "print", "(", "os", ".", "path", ".", "abspath", "(", "path", ")", ")", "elif", "config", ".", "get", "(", "'get_conf_path'", ")", ":", "key", "=", "config", ".", "get", "(", 
"'get_conf_path'", ")", "path", "=", "config", ".", "get_path", "(", "key", ",", "rel_to_cwd", "=", "True", ")", "if", "path", "is", "not", "None", ":", "print", "(", "path", ")", "elif", "config", ".", "get", "(", "'get_conf_key'", ")", ":", "key", "=", "config", ".", "get", "(", "'get_conf_key'", ")", "value", "=", "config", ".", "get", "(", "key", ",", "None", ")", "if", "value", "is", "not", "None", ":", "print", "(", "value", ")", "else", ":", "parser", ".", "print_usage", "(", ")", "else", ":", "parser", ".", "print_usage", "(", ")", "return", "res" ]
32.55
14.916667
def describe(self): """Describe the model.""" result = "No description available" if self.description: result = "%s" % self.description else: if self.__doc__: s = [] s += [self.__doc__.strip().replace('\n', ''). replace(' ', ' ')] result = '\n'.join(s) return result
[ "def", "describe", "(", "self", ")", ":", "result", "=", "\"No description available\"", "if", "self", ".", "description", ":", "result", "=", "\"%s\"", "%", "self", ".", "description", "else", ":", "if", "self", ".", "__doc__", ":", "s", "=", "[", "]", "s", "+=", "[", "self", ".", "__doc__", ".", "strip", "(", ")", ".", "replace", "(", "'\\n'", ",", "''", ")", ".", "replace", "(", "' '", ",", "' '", ")", "]", "result", "=", "'\\n'", ".", "join", "(", "s", ")", "return", "result" ]
32.666667
11.916667
def load_internal_cache(cls, pex, pex_info): """Possibly cache out the internal cache.""" internal_cache = os.path.join(pex, pex_info.internal_cache) with TRACER.timed('Searching dependency cache: %s' % internal_cache, V=2): if os.path.isdir(pex): for dist in find_distributions(internal_cache): yield dist else: for dist in itertools.chain(*cls.write_zipped_internal_cache(pex, pex_info)): yield dist
[ "def", "load_internal_cache", "(", "cls", ",", "pex", ",", "pex_info", ")", ":", "internal_cache", "=", "os", ".", "path", ".", "join", "(", "pex", ",", "pex_info", ".", "internal_cache", ")", "with", "TRACER", ".", "timed", "(", "'Searching dependency cache: %s'", "%", "internal_cache", ",", "V", "=", "2", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "pex", ")", ":", "for", "dist", "in", "find_distributions", "(", "internal_cache", ")", ":", "yield", "dist", "else", ":", "for", "dist", "in", "itertools", ".", "chain", "(", "*", "cls", ".", "write_zipped_internal_cache", "(", "pex", ",", "pex_info", ")", ")", ":", "yield", "dist" ]
45.2
20.6
def on_close(self): ''' Clean up when the connection is closed. ''' log.info('WebSocket connection closed: code=%s, reason=%r', self.close_code, self.close_reason) if self.connection is not None: self.application.client_lost(self.connection)
[ "def", "on_close", "(", "self", ")", ":", "log", ".", "info", "(", "'WebSocket connection closed: code=%s, reason=%r'", ",", "self", ".", "close_code", ",", "self", ".", "close_reason", ")", "if", "self", ".", "connection", "is", "not", "None", ":", "self", ".", "application", ".", "client_lost", "(", "self", ".", "connection", ")" ]
40
26
def line(h1: Union[Histogram1D, "HistogramCollection"], ax: Axes, *, errors: bool = False, **kwargs): """Line plot of 1D histogram.""" show_stats = kwargs.pop("show_stats", False) show_values = kwargs.pop("show_values", False) density = kwargs.pop("density", False) cumulative = kwargs.pop("cumulative", False) value_format = kwargs.pop("value_format", None) text_kwargs = pop_kwargs_with_prefix("text_", kwargs) kwargs["label"] = kwargs.get("label", h1.name) data = get_data(h1, cumulative=cumulative, density=density) _apply_xy_lims(ax, h1, data, kwargs) _add_ticks(ax, h1, kwargs) _add_labels(ax, h1, kwargs) if errors: err_data = get_err_data(h1, cumulative=cumulative, density=density) ax.errorbar(h1.bin_centers, data, yerr=err_data, fmt=kwargs.pop( "fmt", "-"), ecolor=kwargs.pop("ecolor", "black"), **kwargs) else: ax.plot(h1.bin_centers, data, **kwargs) if show_stats: _add_stats_box(h1, ax, stats=show_stats) if show_values: _add_values(ax, h1, data, value_format=value_format, **text_kwargs)
[ "def", "line", "(", "h1", ":", "Union", "[", "Histogram1D", ",", "\"HistogramCollection\"", "]", ",", "ax", ":", "Axes", ",", "*", ",", "errors", ":", "bool", "=", "False", ",", "*", "*", "kwargs", ")", ":", "show_stats", "=", "kwargs", ".", "pop", "(", "\"show_stats\"", ",", "False", ")", "show_values", "=", "kwargs", ".", "pop", "(", "\"show_values\"", ",", "False", ")", "density", "=", "kwargs", ".", "pop", "(", "\"density\"", ",", "False", ")", "cumulative", "=", "kwargs", ".", "pop", "(", "\"cumulative\"", ",", "False", ")", "value_format", "=", "kwargs", ".", "pop", "(", "\"value_format\"", ",", "None", ")", "text_kwargs", "=", "pop_kwargs_with_prefix", "(", "\"text_\"", ",", "kwargs", ")", "kwargs", "[", "\"label\"", "]", "=", "kwargs", ".", "get", "(", "\"label\"", ",", "h1", ".", "name", ")", "data", "=", "get_data", "(", "h1", ",", "cumulative", "=", "cumulative", ",", "density", "=", "density", ")", "_apply_xy_lims", "(", "ax", ",", "h1", ",", "data", ",", "kwargs", ")", "_add_ticks", "(", "ax", ",", "h1", ",", "kwargs", ")", "_add_labels", "(", "ax", ",", "h1", ",", "kwargs", ")", "if", "errors", ":", "err_data", "=", "get_err_data", "(", "h1", ",", "cumulative", "=", "cumulative", ",", "density", "=", "density", ")", "ax", ".", "errorbar", "(", "h1", ".", "bin_centers", ",", "data", ",", "yerr", "=", "err_data", ",", "fmt", "=", "kwargs", ".", "pop", "(", "\"fmt\"", ",", "\"-\"", ")", ",", "ecolor", "=", "kwargs", ".", "pop", "(", "\"ecolor\"", ",", "\"black\"", ")", ",", "*", "*", "kwargs", ")", "else", ":", "ax", ".", "plot", "(", "h1", ".", "bin_centers", ",", "data", ",", "*", "*", "kwargs", ")", "if", "show_stats", ":", "_add_stats_box", "(", "h1", ",", "ax", ",", "stats", "=", "show_stats", ")", "if", "show_values", ":", "_add_values", "(", "ax", ",", "h1", ",", "data", ",", "value_format", "=", "value_format", ",", "*", "*", "text_kwargs", ")" ]
39.142857
22.071429
def addRecordsFromThread(self, records): """ Adds the given record to the system. :param records | [<orb.Table>, ..] """ label_mapper = self.labelMapper() icon_mapper = self.iconMapper() tree = None if self.showTreePopup(): tree = self.treePopupWidget() # add the items to the list start = self.count() # update the item information blocked = self.signalsBlocked() self.blockSignals(True) for i, record in enumerate(records): index = start + i self.addItem(label_mapper(record)) self.setItemData(index, wrapVariant(record), Qt.UserRole) if icon_mapper: self.setItemIcon(index, icon_mapper(record)) if record == self._currentRecord: self.setCurrentIndex(self.count() - 1) if tree: XOrbRecordItem(tree, record) self.blockSignals(blocked)
[ "def", "addRecordsFromThread", "(", "self", ",", "records", ")", ":", "label_mapper", "=", "self", ".", "labelMapper", "(", ")", "icon_mapper", "=", "self", ".", "iconMapper", "(", ")", "tree", "=", "None", "if", "self", ".", "showTreePopup", "(", ")", ":", "tree", "=", "self", ".", "treePopupWidget", "(", ")", "# add the items to the list\r", "start", "=", "self", ".", "count", "(", ")", "# update the item information\r", "blocked", "=", "self", ".", "signalsBlocked", "(", ")", "self", ".", "blockSignals", "(", "True", ")", "for", "i", ",", "record", "in", "enumerate", "(", "records", ")", ":", "index", "=", "start", "+", "i", "self", ".", "addItem", "(", "label_mapper", "(", "record", ")", ")", "self", ".", "setItemData", "(", "index", ",", "wrapVariant", "(", "record", ")", ",", "Qt", ".", "UserRole", ")", "if", "icon_mapper", ":", "self", ".", "setItemIcon", "(", "index", ",", "icon_mapper", "(", "record", ")", ")", "if", "record", "==", "self", ".", "_currentRecord", ":", "self", ".", "setCurrentIndex", "(", "self", ".", "count", "(", ")", "-", "1", ")", "if", "tree", ":", "XOrbRecordItem", "(", "tree", ",", "record", ")", "self", ".", "blockSignals", "(", "blocked", ")" ]
32.545455
12.666667
def add_missing_optional_args_with_value_none(args, optional_args): ''' Adds key-value pairs to the passed dictionary, so that afterwards, the dictionary can be used without needing to check for KeyErrors. If the keys passed as a second argument are not present, they are added with None as a value. :args: The dictionary to be completed. :optional_args: The keys that need to be added, if they are not present. :return: The modified dictionary. ''' for name in optional_args: if not name in args.keys(): args[name] = None return args
[ "def", "add_missing_optional_args_with_value_none", "(", "args", ",", "optional_args", ")", ":", "for", "name", "in", "optional_args", ":", "if", "not", "name", "in", "args", ".", "keys", "(", ")", ":", "args", "[", "name", "]", "=", "None", "return", "args" ]
31.947368
19.315789
def commit_branches(sha1): # type: (str) -> List[str] """ Get the name of the branches that this commit belongs to. """ cmd = 'git branch --contains {}'.format(sha1) return shell.run( cmd, capture=True, never_pretend=True ).stdout.strip().split()
[ "def", "commit_branches", "(", "sha1", ")", ":", "# type: (str) -> List[str]", "cmd", "=", "'git branch --contains {}'", ".", "format", "(", "sha1", ")", "return", "shell", ".", "run", "(", "cmd", ",", "capture", "=", "True", ",", "never_pretend", "=", "True", ")", ".", "stdout", ".", "strip", "(", ")", ".", "split", "(", ")" ]
31.333333
13.888889
def buildIcon(icon): """ Builds an icon from the inputed information. :param icon | <variant> """ if icon is None: return QIcon() if type(icon) == buffer: try: icon = QIcon(projexui.generatePixmap(icon)) except: icon = QIcon() else: try: icon = QIcon(icon) except: icon = QIcon() return icon
[ "def", "buildIcon", "(", "icon", ")", ":", "if", "icon", "is", "None", ":", "return", "QIcon", "(", ")", "if", "type", "(", "icon", ")", "==", "buffer", ":", "try", ":", "icon", "=", "QIcon", "(", "projexui", ".", "generatePixmap", "(", "icon", ")", ")", "except", ":", "icon", "=", "QIcon", "(", ")", "else", ":", "try", ":", "icon", "=", "QIcon", "(", "icon", ")", "except", ":", "icon", "=", "QIcon", "(", ")", "return", "icon" ]
24.333333
16.142857
def get_community_by_name(self, name, token=None): """ Get a community based on its name. :param name: The name of the target community. :type name: string :param token: (optional) A valid token for the user in question. :type token: None | string :returns: The requested community. :rtype: dict """ parameters = dict() parameters['name'] = name if token: parameters['token'] = token response = self.request('midas.community.get', parameters) return response
[ "def", "get_community_by_name", "(", "self", ",", "name", ",", "token", "=", "None", ")", ":", "parameters", "=", "dict", "(", ")", "parameters", "[", "'name'", "]", "=", "name", "if", "token", ":", "parameters", "[", "'token'", "]", "=", "token", "response", "=", "self", ".", "request", "(", "'midas.community.get'", ",", "parameters", ")", "return", "response" ]
33.352941
13.352941
def backprop(self, input_data, df_output, cache=None): """ Backpropagate through the hidden layer **Parameters:** input_data : ``GPUArray`` Input data to compute activations for. df_output : ``GPUArray`` Gradients with respect to the activations of this layer (received from the layer above). cache : list of ``GPUArray`` Cache obtained from forward pass. If the cache is provided, then the activations are not recalculated. **Returns:** gradients : tuple of ``GPUArray`` Gradients with respect to the weights and biases in the form ``(df_weights, df_biases)``. df_input : ``GPUArray`` Gradients with respect to the input. """ # Get cache if it wasn't provided if cache is None: cache = self.feed_forward(input_data, prediction=False) if len(cache) == 2: activations, dropout_mask = cache else: activations = cache[0] # Multiply the binary mask with the incoming gradients if self.dropout > 0 and dropout_mask is not None: apply_dropout_mask(df_output, dropout_mask) # Get gradient wrt activation function df_activations = self.df(activations) delta = mult_matrix(df_activations, df_output) # Gradient wrt weights df_W = linalg.dot(input_data, delta, transa='T') # Gradient wrt bias df_b = matrix_sum_out_axis(delta, 0) # Gradient wrt inputs df_input = linalg.dot(delta, self.W, transb='T') # L1 weight decay if self.l1_penalty_weight: df_W += self.l1_penalty_weight * sign(self.W) # L2 weight decay if self.l2_penalty_weight: df_W += self.l2_penalty_weight * self.W return (df_W, df_b), df_input
[ "def", "backprop", "(", "self", ",", "input_data", ",", "df_output", ",", "cache", "=", "None", ")", ":", "# Get cache if it wasn't provided", "if", "cache", "is", "None", ":", "cache", "=", "self", ".", "feed_forward", "(", "input_data", ",", "prediction", "=", "False", ")", "if", "len", "(", "cache", ")", "==", "2", ":", "activations", ",", "dropout_mask", "=", "cache", "else", ":", "activations", "=", "cache", "[", "0", "]", "# Multiply the binary mask with the incoming gradients", "if", "self", ".", "dropout", ">", "0", "and", "dropout_mask", "is", "not", "None", ":", "apply_dropout_mask", "(", "df_output", ",", "dropout_mask", ")", "# Get gradient wrt activation function", "df_activations", "=", "self", ".", "df", "(", "activations", ")", "delta", "=", "mult_matrix", "(", "df_activations", ",", "df_output", ")", "# Gradient wrt weights", "df_W", "=", "linalg", ".", "dot", "(", "input_data", ",", "delta", ",", "transa", "=", "'T'", ")", "# Gradient wrt bias", "df_b", "=", "matrix_sum_out_axis", "(", "delta", ",", "0", ")", "# Gradient wrt inputs", "df_input", "=", "linalg", ".", "dot", "(", "delta", ",", "self", ".", "W", ",", "transb", "=", "'T'", ")", "# L1 weight decay", "if", "self", ".", "l1_penalty_weight", ":", "df_W", "+=", "self", ".", "l1_penalty_weight", "*", "sign", "(", "self", ".", "W", ")", "# L2 weight decay", "if", "self", ".", "l2_penalty_weight", ":", "df_W", "+=", "self", ".", "l2_penalty_weight", "*", "self", ".", "W", "return", "(", "df_W", ",", "df_b", ")", ",", "df_input" ]
31.5
18.65
def _fetch_dimensions(self, dataset): """ Iterate through semesters, counties and municipalities. """ yield Dimension(u"school") yield Dimension(u"year", datatype="year") yield Dimension(u"semester", datatype="academic_term", dialect="swedish") # HT/VT yield Dimension(u"municipality", datatype="year", domain="sweden/municipalities")
[ "def", "_fetch_dimensions", "(", "self", ",", "dataset", ")", ":", "yield", "Dimension", "(", "u\"school\"", ")", "yield", "Dimension", "(", "u\"year\"", ",", "datatype", "=", "\"year\"", ")", "yield", "Dimension", "(", "u\"semester\"", ",", "datatype", "=", "\"academic_term\"", ",", "dialect", "=", "\"swedish\"", ")", "# HT/VT", "yield", "Dimension", "(", "u\"municipality\"", ",", "datatype", "=", "\"year\"", ",", "domain", "=", "\"sweden/municipalities\"", ")" ]
41
4.666667
def Matches(self, file_entry): """Compares the file entry against the filter collection. Args: file_entry (dfvfs.FileEntry): file entry to compare. Returns: bool: True if the file entry matches one of the filters. If no filters are provided or applicable the result will be True. """ if not self._filters: return True results = [] for file_entry_filter in self._filters: result = file_entry_filter.Matches(file_entry) results.append(result) return True in results or False not in results
[ "def", "Matches", "(", "self", ",", "file_entry", ")", ":", "if", "not", "self", ".", "_filters", ":", "return", "True", "results", "=", "[", "]", "for", "file_entry_filter", "in", "self", ".", "_filters", ":", "result", "=", "file_entry_filter", ".", "Matches", "(", "file_entry", ")", "results", ".", "append", "(", "result", ")", "return", "True", "in", "results", "or", "False", "not", "in", "results" ]
28.684211
21.210526
def add(self, fig, title, minX, maxX, offsetAdjuster=None, sequenceFetcher=None): """ Find the features for a sequence title. If there aren't too many, add the features to C{fig}. Return information about the features, as described below. @param fig: A matplotlib figure. @param title: A C{str} sequence title from a BLAST hit. Of the form 'gi|63148399|gb|DQ011818.1| Description...'. @param minX: The smallest x coordinate. @param maxX: The largest x coordinate. @param offsetAdjuster: a function for adjusting feature X axis offsets for plotting. @param sequenceFetcher: A function that takes a sequence title and a database name and returns a C{Bio.SeqIO} instance. If C{None}, use L{dark.entrez.getSequence}. @return: If we seem to be offline, return C{None}. Otherwise, return a L{FeatureList} instance. """ offsetAdjuster = offsetAdjuster or (lambda x: x) fig.set_title('Target sequence features', fontsize=self.TITLE_FONTSIZE) fig.set_yticks([]) features = FeatureList(title, self.DATABASE, self.WANTED_TYPES, sequenceFetcher=sequenceFetcher) if features.offline: fig.text(minX + (maxX - minX) / 3.0, 0, 'You (or Genbank) appear to be offline.', fontsize=self.FONTSIZE) fig.axis([minX, maxX, -1, 1]) return None # If no interesting features were found, display a message saying # so in the figure. Otherwise, if we don't have too many features # to plot, add the feature info to the figure. nFeatures = len(features) if nFeatures == 0: # fig.text(minX + (maxX - minX) / 3.0, 0, 'No features found', # fontsize=self.FONTSIZE) fig.text(0.5, 0.5, 'No features found', horizontalalignment='center', verticalalignment='center', transform=fig.transAxes, fontsize=self.FONTSIZE) fig.axis([minX, maxX, -1, 1]) elif nFeatures <= self.MAX_FEATURES_TO_DISPLAY: # Call the method in our subclass to do the figure display. 
self._displayFeatures(fig, features, minX, maxX, offsetAdjuster) else: self.tooManyFeaturesToPlot = True # fig.text(minX + (maxX - minX) / 3.0, 0, # 'Too many features to plot.', fontsize=self.FONTSIZE) fig.text(0.5, 0.5, 'Too many features to plot', horizontalalignment='center', verticalalignment='center', fontsize=self.FONTSIZE, transform=fig.transAxes) fig.axis([minX, maxX, -1, 1]) return features
[ "def", "add", "(", "self", ",", "fig", ",", "title", ",", "minX", ",", "maxX", ",", "offsetAdjuster", "=", "None", ",", "sequenceFetcher", "=", "None", ")", ":", "offsetAdjuster", "=", "offsetAdjuster", "or", "(", "lambda", "x", ":", "x", ")", "fig", ".", "set_title", "(", "'Target sequence features'", ",", "fontsize", "=", "self", ".", "TITLE_FONTSIZE", ")", "fig", ".", "set_yticks", "(", "[", "]", ")", "features", "=", "FeatureList", "(", "title", ",", "self", ".", "DATABASE", ",", "self", ".", "WANTED_TYPES", ",", "sequenceFetcher", "=", "sequenceFetcher", ")", "if", "features", ".", "offline", ":", "fig", ".", "text", "(", "minX", "+", "(", "maxX", "-", "minX", ")", "/", "3.0", ",", "0", ",", "'You (or Genbank) appear to be offline.'", ",", "fontsize", "=", "self", ".", "FONTSIZE", ")", "fig", ".", "axis", "(", "[", "minX", ",", "maxX", ",", "-", "1", ",", "1", "]", ")", "return", "None", "# If no interesting features were found, display a message saying", "# so in the figure. Otherwise, if we don't have too many features", "# to plot, add the feature info to the figure.", "nFeatures", "=", "len", "(", "features", ")", "if", "nFeatures", "==", "0", ":", "# fig.text(minX + (maxX - minX) / 3.0, 0, 'No features found',", "# fontsize=self.FONTSIZE)", "fig", ".", "text", "(", "0.5", ",", "0.5", ",", "'No features found'", ",", "horizontalalignment", "=", "'center'", ",", "verticalalignment", "=", "'center'", ",", "transform", "=", "fig", ".", "transAxes", ",", "fontsize", "=", "self", ".", "FONTSIZE", ")", "fig", ".", "axis", "(", "[", "minX", ",", "maxX", ",", "-", "1", ",", "1", "]", ")", "elif", "nFeatures", "<=", "self", ".", "MAX_FEATURES_TO_DISPLAY", ":", "# Call the method in our subclass to do the figure display.", "self", ".", "_displayFeatures", "(", "fig", ",", "features", ",", "minX", ",", "maxX", ",", "offsetAdjuster", ")", "else", ":", "self", ".", "tooManyFeaturesToPlot", "=", "True", "# fig.text(minX + (maxX - minX) / 3.0, 0,", "# 'Too many 
features to plot.', fontsize=self.FONTSIZE)", "fig", ".", "text", "(", "0.5", ",", "0.5", ",", "'Too many features to plot'", ",", "horizontalalignment", "=", "'center'", ",", "verticalalignment", "=", "'center'", ",", "fontsize", "=", "self", ".", "FONTSIZE", ",", "transform", "=", "fig", ".", "transAxes", ")", "fig", ".", "axis", "(", "[", "minX", ",", "maxX", ",", "-", "1", ",", "1", "]", ")", "return", "features" ]
46.266667
21.5
def _download_rtd_zip(rtd_version=None, **kwargs): """ Download and extract HTML ZIP from RTD to installed doc data path. Download is skipped if content already exists. Parameters ---------- rtd_version : str or `None` RTD version to download; e.g., "latest", "stable", or "v2.6.0". If not given, download closest match to software version. kwargs : dict Keywords for ``urlretrieve()``. Returns ------- index_html : str Path to local "index.html". """ # https://github.com/ejeschke/ginga/pull/451#issuecomment-298403134 if not toolkit.family.startswith('qt'): raise ValueError('Downloaded documentation not compatible with {} ' 'UI toolkit browser'.format(toolkit.family)) if rtd_version is None: rtd_version = _find_rtd_version() data_path = os.path.dirname( _find_pkg_data_path('help.html', package='ginga.doc')) index_html = os.path.join(data_path, 'index.html') # There is a previous download of documentation; Do nothing. # There is no check if downloaded version is outdated; The idea is that # this folder would be empty again when installing new version. if os.path.isfile(index_html): return index_html url = ('https://readthedocs.org/projects/ginga/downloads/htmlzip/' '{}/'.format(rtd_version)) local_path = urllib.request.urlretrieve(url, **kwargs)[0] with zipfile.ZipFile(local_path, 'r') as zf: zf.extractall(data_path) # RTD makes an undesirable sub-directory, so move everything there # up one level and delete it. subdir = os.path.join(data_path, 'ginga-{}'.format(rtd_version)) for s in os.listdir(subdir): src = os.path.join(subdir, s) if os.path.isfile(src): shutil.copy(src, data_path) else: # directory shutil.copytree(src, os.path.join(data_path, s)) shutil.rmtree(subdir) if not os.path.isfile(index_html): raise OSError( '{} is missing; Ginga doc download failed'.format(index_html)) return index_html
[ "def", "_download_rtd_zip", "(", "rtd_version", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# https://github.com/ejeschke/ginga/pull/451#issuecomment-298403134", "if", "not", "toolkit", ".", "family", ".", "startswith", "(", "'qt'", ")", ":", "raise", "ValueError", "(", "'Downloaded documentation not compatible with {} '", "'UI toolkit browser'", ".", "format", "(", "toolkit", ".", "family", ")", ")", "if", "rtd_version", "is", "None", ":", "rtd_version", "=", "_find_rtd_version", "(", ")", "data_path", "=", "os", ".", "path", ".", "dirname", "(", "_find_pkg_data_path", "(", "'help.html'", ",", "package", "=", "'ginga.doc'", ")", ")", "index_html", "=", "os", ".", "path", ".", "join", "(", "data_path", ",", "'index.html'", ")", "# There is a previous download of documentation; Do nothing.", "# There is no check if downloaded version is outdated; The idea is that", "# this folder would be empty again when installing new version.", "if", "os", ".", "path", ".", "isfile", "(", "index_html", ")", ":", "return", "index_html", "url", "=", "(", "'https://readthedocs.org/projects/ginga/downloads/htmlzip/'", "'{}/'", ".", "format", "(", "rtd_version", ")", ")", "local_path", "=", "urllib", ".", "request", ".", "urlretrieve", "(", "url", ",", "*", "*", "kwargs", ")", "[", "0", "]", "with", "zipfile", ".", "ZipFile", "(", "local_path", ",", "'r'", ")", "as", "zf", ":", "zf", ".", "extractall", "(", "data_path", ")", "# RTD makes an undesirable sub-directory, so move everything there", "# up one level and delete it.", "subdir", "=", "os", ".", "path", ".", "join", "(", "data_path", ",", "'ginga-{}'", ".", "format", "(", "rtd_version", ")", ")", "for", "s", "in", "os", ".", "listdir", "(", "subdir", ")", ":", "src", "=", "os", ".", "path", ".", "join", "(", "subdir", ",", "s", ")", "if", "os", ".", "path", ".", "isfile", "(", "src", ")", ":", "shutil", ".", "copy", "(", "src", ",", "data_path", ")", "else", ":", "# directory", "shutil", ".", "copytree", "(", "src", ",", 
"os", ".", "path", ".", "join", "(", "data_path", ",", "s", ")", ")", "shutil", ".", "rmtree", "(", "subdir", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "index_html", ")", ":", "raise", "OSError", "(", "'{} is missing; Ginga doc download failed'", ".", "format", "(", "index_html", ")", ")", "return", "index_html" ]
34.016393
21.229508
def account_following(self, id, max_id=None, min_id=None, since_id=None, limit=None): """ Fetch users the given user is following. Returns a list of `user dicts`_. """ id = self.__unpack_id(id) if max_id != None: max_id = self.__unpack_id(max_id) if min_id != None: min_id = self.__unpack_id(min_id) if since_id != None: since_id = self.__unpack_id(since_id) params = self.__generate_params(locals(), ['id']) url = '/api/v1/accounts/{0}/following'.format(str(id)) return self.__api_request('GET', url, params)
[ "def", "account_following", "(", "self", ",", "id", ",", "max_id", "=", "None", ",", "min_id", "=", "None", ",", "since_id", "=", "None", ",", "limit", "=", "None", ")", ":", "id", "=", "self", ".", "__unpack_id", "(", "id", ")", "if", "max_id", "!=", "None", ":", "max_id", "=", "self", ".", "__unpack_id", "(", "max_id", ")", "if", "min_id", "!=", "None", ":", "min_id", "=", "self", ".", "__unpack_id", "(", "min_id", ")", "if", "since_id", "!=", "None", ":", "since_id", "=", "self", ".", "__unpack_id", "(", "since_id", ")", "params", "=", "self", ".", "__generate_params", "(", "locals", "(", ")", ",", "[", "'id'", "]", ")", "url", "=", "'/api/v1/accounts/{0}/following'", ".", "format", "(", "str", "(", "id", ")", ")", "return", "self", ".", "__api_request", "(", "'GET'", ",", "url", ",", "params", ")" ]
34.052632
15.947368
def _server_property(self, attr_name): """An attribute of the current server's description. If the client is not connected, this will block until a connection is established or raise ServerSelectionTimeoutError if no server is available. Not threadsafe if used multiple times in a single method, since the server may change. In such cases, store a local reference to a ServerDescription first, then use its properties. """ server = self._topology.select_server( writable_server_selector) return getattr(server.description, attr_name)
[ "def", "_server_property", "(", "self", ",", "attr_name", ")", ":", "server", "=", "self", ".", "_topology", ".", "select_server", "(", "writable_server_selector", ")", "return", "getattr", "(", "server", ".", "description", ",", "attr_name", ")" ]
40.933333
21.133333
def information_title_header_element(feature, parent): """Retrieve information title header string from definitions.""" _ = feature, parent # NOQA header = information_title_header['string_format'] return header.capitalize()
[ "def", "information_title_header_element", "(", "feature", ",", "parent", ")", ":", "_", "=", "feature", ",", "parent", "# NOQA", "header", "=", "information_title_header", "[", "'string_format'", "]", "return", "header", ".", "capitalize", "(", ")" ]
47.4
9.4
def make_output_cf_compliant(self, simulation_start_datetime, comid_lat_lon_z_file="", project_name="Normal RAPID project"): """ This function converts the RAPID output to be CF compliant. This will require a *comid_lat_lon_z.csv* file (See: :func:`~RAPIDpy.gis.centroid.FlowlineToPoint` to generate the file). .. note:: It prepends time an initial flow to your simulation from the *qinit_file*. If no qinit file is given, an initial value of zero is added. .. warning:: This will delete your original Qout file. Parameters ---------- simulation_start_datetime: datetime Datetime object with the start date of the simulation. comid_lat_lon_z_file: str, optional Path to the *comid_lat_lon_z.csv* file. If none given, spatial information will be skipped. project_name: str, optional Name of project to add to the RAPID output file. Example: .. code:: python from RAPIDpy import RAPID rapid_manager = RAPID( rapid_executable_location='~/work/rapid/run/rapid' use_all_processors=True, ZS_TauR=24*3600, ZS_dtR=15*60, ZS_TauM=365*24*3600, ZS_dtM=24*3600 rapid_connect_file='../rapid-io/input/rapid_connect.csv', Vlat_file='../rapid-io/input/m3_riv.nc', riv_bas_id_file='../rapid-io/input/riv_bas_id.csv', k_file='../rapid-io/input/k.csv', x_file='../rapid-io/input/x.csv', Qout_file='../rapid-io/output/Qout.nc' ) rapid_manager.run() rapid_manager.make_output_cf_compliant( simulation_start_datetime=datetime.datetime(1980, 1, 1), comid_lat_lon_z_file='../rapid-io/input/comid_lat_lon_z.csv', project_name="ERA Interim Historical flows by US Army ERDC" ) """ with RAPIDDataset(self.Qout_file) as qout_nc: if qout_nc.is_time_variable_valid(): log("RAPID Qout file already CF compliant ...", "INFO") return crv = ConvertRAPIDOutputToCF( rapid_output_file=self.Qout_file, start_datetime=simulation_start_datetime, time_step=self.ZS_TauR, qinit_file=self.Qinit_file, comid_lat_lon_z_file=comid_lat_lon_z_file, rapid_connect_file=self.rapid_connect_file, project_name=project_name, output_id_dim_name='rivid', 
output_flow_var_name='Qout', print_debug=False ) crv.convert()
[ "def", "make_output_cf_compliant", "(", "self", ",", "simulation_start_datetime", ",", "comid_lat_lon_z_file", "=", "\"\"", ",", "project_name", "=", "\"Normal RAPID project\"", ")", ":", "with", "RAPIDDataset", "(", "self", ".", "Qout_file", ")", "as", "qout_nc", ":", "if", "qout_nc", ".", "is_time_variable_valid", "(", ")", ":", "log", "(", "\"RAPID Qout file already CF compliant ...\"", ",", "\"INFO\"", ")", "return", "crv", "=", "ConvertRAPIDOutputToCF", "(", "rapid_output_file", "=", "self", ".", "Qout_file", ",", "start_datetime", "=", "simulation_start_datetime", ",", "time_step", "=", "self", ".", "ZS_TauR", ",", "qinit_file", "=", "self", ".", "Qinit_file", ",", "comid_lat_lon_z_file", "=", "comid_lat_lon_z_file", ",", "rapid_connect_file", "=", "self", ".", "rapid_connect_file", ",", "project_name", "=", "project_name", ",", "output_id_dim_name", "=", "'rivid'", ",", "output_flow_var_name", "=", "'Qout'", ",", "print_debug", "=", "False", ")", "crv", ".", "convert", "(", ")" ]
37.236842
19.105263
def get_network_ipv4(self, id_network): """ Get networkipv4 :param id_network: Identifier of the Network. Integer value and greater than zero. :return: Following dictionary: :: {'network': {'id': < id_networkIpv6 >, 'network_type': < id_tipo_rede >, 'ambiente_vip': < id_ambiente_vip >, 'vlan': <id_vlan> 'oct1': < rede_oct1 >, 'oct2': < rede_oct2 >, 'oct3': < rede_oct3 >, 'oct4': < rede_oct4 > 'blocK': < bloco >, 'mask_oct1': < mascara_oct1 >, 'mask_oct2': < mascara_oct2 >, 'mask_oct3': < mascara_oct3 >, 'mask_oct4': < mascara_oct4 >, 'active': < ativada >, 'broadcast':<'broadcast>, }} :raise NetworkIPv4NotFoundError: NetworkIPV4 not found. :raise InvalidValueError: Invalid ID for NetworkIpv4 :raise NetworkIPv4Error: Error in NetworkIpv4 :raise XMLError: Networkapi failed to generate the XML response. """ if not is_valid_int_param(id_network): raise InvalidParameterError( u'O id do rede ip4 foi informado incorretamente.') url = 'network/ipv4/id/' + str(id_network) + '/' code, xml = self.submit(None, 'GET', url) return self.response(code, xml)
[ "def", "get_network_ipv4", "(", "self", ",", "id_network", ")", ":", "if", "not", "is_valid_int_param", "(", "id_network", ")", ":", "raise", "InvalidParameterError", "(", "u'O id do rede ip4 foi informado incorretamente.'", ")", "url", "=", "'network/ipv4/id/'", "+", "str", "(", "id_network", ")", "+", "'/'", "code", ",", "xml", "=", "self", ".", "submit", "(", "None", ",", "'GET'", ",", "url", ")", "return", "self", ".", "response", "(", "code", ",", "xml", ")" ]
32.9
16.25
def update_module_names(cr, namespec, merge_modules=False):
    """Deal with changed module names, making all the needed changes on the
    related tables, like XML-IDs, translations, and so on.

    :param cr: Database cursor used to execute the queries.
    :param namespec: list of tuples of (old name, new name)
    :param merge_modules: Specify if the operation should be a merge instead
        of just a renaming.
    """
    for (old_name, new_name) in namespec:
        if merge_modules:
            # Delete meta entries, that will avoid the entry removal
            # They will be recreated by the new module anyhow.
            query = "SELECT id FROM ir_module_module WHERE name = %s"
            cr.execute(query, [old_name])
            row = cr.fetchone()
            if row:
                old_id = row[0]
                query = "DELETE FROM ir_model_constraint WHERE module = %s"
                logged_query(cr, query, [old_id])
                query = "DELETE FROM ir_model_relation WHERE module = %s"
                logged_query(cr, query, [old_id])
        else:
            # Plain rename: update the module record and its own XML-ID.
            query = "UPDATE ir_module_module SET name = %s WHERE name = %s"
            logged_query(cr, query, (new_name, old_name))
            query = ("UPDATE ir_model_data SET name = %s "
                     "WHERE name = %s AND module = 'base' AND "
                     "model='ir.module.module' ")
            logged_query(cr, query,
                         ("module_%s" % new_name, "module_%s" % old_name))
        # The subselect allows to avoid duplicated XML-IDs
        query = ("UPDATE ir_model_data SET module = %s "
                 "WHERE module = %s AND name NOT IN "
                 "(SELECT name FROM ir_model_data WHERE module = %s)")
        logged_query(cr, query, (new_name, old_name, new_name))
        # Rename the remaining occurrences for let Odoo's update process
        # to auto-remove related resources
        query = ("UPDATE ir_model_data "
                 "SET name = name || '_openupgrade_' || id, "
                 "module = %s "
                 "WHERE module = %s")
        logged_query(cr, query, (new_name, old_name))
        # Keep dependency declarations of other modules pointing at the
        # new name.
        query = ("UPDATE ir_module_module_dependency SET name = %s "
                 "WHERE name = %s")
        logged_query(cr, query, (new_name, old_name))
        # NOTE(review): version_info is presumably the Odoo server version;
        # ir_translation only carries a module column on 8.0+ -- confirm.
        if version_info[0] > 7:
            query = ("UPDATE ir_translation SET module = %s "
                     "WHERE module = %s")
            logged_query(cr, query, (new_name, old_name))
        if merge_modules:
            # Conserve old_name's state if new_name is uninstalled
            logged_query(
                cr,
                "UPDATE ir_module_module m1 SET state=m2.state "
                "FROM ir_module_module m2 WHERE m1.name=%s AND "
                "m2.name=%s AND m1.state='uninstalled'",
                (new_name, old_name),
            )
            query = "DELETE FROM ir_module_module WHERE name = %s"
            logged_query(cr, query, [old_name])
            logged_query(
                cr,
                "DELETE FROM ir_model_data WHERE module = 'base' "
                "AND model='ir.module.module' AND name = %s",
                ('module_%s' % old_name,),
            )
[ "def", "update_module_names", "(", "cr", ",", "namespec", ",", "merge_modules", "=", "False", ")", ":", "for", "(", "old_name", ",", "new_name", ")", "in", "namespec", ":", "if", "merge_modules", ":", "# Delete meta entries, that will avoid the entry removal", "# They will be recreated by the new module anyhow.", "query", "=", "\"SELECT id FROM ir_module_module WHERE name = %s\"", "cr", ".", "execute", "(", "query", ",", "[", "old_name", "]", ")", "row", "=", "cr", ".", "fetchone", "(", ")", "if", "row", ":", "old_id", "=", "row", "[", "0", "]", "query", "=", "\"DELETE FROM ir_model_constraint WHERE module = %s\"", "logged_query", "(", "cr", ",", "query", ",", "[", "old_id", "]", ")", "query", "=", "\"DELETE FROM ir_model_relation WHERE module = %s\"", "logged_query", "(", "cr", ",", "query", ",", "[", "old_id", "]", ")", "else", ":", "query", "=", "\"UPDATE ir_module_module SET name = %s WHERE name = %s\"", "logged_query", "(", "cr", ",", "query", ",", "(", "new_name", ",", "old_name", ")", ")", "query", "=", "(", "\"UPDATE ir_model_data SET name = %s \"", "\"WHERE name = %s AND module = 'base' AND \"", "\"model='ir.module.module' \"", ")", "logged_query", "(", "cr", ",", "query", ",", "(", "\"module_%s\"", "%", "new_name", ",", "\"module_%s\"", "%", "old_name", ")", ")", "# The subselect allows to avoid duplicated XML-IDs", "query", "=", "(", "\"UPDATE ir_model_data SET module = %s \"", "\"WHERE module = %s AND name NOT IN \"", "\"(SELECT name FROM ir_model_data WHERE module = %s)\"", ")", "logged_query", "(", "cr", ",", "query", ",", "(", "new_name", ",", "old_name", ",", "new_name", ")", ")", "# Rename the remaining occurrences for let Odoo's update process", "# to auto-remove related resources", "query", "=", "(", "\"UPDATE ir_model_data \"", "\"SET name = name || '_openupgrade_' || id, \"", "\"module = %s \"", "\"WHERE module = %s\"", ")", "logged_query", "(", "cr", ",", "query", ",", "(", "new_name", ",", "old_name", ")", ")", "query", "=", "(", "\"UPDATE 
ir_module_module_dependency SET name = %s \"", "\"WHERE name = %s\"", ")", "logged_query", "(", "cr", ",", "query", ",", "(", "new_name", ",", "old_name", ")", ")", "if", "version_info", "[", "0", "]", ">", "7", ":", "query", "=", "(", "\"UPDATE ir_translation SET module = %s \"", "\"WHERE module = %s\"", ")", "logged_query", "(", "cr", ",", "query", ",", "(", "new_name", ",", "old_name", ")", ")", "if", "merge_modules", ":", "# Conserve old_name's state if new_name is uninstalled", "logged_query", "(", "cr", ",", "\"UPDATE ir_module_module m1 SET state=m2.state \"", "\"FROM ir_module_module m2 WHERE m1.name=%s AND \"", "\"m2.name=%s AND m1.state='uninstalled'\"", ",", "(", "new_name", ",", "old_name", ")", ",", ")", "query", "=", "\"DELETE FROM ir_module_module WHERE name = %s\"", "logged_query", "(", "cr", ",", "query", ",", "[", "old_name", "]", ")", "logged_query", "(", "cr", ",", "\"DELETE FROM ir_model_data WHERE module = 'base' \"", "\"AND model='ir.module.module' AND name = %s\"", ",", "(", "'module_%s'", "%", "old_name", ",", ")", ",", ")" ]
47.815385
17.292308
def get_cmd_output(*args, encoding: str = SYS_ENCODING) -> str:
    """
    Return the text output of a command.

    Args:
        args: the command and its arguments (run without a shell)
        encoding: encoding used to decode the command's standard output

    Returns:
        the command's stdout decoded with ``encoding``; undecodable bytes
        are ignored

    Note:
        The return code is not checked, and stderr is not captured (it is
        inherited from the parent process, as before).
    """
    log.debug("get_cmd_output(): args = {!r}", args)
    # subprocess.run replaces the manual Popen/communicate pair; the old
    # code also bound a ``stderr`` variable that was always None (stderr
    # was never piped) and never used.
    completed = subprocess.run(args, stdout=subprocess.PIPE)
    return completed.stdout.decode(encoding, errors='ignore')
[ "def", "get_cmd_output", "(", "*", "args", ",", "encoding", ":", "str", "=", "SYS_ENCODING", ")", "->", "str", ":", "log", ".", "debug", "(", "\"get_cmd_output(): args = {!r}\"", ",", "args", ")", "p", "=", "subprocess", ".", "Popen", "(", "args", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "stdout", ",", "stderr", "=", "p", ".", "communicate", "(", ")", "return", "stdout", ".", "decode", "(", "encoding", ",", "errors", "=", "'ignore'", ")" ]
38.375
8.375
def btc_script_deserialize(script):
    """
    Given a script (hex or bin), decode it into its list of opcodes and data.
    Return a list of strings and ints.

    Each element of the returned list is one of:
      * None for OP_0,
      * a hex string for pushed data,
      * an int for any other opcode (including -1..16 for
        OP_1NEGATE/OP_RESERVED/OP_1..OP_16).

    Based on code in pybitcointools (https://github.com/vbuterin/pybitcointools) by Vitalik Buterin
    """
    # Accept hex-encoded scripts as well as raw bytes.
    if isinstance(script, str) and re.match('^[0-9a-fA-F]*$', script):
        script = binascii.unhexlify(script)

    # output buffer
    out = []
    pos = 0

    while pos < len(script):
        # next script op...
        code = encoding.from_byte_to_int(script[pos])

        if code == 0:
            # empty (OP_0)
            out.append(None)
            pos += 1

        elif code <= 75:
            # opcodes 1..75 push that many bytes of data directly;
            # push the slice of data.
            out.append(script[pos+1:pos+1+code])
            pos += 1 + code

        elif code <= 78:
            # OP_PUSHDATA1 (76), OP_PUSHDATA2 (77), OP_PUSHDATA4 (78),
            # followed by a 1-, 2- or 4-byte length and then the data.
            # push the data itself
            szsz = pow(2, code - 76)
            # The reversed slice reads the length bytes back-to-front,
            # i.e. the length is decoded as little-endian.
            sz = encoding.decode(script[pos+szsz: pos:-1], 256)
            out.append(script[pos + 1 + szsz : pos + 1 + szsz + sz])
            pos += 1 + szsz + sz

        elif code <= 96:
            # OP_1NEGATE, OP_RESERVED, OP_1 thru OP_16
            # pass -1 for OP_1NEGATE
            # pass 0 for OP_RESERVED (shouldn't be used anyway)
            # pass 1 thru 16 for OP_1 thru OP_16
            out.append(code - 80)
            pos += 1

        else:
            # raw opcode
            out.append(code)
            pos += 1

    # make sure each string is hex'ed
    out = encoding.json_changebase(out, lambda x: encoding.safe_hexlify(x))
    return out
[ "def", "btc_script_deserialize", "(", "script", ")", ":", "if", "isinstance", "(", "script", ",", "str", ")", "and", "re", ".", "match", "(", "'^[0-9a-fA-F]*$'", ",", "script", ")", ":", "script", "=", "binascii", ".", "unhexlify", "(", "script", ")", "# output buffer", "out", "=", "[", "]", "pos", "=", "0", "while", "pos", "<", "len", "(", "script", ")", ":", "# next script op...", "code", "=", "encoding", ".", "from_byte_to_int", "(", "script", "[", "pos", "]", ")", "if", "code", "==", "0", ":", "# empty (OP_0)", "out", ".", "append", "(", "None", ")", "pos", "+=", "1", "elif", "code", "<=", "75", ":", "# literal numeric constant, followed by a slice of data.", "# push the slice of data.", "out", ".", "append", "(", "script", "[", "pos", "+", "1", ":", "pos", "+", "1", "+", "code", "]", ")", "pos", "+=", "1", "+", "code", "elif", "code", "<=", "78", ":", "# OP_PUSHDATA1, OP_PUSHDATA2, OP_PUSHDATA4, followed by length and data", "# push the data itself", "szsz", "=", "pow", "(", "2", ",", "code", "-", "76", ")", "sz", "=", "encoding", ".", "decode", "(", "script", "[", "pos", "+", "szsz", ":", "pos", ":", "-", "1", "]", ",", "256", ")", "out", ".", "append", "(", "script", "[", "pos", "+", "1", "+", "szsz", ":", "pos", "+", "1", "+", "szsz", "+", "sz", "]", ")", "pos", "+=", "1", "+", "szsz", "+", "sz", "elif", "code", "<=", "96", ":", "# OP_1NEGATE, OP_RESERVED, OP_1 thru OP_16", "# pass -1 for OP_1NEGATE", "# pass 0 for OP_RESERVED (shouldn't be used anyway)", "# pass 1 thru 16 for OP_1 thru OP_16", "out", ".", "append", "(", "code", "-", "80", ")", "pos", "+=", "1", "else", ":", "# raw opcode", "out", ".", "append", "(", "code", ")", "pos", "+=", "1", "# make sure each string is hex'ed", "out", "=", "encoding", ".", "json_changebase", "(", "out", ",", "lambda", "x", ":", "encoding", ".", "safe_hexlify", "(", "x", ")", ")", "return", "out" ]
30.109091
20.763636
def match_names(self, *valist, **kwargs):
    """Perform taxonomic name resolution (TNRS).

    See https://github.com/OpenTreeOfLife/opentree/wiki/Open-Tree-of-Life-APIs#match_names
    with the exception that "ids" in the API call is referred to by the name
    "id_list" in this function.

    The most commonly used kwargs are:
        - context_name=<name> (see contexts and infer_context methods)
        - do_approximate_matching=False (to speed up the search)
        - include_dubious=True see
          https://github.com/OpenTreeOfLife/reference-taxonomy/wiki/taxon-flags
        - include_deprecated=True to see deprecated taxa (see previous link
          to documentation about flags)
        - wrap_response=True to return a TNRSRespose object (rather than the
          "raw" response of the web-services).
    """
    # The previous implementation special-cased len(valist) == 1 with a
    # string-type check, but both branches performed the identical call,
    # so the whole check was dead code.
    return self.taxomachine.TNRS(*valist, **kwargs)
[ "def", "match_names", "(", "self", ",", "*", "valist", ",", "*", "*", "kwargs", ")", ":", "if", "len", "(", "valist", ")", "==", "1", ":", "if", "not", "is_str_type", "(", "valist", "[", "0", "]", ")", ":", "return", "self", ".", "taxomachine", ".", "TNRS", "(", "*", "valist", ",", "*", "*", "kwargs", ")", "return", "self", ".", "taxomachine", ".", "TNRS", "(", "*", "valist", ",", "*", "*", "kwargs", ")" ]
66.933333
30.066667
def _remove_wire_nets(block):
    """ Remove all wire nodes from the block.

    A 'w' (wire) net simply forwards its single argument to its destination
    wirevector.  Every such net is dropped and each consumer of the removed
    destination is rewired to read from the original producer instead.  Nets
    whose destination is an Output are kept unchanged.

    :param block: the Block to be modified in place.
    """

    wire_src_dict = _ProducerList()
    wire_removal_set = set()  # set of all wirevectors to be removed

    # one pass to build the map of value producers and
    # all of the nets and wires to be removed
    for net in block.logic:
        if net.op == 'w':
            wire_src_dict[net.dests[0]] = net.args[0]
            if not isinstance(net.dests[0], Output):
                wire_removal_set.add(net.dests[0])

    # second full pass to create the new logic without the wire nets
    for net in block.logic:
        if net.op != 'w' or isinstance(net.dests[0], Output):
            # NOTE(review): find_producer presumably follows chains of
            # wire assignments to their ultimate source -- confirm against
            # _ProducerList's implementation.
            new_args = tuple(wire_src_dict.find_producer(x) for x in net.args)
            new_net = LogicNet(net.op, net.op_param, new_args, net.dests)
            new_logic.add(new_net)

    # now update the block with the new logic and remove wirevectors
    block.logic = new_logic
    for dead_wirevector in wire_removal_set:
        del block.wirevector_by_name[dead_wirevector.name]
        block.wirevector_set.remove(dead_wirevector)

    block.sanity_check()
[ "def", "_remove_wire_nets", "(", "block", ")", ":", "wire_src_dict", "=", "_ProducerList", "(", ")", "wire_removal_set", "=", "set", "(", ")", "# set of all wirevectors to be removed", "# one pass to build the map of value producers and", "# all of the nets and wires to be removed", "for", "net", "in", "block", ".", "logic", ":", "if", "net", ".", "op", "==", "'w'", ":", "wire_src_dict", "[", "net", ".", "dests", "[", "0", "]", "]", "=", "net", ".", "args", "[", "0", "]", "if", "not", "isinstance", "(", "net", ".", "dests", "[", "0", "]", ",", "Output", ")", ":", "wire_removal_set", ".", "add", "(", "net", ".", "dests", "[", "0", "]", ")", "# second full pass to create the new logic without the wire nets", "new_logic", "=", "set", "(", ")", "for", "net", "in", "block", ".", "logic", ":", "if", "net", ".", "op", "!=", "'w'", "or", "isinstance", "(", "net", ".", "dests", "[", "0", "]", ",", "Output", ")", ":", "new_args", "=", "tuple", "(", "wire_src_dict", ".", "find_producer", "(", "x", ")", "for", "x", "in", "net", ".", "args", ")", "new_net", "=", "LogicNet", "(", "net", ".", "op", ",", "net", ".", "op_param", ",", "new_args", ",", "net", ".", "dests", ")", "new_logic", ".", "add", "(", "new_net", ")", "# now update the block with the new logic and remove wirevectors", "block", ".", "logic", "=", "new_logic", "for", "dead_wirevector", "in", "wire_removal_set", ":", "del", "block", ".", "wirevector_by_name", "[", "dead_wirevector", ".", "name", "]", "block", ".", "wirevector_set", ".", "remove", "(", "dead_wirevector", ")", "block", ".", "sanity_check", "(", ")" ]
38.689655
19.827586
def fetch(self, url, encoding=None, force_refetch=False, nocache=False, quiet=True):
    ''' Fetch a HTML file as binary.

    :param url: URL to download.
    :param encoding: when given, the downloaded bytes are decoded with
        this encoding and a str is returned instead of bytes.
    :param force_refetch: bypass the cache and download again even when
        the URL is already cached.
    :param nocache: do not store the downloaded content into the cache.
    :param quiet: when False, re-raise URLError on failure instead of
        returning None.
    :return: the page content (bytes, or str when ``encoding`` is given),
        or None when the download failed and ``quiet`` is True.
    '''
    try:
        if not force_refetch and self.cache is not None and url in self.cache:
            # try to look for content in cache
            # NOTE(review): this uses the root `logging` module while the
            # rest of the function uses getLogger() -- possibly unintentional.
            logging.debug('Retrieving content from cache for {}'.format(url))
            return self.cache.retrieve_blob(url, encoding)
        encoded_url = WebHelper.encode_url(url)
        req = Request(encoded_url, headers={'User-Agent': 'Mozilla/5.0'})
        # support gzip
        req.add_header('Accept-encoding', 'gzip, deflate')
        # Open URL
        getLogger().info("Fetching: {url} |".format(url=url))
        response = urlopen(req)
        content = response.read()
        # unzip if required (server honoured our Accept-encoding header)
        if 'Content-Encoding' in response.info() and response.info().get('Content-Encoding') == 'gzip':
            # unzip
            with gzip.open(BytesIO(content)) as gzfile:
                content = gzfile.read()
        # update cache if required
        if self.cache is not None and not nocache:
            if url not in self.cache:
                self.cache.insert_blob(url, content)
        # decode only when the caller asked for it and there is content
        return content.decode(encoding) if content and encoding else content
    except URLError as e:
        if hasattr(e, 'reason'):
            getLogger().exception('We failed to reach {}. Reason: {}'.format(url, e.reason))
        elif hasattr(e, 'code'):
            getLogger().exception('The server couldn\'t fulfill the request. Error code: {}'.format(e.code))
        else:
            # Other exception ...
            getLogger().exception("Fetching error")
        if not quiet:
            raise
        return None
Reason: {}'", ".", "format", "(", "url", ",", "e", ".", "reason", ")", ")", "elif", "hasattr", "(", "e", ",", "'code'", ")", ":", "getLogger", "(", ")", ".", "exception", "(", "'The server couldn\\'t fulfill the request. Error code: {}'", ".", "format", "(", "e", ".", "code", ")", ")", "else", ":", "# Other exception ...", "getLogger", "(", ")", ".", "exception", "(", "\"Fetching error\"", ")", "if", "not", "quiet", ":", "raise", "return", "None" ]
50.083333
20.972222
def get(self, endpoint, params=None):
    """Issue an HTTP GET request to QuadrigaCX.

    :param endpoint: API endpoint.
    :type endpoint: str | unicode
    :param params: URL parameters.
    :type params: dict
    :return: Response body from QuadrigaCX.
    :rtype: dict
    :raise quadriga.exceptions.RequestError: If HTTP OK was not returned.
    """
    full_url = self._url + endpoint
    raw_response = self._session.get(
        url=full_url,
        params=params,
        timeout=self._timeout,
    )
    return self._handle_response(raw_response)
[ "def", "get", "(", "self", ",", "endpoint", ",", "params", "=", "None", ")", ":", "response", "=", "self", ".", "_session", ".", "get", "(", "url", "=", "self", ".", "_url", "+", "endpoint", ",", "params", "=", "params", ",", "timeout", "=", "self", ".", "_timeout", ")", "return", "self", ".", "_handle_response", "(", "response", ")" ]
33.470588
11.294118
def insert(self, point, payload):
    """!
    @brief Insert a new point with its payload into the kd-tree.

    @param[in] point (list): Coordinates of the point of the inserted node.
    @param[in] payload (any-type): Payload of the inserted node, e.g. an
                identifier or any useful data that belongs to the point.

    @return (node) The node that was inserted into the kd-tree.

    """

    # First insertion defines the dimensionality of the tree.
    if self.__root is None:
        self.__dimension = len(point)
        self.__root = node(point, payload, None, None, 0)
        self.__point_comparator = self.__create_point_comparator(type(point))
        return self.__root

    current = self.__root
    while True:
        # Greater-or-equal along the current discriminator goes right,
        # strictly smaller goes left.
        go_right = current.data[current.disc] <= point[current.disc]
        child = current.right if go_right else current.left
        if child is not None:
            current = child
            continue

        # Reached a free leaf slot: cycle the discriminator and attach.
        disc = current.disc + 1
        if disc >= self.__dimension:
            disc = 0
        new_leaf = node(point, payload, None, None, disc, current)
        if go_right:
            current.right = new_leaf
        else:
            current.left = new_leaf
        return new_leaf
[ "def", "insert", "(", "self", ",", "point", ",", "payload", ")", ":", "if", "self", ".", "__root", "is", "None", ":", "self", ".", "__dimension", "=", "len", "(", "point", ")", "self", ".", "__root", "=", "node", "(", "point", ",", "payload", ",", "None", ",", "None", ",", "0", ")", "self", ".", "__point_comparator", "=", "self", ".", "__create_point_comparator", "(", "type", "(", "point", ")", ")", "return", "self", ".", "__root", "cur_node", "=", "self", ".", "__root", "while", "True", ":", "if", "cur_node", ".", "data", "[", "cur_node", ".", "disc", "]", "<=", "point", "[", "cur_node", ".", "disc", "]", ":", "# If new node is greater or equal than current node then check right leaf\r", "if", "cur_node", ".", "right", "is", "None", ":", "discriminator", "=", "cur_node", ".", "disc", "+", "1", "if", "discriminator", ">=", "self", ".", "__dimension", ":", "discriminator", "=", "0", "cur_node", ".", "right", "=", "node", "(", "point", ",", "payload", ",", "None", ",", "None", ",", "discriminator", ",", "cur_node", ")", "return", "cur_node", ".", "right", "else", ":", "cur_node", "=", "cur_node", ".", "right", "else", ":", "# If new node is less than current then check left leaf\r", "if", "cur_node", ".", "left", "is", "None", ":", "discriminator", "=", "cur_node", ".", "disc", "+", "1", "if", "discriminator", ">=", "self", ".", "__dimension", ":", "discriminator", "=", "0", "cur_node", ".", "left", "=", "node", "(", "point", ",", "payload", ",", "None", ",", "None", ",", "discriminator", ",", "cur_node", ")", "return", "cur_node", ".", "left", "else", ":", "cur_node", "=", "cur_node", ".", "left" ]
42.772727
20.022727
def clean(self, value):
    """Cleans and returns the given value, or raises a
    ParameterNotValidError exception when the value is not a list or a
    tuple."""
    if not isinstance(value, (list, tuple)):
        raise ParameterNotValidError
    # Clean every element with the parent class' clean().
    return [super(FeatureCollectionParameter, self).clean(item) for item in value]
[ "def", "clean", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "[", "super", "(", "FeatureCollectionParameter", ",", "self", ")", ".", "clean", "(", "x", ")", "for", "x", "in", "value", "]", "raise", "ParameterNotValidError" ]
40.142857
21.285714
def register_elastic_task(self, *args, **kwargs):
    """Register an elastic task.

    Forces the ``task_class`` keyword to ``ElasticTask`` and delegates the
    actual registration to :meth:`register_task`.
    """
    kwargs.update(task_class=ElasticTask)
    return self.register_task(*args, **kwargs)
[ "def", "register_elastic_task", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "\"task_class\"", "]", "=", "ElasticTask", "return", "self", ".", "register_task", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
45
5.25
def list_nodes_min(call=None):
    '''
    Return a list of the VMs that are on the provider. Only a list of VM
    names and their state is returned. This is the minimum amount of
    information needed to check for existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    # This is a provider-level function; reject action-style invocation.
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    result = {}
    for vm_ in _query('linode', 'list')['DATA']:
        result[vm_['LABEL']] = {
            'id': six.text_type(vm_['LINODEID']),
            'state': _get_status_descr_by_id(int(vm_['STATUS'])),
        }
    return result
[ "def", "list_nodes_min", "(", "call", "=", "None", ")", ":", "if", "call", "==", "'action'", ":", "raise", "SaltCloudSystemExit", "(", "'The list_nodes_min function must be called with -f or --function.'", ")", "ret", "=", "{", "}", "nodes", "=", "_query", "(", "'linode'", ",", "'list'", ")", "[", "'DATA'", "]", "for", "node", "in", "nodes", ":", "name", "=", "node", "[", "'LABEL'", "]", "this_node", "=", "{", "'id'", ":", "six", ".", "text_type", "(", "node", "[", "'LINODEID'", "]", ")", ",", "'state'", ":", "_get_status_descr_by_id", "(", "int", "(", "node", "[", "'STATUS'", "]", ")", ")", "}", "ret", "[", "name", "]", "=", "this_node", "return", "ret" ]
25.969697
25.787879
def cache(self):
    """Call a user defined query and cache the results.

    Requires that the instance was initialized with a ``bucket_width`` and
    an ``untrusted_time``; raises ``ValueError`` otherwise.
    """
    if not self._bucket_width or self._untrusted_time is None:
        raise ValueError('QueryCompute must be initialized with a bucket_width '
                         'and an untrusted_time in order to write to the cache.')
    # Data newer than this cutoff is considered untrusted and not cached.
    cutoff = (datetime.datetime.now() -
              datetime.timedelta(seconds=self._untrusted_time))
    # compute_and_cache_missing_buckets is lazy; drain it so the caching
    # side effects actually happen.
    list(self._query_cache.compute_and_cache_missing_buckets(
        self._start_time, self._end_time, cutoff))
[ "def", "cache", "(", "self", ")", ":", "if", "not", "self", ".", "_bucket_width", "or", "self", ".", "_untrusted_time", "is", "None", ":", "raise", "ValueError", "(", "'QueryCompute must be initialized with a bucket_width '", "'and an untrusted_time in order to write to the cache.'", ")", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "untrusted_time", "=", "now", "-", "datetime", ".", "timedelta", "(", "seconds", "=", "self", ".", "_untrusted_time", ")", "list", "(", "self", ".", "_query_cache", ".", "compute_and_cache_missing_buckets", "(", "self", ".", "_start_time", ",", "self", ".", "_end_time", ",", "untrusted_time", ")", ")" ]
44.416667
22.833333
def supports_export(self, exported_configs, service_intents, export_props):
    """
    Called by rsa.export_service to ask whether this
    ExportDistributionProvider supports exporting a service with the given
    exported_configs (list), service_intents (list), and export_props
    (dict).

    Returns an ExportContainer instance when export is supported (the
    container is then used to perform the export), or None when this
    distribution provider should not be used.  This default implementation
    simply delegates to self._get_or_create_container.
    """
    container = self._get_or_create_container(
        exported_configs,
        service_intents,
        export_props,
    )
    return container
[ "def", "supports_export", "(", "self", ",", "exported_configs", ",", "service_intents", ",", "export_props", ")", ":", "return", "self", ".", "_get_or_create_container", "(", "exported_configs", ",", "service_intents", ",", "export_props", ")" ]
42.8125
21.8125
def backup(file_name, jail=None, chroot=None, root=None):
    '''
    Export installed packages into yaml+mtree file

    file_name
        Path of the backup file to create. When ``jail``, ``chroot`` or
        ``root`` is given, the command runs within that environment, so the
        path is relative to its root.

    jail
        Backup packages from the specified jail.

    chroot
        Backup packages from the specified chroot (ignored if ``jail`` is
        specified).

    root
        Backup packages from the specified root (ignored if ``jail`` is
        specified).

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.backup /tmp/pkg
        salt '*' pkg.backup /tmp/pkg jail=<jail name or id>
        salt '*' pkg.backup /tmp/pkg chroot=/path/to/chroot
    '''
    cmd = _pkg(jail, chroot, root) + ['backup', '-d', file_name]
    output = __salt__['cmd.run'](
        cmd,
        output_loglevel='trace',
        python_shell=False
    )
    # pkg prints a progress prefix ending in '...'; return what follows it.
    return output.split('...')[1]
[ "def", "backup", "(", "file_name", ",", "jail", "=", "None", ",", "chroot", "=", "None", ",", "root", "=", "None", ")", ":", "ret", "=", "__salt__", "[", "'cmd.run'", "]", "(", "_pkg", "(", "jail", ",", "chroot", ",", "root", ")", "+", "[", "'backup'", ",", "'-d'", ",", "file_name", "]", ",", "output_loglevel", "=", "'trace'", ",", "python_shell", "=", "False", ")", "return", "ret", ".", "split", "(", "'...'", ")", "[", "1", "]" ]
28.8
27.777778
def compare_and_update_config(config, update_config, changes, namespace=''):
    '''
    Recursively compare two configs, writing any needed changes to the
    update_config and capturing changes in the changes dict.

    config
        The reference configuration (dict, list or scalar).

    update_config
        The configuration to update in place; values that differ from
        ``config`` are overwritten, extra dict keys are kept, extra list
        items are trimmed.

    changes
        Dict accumulating the differences; each entry maps a namespace path
        to ``{'new': ..., 'old': ...}``.

    namespace
        Dotted path of the node currently being compared, used as the key
        prefix in ``changes``.
    '''
    if isinstance(config, dict):
        if not update_config:
            if config:
                # the updated config is more valid--report that we are using it
                changes[namespace] = {
                    'new': config,
                    'old': update_config,
                }
            return config
        elif not isinstance(update_config, dict):
            # new config is a dict, other isn't--new one wins
            changes[namespace] = {
                'new': config,
                'old': update_config,
            }
            return config
        else:
            # compare each key in the base config with the values in the
            # update_config, overwriting the values that are different but
            # keeping any that are not defined in config
            for key, value in six.iteritems(config):
                _namespace = key
                if namespace:
                    _namespace = '{0}.{1}'.format(namespace, _namespace)
                update_config[key] = compare_and_update_config(
                    value,
                    update_config.get(key, None),
                    changes,
                    namespace=_namespace,
                )
            return update_config

    elif isinstance(config, list):
        if not update_config:
            if config:
                # the updated config is more valid--report that we are using it
                changes[namespace] = {
                    'new': config,
                    'old': update_config,
                }
            return config
        elif not isinstance(update_config, list):
            # new config is a list, other isn't--new one wins
            changes[namespace] = {
                'new': config,
                'old': update_config,
            }
            return config
        else:
            # iterate through config list, ensuring that each index in the
            # update_config list is the same
            for idx, item in enumerate(config):
                _namespace = '[{0}]'.format(idx)
                if namespace:
                    _namespace = '{0}{1}'.format(namespace, _namespace)
                _update = None
                if len(update_config) > idx:
                    _update = update_config[idx]
                if _update:
                    update_config[idx] = compare_and_update_config(
                        config[idx],
                        _update,
                        changes,
                        namespace=_namespace,
                    )
                else:
                    # index missing (or falsy) in update_config: take the
                    # value from config and record the change
                    changes[_namespace] = {
                        'new': config[idx],
                        'old': _update,
                    }
                    update_config.append(config[idx])

            if len(update_config) > len(config):
                # trim any items in update_config that are not in config
                for idx, old_item in enumerate(update_config):
                    if idx < len(config):
                        continue
                    _namespace = '[{0}]'.format(idx)
                    if namespace:
                        _namespace = '{0}{1}'.format(namespace, _namespace)
                    changes[_namespace] = {
                        'new': None,
                        'old': old_item,
                    }
                del update_config[len(config):]

            return update_config

    else:
        # scalar leaf: config wins whenever the values differ
        if config != update_config:
            changes[namespace] = {
                'new': config,
                'old': update_config,
            }
        return config
[ "def", "compare_and_update_config", "(", "config", ",", "update_config", ",", "changes", ",", "namespace", "=", "''", ")", ":", "if", "isinstance", "(", "config", ",", "dict", ")", ":", "if", "not", "update_config", ":", "if", "config", ":", "# the updated config is more valid--report that we are using it", "changes", "[", "namespace", "]", "=", "{", "'new'", ":", "config", ",", "'old'", ":", "update_config", ",", "}", "return", "config", "elif", "not", "isinstance", "(", "update_config", ",", "dict", ")", ":", "# new config is a dict, other isn't--new one wins", "changes", "[", "namespace", "]", "=", "{", "'new'", ":", "config", ",", "'old'", ":", "update_config", ",", "}", "return", "config", "else", ":", "# compare each key in the base config with the values in the", "# update_config, overwriting the values that are different but", "# keeping any that are not defined in config", "for", "key", ",", "value", "in", "six", ".", "iteritems", "(", "config", ")", ":", "_namespace", "=", "key", "if", "namespace", ":", "_namespace", "=", "'{0}.{1}'", ".", "format", "(", "namespace", ",", "_namespace", ")", "update_config", "[", "key", "]", "=", "compare_and_update_config", "(", "value", ",", "update_config", ".", "get", "(", "key", ",", "None", ")", ",", "changes", ",", "namespace", "=", "_namespace", ",", ")", "return", "update_config", "elif", "isinstance", "(", "config", ",", "list", ")", ":", "if", "not", "update_config", ":", "if", "config", ":", "# the updated config is more valid--report that we are using it", "changes", "[", "namespace", "]", "=", "{", "'new'", ":", "config", ",", "'old'", ":", "update_config", ",", "}", "return", "config", "elif", "not", "isinstance", "(", "update_config", ",", "list", ")", ":", "# new config is a list, other isn't--new one wins", "changes", "[", "namespace", "]", "=", "{", "'new'", ":", "config", ",", "'old'", ":", "update_config", ",", "}", "return", "config", "else", ":", "# iterate through config list, ensuring that each 
index in the", "# update_config list is the same", "for", "idx", ",", "item", "in", "enumerate", "(", "config", ")", ":", "_namespace", "=", "'[{0}]'", ".", "format", "(", "idx", ")", "if", "namespace", ":", "_namespace", "=", "'{0}{1}'", ".", "format", "(", "namespace", ",", "_namespace", ")", "_update", "=", "None", "if", "len", "(", "update_config", ")", ">", "idx", ":", "_update", "=", "update_config", "[", "idx", "]", "if", "_update", ":", "update_config", "[", "idx", "]", "=", "compare_and_update_config", "(", "config", "[", "idx", "]", ",", "_update", ",", "changes", ",", "namespace", "=", "_namespace", ",", ")", "else", ":", "changes", "[", "_namespace", "]", "=", "{", "'new'", ":", "config", "[", "idx", "]", ",", "'old'", ":", "_update", ",", "}", "update_config", ".", "append", "(", "config", "[", "idx", "]", ")", "if", "len", "(", "update_config", ")", ">", "len", "(", "config", ")", ":", "# trim any items in update_config that are not in config", "for", "idx", ",", "old_item", "in", "enumerate", "(", "update_config", ")", ":", "if", "idx", "<", "len", "(", "config", ")", ":", "continue", "_namespace", "=", "'[{0}]'", ".", "format", "(", "idx", ")", "if", "namespace", ":", "_namespace", "=", "'{0}{1}'", ".", "format", "(", "namespace", ",", "_namespace", ")", "changes", "[", "_namespace", "]", "=", "{", "'new'", ":", "None", ",", "'old'", ":", "old_item", ",", "}", "del", "update_config", "[", "len", "(", "config", ")", ":", "]", "return", "update_config", "else", ":", "if", "config", "!=", "update_config", ":", "changes", "[", "namespace", "]", "=", "{", "'new'", ":", "config", ",", "'old'", ":", "update_config", ",", "}", "return", "config" ]
37.888889
15.282828
def deleter(self, func):
    """Register a delete function for the DynamicProperty.

    Parameters
    ----------
    func : callable
        The deleter. It must accept exactly one argument (``self``) and
        must have the same name as the property's getter.

    Returns
    -------
    DynamicProperty
        ``self``, so registration calls can be chained or used as a
        decorator.

    Raises
    ------
    TypeError
        If ``func`` is not callable, does not take exactly one argument,
        or is not named like the getter.
    """
    if not callable(func):
        raise TypeError('deleter must be callable function')
    # ``co_argcount`` only exists on plain Python functions; callables
    # without ``__code__`` (e.g. builtins) skip the arity check, as before.
    if hasattr(func, '__code__') and func.__code__.co_argcount != 1:
        # Bug fix: the old message said "two arguments" although the check
        # (and the documented contract) require exactly one (self).
        raise TypeError('deleter must be a function with exactly one '
                        'argument (self)')
    if func.__name__ != self.name:
        raise TypeError('deleter function must have same name as getter')
    self._del_func = func
    return self
[ "def", "deleter", "(", "self", ",", "func", ")", ":", "if", "not", "callable", "(", "func", ")", ":", "raise", "TypeError", "(", "'deleter must be callable function'", ")", "if", "hasattr", "(", "func", ",", "'__code__'", ")", "and", "func", ".", "__code__", ".", "co_argcount", "!=", "1", ":", "raise", "TypeError", "(", "'deleter must be a function with two arguments'", ")", "if", "func", ".", "__name__", "!=", "self", ".", "name", ":", "raise", "TypeError", "(", "'deleter function must have same name as getter'", ")", "self", ".", "_del_func", "=", "func", "return", "self" ]
42.769231
18.769231
def do_ams_put(endpoint, path, body, access_token, rformat="json", ds_min_version="3.0;NetFx"):
    '''Do a AMS HTTP PUT request and return JSON.

    Args:
        endpoint (str): Azure Media Services Initial Endpoint.
        path (str): Azure Media Services Endpoint Path.
        body (str): Azure Media Services Content Body.
        access_token (str): A valid Azure authentication token.
        rformat (str): A required JSON Accept Format.
        ds_min_version (str): A required DS MIN Version.

    Returns:
        HTTP response. JSON body.
    '''
    # "json_only" requests use a stricter accept format and the caller's
    # minimum DataService version; everything else uses module defaults.
    json_only = rformat == "json_only"
    content_type = json_only_acceptformat if json_only else json_acceptformat
    ds_version = ds_min_version if json_only else dsversion_min
    headers = {
        "Content-Type": content_type,
        "DataServiceVersion": ds_version,
        "MaxDataServiceVersion": dsversion_max,
        "Accept": json_acceptformat,
        "Accept-Charset": charset,
        "Authorization": "Bearer " + access_token,
        "x-ms-version": xmsversion,
    }
    response = requests.put(endpoint, data=body, headers=headers, allow_redirects=False)
    # The first AMS call may answer with a 301 redirect; follow it manually
    # (re-appending the endpoint path) so this is transparent to the caller.
    if response.status_code == 301:
        redirected_url = response.headers['location'] + path
        response = requests.put(redirected_url, data=body, headers=headers)
    return response
[ "def", "do_ams_put", "(", "endpoint", ",", "path", ",", "body", ",", "access_token", ",", "rformat", "=", "\"json\"", ",", "ds_min_version", "=", "\"3.0;NetFx\"", ")", ":", "min_ds", "=", "dsversion_min", "content_acceptformat", "=", "json_acceptformat", "if", "rformat", "==", "\"json_only\"", ":", "min_ds", "=", "ds_min_version", "content_acceptformat", "=", "json_only_acceptformat", "headers", "=", "{", "\"Content-Type\"", ":", "content_acceptformat", ",", "\"DataServiceVersion\"", ":", "min_ds", ",", "\"MaxDataServiceVersion\"", ":", "dsversion_max", ",", "\"Accept\"", ":", "json_acceptformat", ",", "\"Accept-Charset\"", ":", "charset", ",", "\"Authorization\"", ":", "\"Bearer \"", "+", "access_token", ",", "\"x-ms-version\"", ":", "xmsversion", "}", "response", "=", "requests", ".", "put", "(", "endpoint", ",", "data", "=", "body", ",", "headers", "=", "headers", ",", "allow_redirects", "=", "False", ")", "# AMS response to the first call can be a redirect,", "# so we handle it here to make it transparent for the caller...", "if", "response", ".", "status_code", "==", "301", ":", "redirected_url", "=", "''", ".", "join", "(", "[", "response", ".", "headers", "[", "'location'", "]", ",", "path", "]", ")", "response", "=", "requests", ".", "put", "(", "redirected_url", ",", "data", "=", "body", ",", "headers", "=", "headers", ")", "return", "response" ]
47.15625
19.28125
def args(self):
    """Parse args if they have not already been parsed and return the
    Namespace for args.

    .. Note:: Accessing args should only be done directly in the App.

    Returns:
        (namespace): ArgParser parsed arguments.
    """
    # Fast path: the command line is only ever resolved once.
    if self._parsed:
        return self._default_args

    self._default_args, unknown = self.parser.parse_known_args()

    # When running locally, pull any args from the results.tc file; on the
    # platform this happens automatically.
    self._results_tc_args()

    # Report unrecognised arguments a single time.
    self._unknown_args(unknown)

    # Mark parsing as done before applying config overrides.
    self._parsed = True

    # Merge in values from config data or a configuration file.
    self.args_update()

    return self._default_args
[ "def", "args", "(", "self", ")", ":", "if", "not", "self", ".", "_parsed", ":", "# only resolve once", "self", ".", "_default_args", ",", "unknown", "=", "self", ".", "parser", ".", "parse_known_args", "(", ")", "# when running locally retrieve any args from the results.tc file. when running in", "# platform this is done automatically.", "self", ".", "_results_tc_args", "(", ")", "# log unknown arguments only once", "self", ".", "_unknown_args", "(", "unknown", ")", "# set parsed bool to ensure args are only parsed once", "self", ".", "_parsed", "=", "True", "# update args with value from config data or configuration file", "self", ".", "args_update", "(", ")", "return", "self", ".", "_default_args" ]
35.16
23.08
def message_worker(device):
    """Poll the device's message queue forever and dispatch its events.

    Messages are expected to be UTF-8 encoded JSON; payloads whose
    ``device_id`` matches this device are handed to ``handle_event``.
    """
    _LOGGER.debug("Starting Worker Thread.")
    queue = device.messages
    while True:
        if not queue.empty():
            raw = queue.get()
            payload = {}
            try:
                payload = json.loads(raw.decode("utf-8"))
            except ValueError:
                _LOGGER.error("Received invalid message: %s", raw)
            if 'device_id' in payload:
                # Only dispatch events addressed to this device.
                if payload.get('device_id') == device.device_id:
                    device.handle_event(payload)
            else:
                _LOGGER.warning("Received message for unknown device.")
            queue.task_done()
        # Throttle the polling loop.
        time.sleep(0.2)
[ "def", "message_worker", "(", "device", ")", ":", "_LOGGER", ".", "debug", "(", "\"Starting Worker Thread.\"", ")", "msg_q", "=", "device", ".", "messages", "while", "True", ":", "if", "not", "msg_q", ".", "empty", "(", ")", ":", "message", "=", "msg_q", ".", "get", "(", ")", "data", "=", "{", "}", "try", ":", "data", "=", "json", ".", "loads", "(", "message", ".", "decode", "(", "\"utf-8\"", ")", ")", "except", "ValueError", ":", "_LOGGER", ".", "error", "(", "\"Received invalid message: %s\"", ",", "message", ")", "if", "'device_id'", "in", "data", ":", "device_id", "=", "data", ".", "get", "(", "'device_id'", ")", "if", "device_id", "==", "device", ".", "device_id", ":", "device", ".", "handle_event", "(", "data", ")", "else", ":", "_LOGGER", ".", "warning", "(", "\"Received message for unknown device.\"", ")", "msg_q", ".", "task_done", "(", ")", "time", ".", "sleep", "(", "0.2", ")" ]
30.4
19.36
def add_service_certificate(self, service_name, data, certificate_format,
                            password=None):
    '''
    Adds a certificate to a hosted service.

    service_name:
        Name of the hosted service.
    data:
        The base-64 encoded form of the pfx/cer file.
    certificate_format:
        The service certificate format.
    password:
        The certificate password. Default to None when using cer format.
    '''
    # NOTE(review): the docstring says password may be None for cer files,
    # yet it is validated as not-None below -- confirm whether cer uploads
    # are expected to supply a placeholder password.
    for name, value in (('service_name', service_name),
                        ('data', data),
                        ('certificate_format', certificate_format),
                        ('password', password)):
        _validate_not_none(name, value)
    request_path = ('/' + self.subscription_id + '/services/hostedservices/'
                    + _str(service_name) + '/certificates')
    request_body = _XmlSerializer.certificate_file_to_xml(
        data, certificate_format, password)
    return self._perform_post(request_path, request_body, as_async=True)
[ "def", "add_service_certificate", "(", "self", ",", "service_name", ",", "data", ",", "certificate_format", ",", "password", "=", "None", ")", ":", "_validate_not_none", "(", "'service_name'", ",", "service_name", ")", "_validate_not_none", "(", "'data'", ",", "data", ")", "_validate_not_none", "(", "'certificate_format'", ",", "certificate_format", ")", "_validate_not_none", "(", "'password'", ",", "password", ")", "return", "self", ".", "_perform_post", "(", "'/'", "+", "self", ".", "subscription_id", "+", "'/services/hostedservices/'", "+", "_str", "(", "service_name", ")", "+", "'/certificates'", ",", "_XmlSerializer", ".", "certificate_file_to_xml", "(", "data", ",", "certificate_format", ",", "password", ")", ",", "as_async", "=", "True", ")" ]
39.04
18.32
def process_post_tags(self, bulk_mode, api_post, post_tags):
    """
    Create or update Tags related to a post.

    :param bulk_mode: If True, minimize db operations by bulk creating post objects
    :param api_post: the API data for the post
    :param post_tags: a mapping of Tags keyed by post ID
    :return: None
    """
    # Bind the post's bucket once, then fill it with every tag that the
    # per-tag processor actually produced.
    post_tags[api_post["ID"]] = bucket = []
    for api_tag in six.itervalues(api_post["tags"]):
        processed = self.process_post_tag(bulk_mode, api_tag)
        if processed:
            bucket.append(processed)
[ "def", "process_post_tags", "(", "self", ",", "bulk_mode", ",", "api_post", ",", "post_tags", ")", ":", "post_tags", "[", "api_post", "[", "\"ID\"", "]", "]", "=", "[", "]", "for", "api_tag", "in", "six", ".", "itervalues", "(", "api_post", "[", "\"tags\"", "]", ")", ":", "tag", "=", "self", ".", "process_post_tag", "(", "bulk_mode", ",", "api_tag", ")", "if", "tag", ":", "post_tags", "[", "api_post", "[", "\"ID\"", "]", "]", ".", "append", "(", "tag", ")" ]
40.928571
16.785714
async def communicate(self, data_id=None, run_sync=False, save_settings=True):
    """Scan database for resolving Data objects and process them.

    This is submitted as a task to the manager's channel workers.

    :param data_id: Optional id of Data object which (+ its children)
        should be processes. If it is not given, all resolving objects
        are processed.
    :param run_sync: If ``True``, wait until all processes spawned from
        this point on have finished processing. If no processes are
        spawned, this results in a deadlock, since counts are handled on
        process finish.
    :param save_settings: If ``True``, save the current Django settings
        context to the global state. This should never be ``True`` for
        "automatic" calls, such as from Django signals, which can be
        invoked from inappropriate contexts (such as in the listener).
        For user code, it should be left at the default value. The
        saved settings are in effect until the next such call.
    """
    # Executor implementation the workers should use; falls back to the
    # local executor when FLOW_EXECUTOR is not configured.
    executor = getattr(settings, 'FLOW_EXECUTOR', {}).get('NAME', 'resolwe.flow.executors.local')
    logger.debug(__(
        "Manager sending communicate command on '{}' triggered by Data with id {}.",
        state.MANAGER_CONTROL_CHANNEL,
        data_id,
    ))

    # Either snapshot the current Django settings or reuse the previously
    # saved override (see the docstring for when saving must be skipped).
    saved_settings = self.state.settings_override
    if save_settings:
        saved_settings = self._marshal_settings()
        self.state.settings_override = saved_settings

    if run_sync:
        # Lazily create the synchronization counter and record that one
        # communicate round-trip is now in flight.
        self._ensure_counter()
        await self.sync_counter.inc('communicate')
    try:
        await consumer.send_event({
            WorkerProtocol.COMMAND: WorkerProtocol.COMMUNICATE,
            WorkerProtocol.COMMUNICATE_SETTINGS: saved_settings,
            WorkerProtocol.COMMUNICATE_EXTRA: {
                'data_id': data_id,
                'executor': executor,
            },
        })
    except ChannelFull:
        # Best effort: a full channel is logged and the pending count is
        # rolled back so a synchronous caller is not left waiting forever.
        logger.exception("ChannelFull error occurred while sending communicate message.")
        # NOTE(review): ``dec`` also runs when ``run_sync`` is False and the
        # counter was never incremented -- confirm the counter tolerates it.
        await self.sync_counter.dec('communicate')

    if run_sync and not self.sync_counter.active:
        logger.debug(__(
            "Manager on channel '{}' entering synchronization block.",
            state.MANAGER_CONTROL_CHANNEL
        ))
        await self.execution_barrier()
        logger.debug(__(
            "Manager on channel '{}' exiting synchronization block.",
            state.MANAGER_CONTROL_CHANNEL
        ))
[ "async", "def", "communicate", "(", "self", ",", "data_id", "=", "None", ",", "run_sync", "=", "False", ",", "save_settings", "=", "True", ")", ":", "executor", "=", "getattr", "(", "settings", ",", "'FLOW_EXECUTOR'", ",", "{", "}", ")", ".", "get", "(", "'NAME'", ",", "'resolwe.flow.executors.local'", ")", "logger", ".", "debug", "(", "__", "(", "\"Manager sending communicate command on '{}' triggered by Data with id {}.\"", ",", "state", ".", "MANAGER_CONTROL_CHANNEL", ",", "data_id", ",", ")", ")", "saved_settings", "=", "self", ".", "state", ".", "settings_override", "if", "save_settings", ":", "saved_settings", "=", "self", ".", "_marshal_settings", "(", ")", "self", ".", "state", ".", "settings_override", "=", "saved_settings", "if", "run_sync", ":", "self", ".", "_ensure_counter", "(", ")", "await", "self", ".", "sync_counter", ".", "inc", "(", "'communicate'", ")", "try", ":", "await", "consumer", ".", "send_event", "(", "{", "WorkerProtocol", ".", "COMMAND", ":", "WorkerProtocol", ".", "COMMUNICATE", ",", "WorkerProtocol", ".", "COMMUNICATE_SETTINGS", ":", "saved_settings", ",", "WorkerProtocol", ".", "COMMUNICATE_EXTRA", ":", "{", "'data_id'", ":", "data_id", ",", "'executor'", ":", "executor", ",", "}", ",", "}", ")", "except", "ChannelFull", ":", "logger", ".", "exception", "(", "\"ChannelFull error occurred while sending communicate message.\"", ")", "await", "self", ".", "sync_counter", ".", "dec", "(", "'communicate'", ")", "if", "run_sync", "and", "not", "self", ".", "sync_counter", ".", "active", ":", "logger", ".", "debug", "(", "__", "(", "\"Manager on channel '{}' entering synchronization block.\"", ",", "state", ".", "MANAGER_CONTROL_CHANNEL", ")", ")", "await", "self", ".", "execution_barrier", "(", ")", "logger", ".", "debug", "(", "__", "(", "\"Manager on channel '{}' exiting synchronization block.\"", ",", "state", ".", "MANAGER_CONTROL_CHANNEL", ")", ")" ]
44.948276
21.810345
def _generate_shape(word: str) -> str: """ Recreate shape from a token input by user Args: word: str Returns: str """ def counting_stars(w) -> List[int]: count = [1] for i in range(1, len(w)): if w[i - 1] == w[i]: count[-1] += 1 else: count.append(1) return count shape = "" p = 0 for c in counting_stars(word): if c > 4: shape += word[p:p + 4] else: shape += word[p:p + c] p = p + c return shape
[ "def", "_generate_shape", "(", "word", ":", "str", ")", "->", "str", ":", "def", "counting_stars", "(", "w", ")", "->", "List", "[", "int", "]", ":", "count", "=", "[", "1", "]", "for", "i", "in", "range", "(", "1", ",", "len", "(", "w", ")", ")", ":", "if", "w", "[", "i", "-", "1", "]", "==", "w", "[", "i", "]", ":", "count", "[", "-", "1", "]", "+=", "1", "else", ":", "count", ".", "append", "(", "1", ")", "return", "count", "shape", "=", "\"\"", "p", "=", "0", "for", "c", "in", "counting_stars", "(", "word", ")", ":", "if", "c", ">", "4", ":", "shape", "+=", "word", "[", "p", ":", "p", "+", "4", "]", "else", ":", "shape", "+=", "word", "[", "p", ":", "p", "+", "c", "]", "p", "=", "p", "+", "c", "return", "shape" ]
22.103448
16.724138
def fit(self, y, exogenous=None):
    """Fit the transformer.

    Computes the periods of all the Fourier terms. The values of ``y``
    are not actually used; only the periodicity is used when computing
    Fourier terms.

    Parameters
    ----------
    y : array-like or None, shape=(n_samples,)
        The endogenous (time-series) array.

    exogenous : array-like or None, shape=(n_samples, n_features), optional
        The exogenous array of additional covariates. If specified, the
        Fourier terms will be column-bound on the right side of the
        matrix. Otherwise, the Fourier terms will be returned as the new
        exogenous array.
    """
    # Nothing is estimated from the data here -- only validation.
    _, _ = self._check_y_exog(y, exogenous, null_allowed=True)

    m, k = self.m, self.k
    if k is None:
        k = m // 2
    if 2 * k > m or k < 1:
        raise ValueError("k must be a positive integer not greater "
                         "than m//2")

    # Periods of the Fourier terms: (1..k) / m. Since R allows multiple
    # seasonality and we do not, this is much simpler here.
    self.p_ = ((np.arange(k) + 1) / m).astype(np.float64)
    self.k_ = k
    self.n_ = y.shape[0]
    return self
[ "def", "fit", "(", "self", ",", "y", ",", "exogenous", "=", "None", ")", ":", "# Since we don't fit any params here, we can just check the params", "_", ",", "_", "=", "self", ".", "_check_y_exog", "(", "y", ",", "exogenous", ",", "null_allowed", "=", "True", ")", "m", "=", "self", ".", "m", "k", "=", "self", ".", "k", "if", "k", "is", "None", ":", "k", "=", "m", "//", "2", "if", "2", "*", "k", ">", "m", "or", "k", "<", "1", ":", "raise", "ValueError", "(", "\"k must be a positive integer not greater \"", "\"than m//2\"", ")", "# Compute the periods of all Fourier terms. Since R allows multiple", "# seasonality and we do not, we can do this much more simply.", "p", "=", "(", "(", "np", ".", "arange", "(", "k", ")", "+", "1", ")", "/", "m", ")", ".", "astype", "(", "np", ".", "float64", ")", "# 1:K / m", "# If sinpi is 0... maybe blow up?", "# if abs(2 * p - round(2 * p)) < np.finfo(y.dtype).eps: # min eps", "self", ".", "p_", "=", "p", "self", ".", "k_", "=", "k", "self", ".", "n_", "=", "y", ".", "shape", "[", "0", "]", "return", "self" ]
36.04878
25.536585
def log_commit( self, block_id, vtxindex, op, opcode, op_data ):
    """
    Log a committed operation, with its history stripped out.
    """
    debug_op = self.sanitize_op( op_data )
    # Drop the (potentially huge) history before rendering the record.
    debug_op.pop('history', None)

    rendered = ", ".join("%s='%s'" % (key, debug_op[key]) for key in sorted(debug_op))
    log.debug("COMMIT %s (%s) at (%s, %s) data: %s", opcode, op, block_id, vtxindex, rendered)

    return
[ "def", "log_commit", "(", "self", ",", "block_id", ",", "vtxindex", ",", "op", ",", "opcode", ",", "op_data", ")", ":", "debug_op", "=", "self", ".", "sanitize_op", "(", "op_data", ")", "if", "'history'", "in", "debug_op", ":", "del", "debug_op", "[", "'history'", "]", "log", ".", "debug", "(", "\"COMMIT %s (%s) at (%s, %s) data: %s\"", ",", "opcode", ",", "op", ",", "block_id", ",", "vtxindex", ",", "\", \"", ".", "join", "(", "[", "\"%s='%s'\"", "%", "(", "k", ",", "debug_op", "[", "k", "]", ")", "for", "k", "in", "sorted", "(", "debug_op", ".", "keys", "(", ")", ")", "]", ")", ")", "return" ]
33.153846
22.846154
def node_has_namespaces(node: BaseEntity, namespaces: Set[str]) -> bool:
    """Pass for nodes that have one of the given namespaces."""
    namespace = node.get(NAMESPACE)
    if namespace is None:
        return False
    return namespace in namespaces
[ "def", "node_has_namespaces", "(", "node", ":", "BaseEntity", ",", "namespaces", ":", "Set", "[", "str", "]", ")", "->", "bool", ":", "ns", "=", "node", ".", "get", "(", "NAMESPACE", ")", "return", "ns", "is", "not", "None", "and", "ns", "in", "namespaces" ]
52.25
12.5
def _collapse_variants_by_function(graph: BELGraph, func: str) -> None:
    """Collapse all of the given functions' variants' edges to their parents, in-place."""
    for parent, variant, edge_data in graph.edges(data=True):
        # Only HAS_VARIANT edges whose parent has the requested function
        # are collapsed.
        if edge_data[RELATION] != HAS_VARIANT:
            continue
        if parent.function == func:
            collapse_pair(graph, from_node=variant, to_node=parent)
[ "def", "_collapse_variants_by_function", "(", "graph", ":", "BELGraph", ",", "func", ":", "str", ")", "->", "None", ":", "for", "parent_node", ",", "variant_node", ",", "data", "in", "graph", ".", "edges", "(", "data", "=", "True", ")", ":", "if", "data", "[", "RELATION", "]", "==", "HAS_VARIANT", "and", "parent_node", ".", "function", "==", "func", ":", "collapse_pair", "(", "graph", ",", "from_node", "=", "variant_node", ",", "to_node", "=", "parent_node", ")" ]
75.6
25.6
def rotation_matrix(angle, direction, point=None):
    """Return a 4x4 homogeneous matrix rotating by ``angle`` (radians)
    about the axis ``direction``, optionally passing through ``point``.

    When ``point`` is None the rotation axis goes through the origin.
    Only the first three components of ``direction``/``point`` are used.
    """
    sin_a = math.sin(angle)
    cos_a = math.cos(angle)
    axis = unit_vector(direction[:3])

    # Rodrigues' formula: cos(a)*I + (1-cos(a))*aa^T + sin(a)*[a]_x
    rot = np.array(
        ((cos_a, 0.0, 0.0), (0.0, cos_a, 0.0), (0.0, 0.0, cos_a)),
        dtype=np.float32,
    )
    rot += np.outer(axis, axis) * (1.0 - cos_a)
    scaled = axis * sin_a
    rot += np.array(
        (
            (0.0, -scaled[2], scaled[1]),
            (scaled[2], 0.0, -scaled[0]),
            (-scaled[1], scaled[0], 0.0),
        ),
        dtype=np.float32,
    )

    matrix = np.identity(4)
    matrix[:3, :3] = rot
    if point is not None:
        # Rotation about an axis not through the origin: translate so the
        # fixed point maps to itself.
        fixed = np.array(point[:3], dtype=np.float32, copy=False)
        matrix[:3, 3] = fixed - np.dot(rot, fixed)
    return matrix
[ "def", "rotation_matrix", "(", "angle", ",", "direction", ",", "point", "=", "None", ")", ":", "sina", "=", "math", ".", "sin", "(", "angle", ")", "cosa", "=", "math", ".", "cos", "(", "angle", ")", "direction", "=", "unit_vector", "(", "direction", "[", ":", "3", "]", ")", "# rotation matrix around unit vector", "R", "=", "np", ".", "array", "(", "(", "(", "cosa", ",", "0.0", ",", "0.0", ")", ",", "(", "0.0", ",", "cosa", ",", "0.0", ")", ",", "(", "0.0", ",", "0.0", ",", "cosa", ")", ")", ",", "dtype", "=", "np", ".", "float32", ")", "R", "+=", "np", ".", "outer", "(", "direction", ",", "direction", ")", "*", "(", "1.0", "-", "cosa", ")", "direction", "*=", "sina", "R", "+=", "np", ".", "array", "(", "(", "(", "0.0", ",", "-", "direction", "[", "2", "]", ",", "direction", "[", "1", "]", ")", ",", "(", "direction", "[", "2", "]", ",", "0.0", ",", "-", "direction", "[", "0", "]", ")", ",", "(", "-", "direction", "[", "1", "]", ",", "direction", "[", "0", "]", ",", "0.0", ")", ",", ")", ",", "dtype", "=", "np", ".", "float32", ",", ")", "M", "=", "np", ".", "identity", "(", "4", ")", "M", "[", ":", "3", ",", ":", "3", "]", "=", "R", "if", "point", "is", "not", "None", ":", "# rotation not around origin", "point", "=", "np", ".", "array", "(", "point", "[", ":", "3", "]", ",", "dtype", "=", "np", ".", "float32", ",", "copy", "=", "False", ")", "M", "[", ":", "3", ",", "3", "]", "=", "point", "-", "np", ".", "dot", "(", "R", ",", "point", ")", "return", "M" ]
33.489796
18.959184
def refresh(self, force: bool = False) -> bool:
    """
    Reload the cauldron.json definition file and repopulate the project.

    Existing data is overwritten when the new definition differs from the
    previous one; if the project already reflects the most recent version
    of cauldron.json, nothing is changed.

    :param force:
        If true the project will be refreshed even if the project file
        modified timestamp doesn't indicate that it needs to be refreshed.
    :return:
        Whether or not a refresh was needed and carried out
    """
    last = self.last_modified
    up_to_date = last is not None and last >= os.path.getmtime(self.source_path)
    if up_to_date and not force:
        return False

    previous = self.settings.fetch(None)
    current = definitions.load_project_definition(self.source_directory)
    if previous == current and not force:
        return False

    self.settings.clear().put(**current)

    previous_steps = previous.get('steps', [])
    current_steps = current.get('steps', [])
    if previous_steps == current_steps and not force:
        return True

    # Reuse existing step objects whose definitions are unchanged; create
    # fresh ones for everything else.
    existing = self.steps
    self.steps = []
    for definition in current_steps:
        same = [entry for entry in previous_steps if entry == definition]
        if same:
            self.steps.append(existing[previous_steps.index(same[0])])
        else:
            self.add_step(definition)

    self.last_modified = time.time()
    return True
[ "def", "refresh", "(", "self", ",", "force", ":", "bool", "=", "False", ")", "->", "bool", ":", "lm", "=", "self", ".", "last_modified", "is_newer", "=", "lm", "is", "not", "None", "and", "lm", ">=", "os", ".", "path", ".", "getmtime", "(", "self", ".", "source_path", ")", "if", "not", "force", "and", "is_newer", ":", "return", "False", "old_definition", "=", "self", ".", "settings", ".", "fetch", "(", "None", ")", "new_definition", "=", "definitions", ".", "load_project_definition", "(", "self", ".", "source_directory", ")", "if", "not", "force", "and", "old_definition", "==", "new_definition", ":", "return", "False", "self", ".", "settings", ".", "clear", "(", ")", ".", "put", "(", "*", "*", "new_definition", ")", "old_step_definitions", "=", "old_definition", ".", "get", "(", "'steps'", ",", "[", "]", ")", "new_step_definitions", "=", "new_definition", ".", "get", "(", "'steps'", ",", "[", "]", ")", "if", "not", "force", "and", "old_step_definitions", "==", "new_step_definitions", ":", "return", "True", "old_steps", "=", "self", ".", "steps", "self", ".", "steps", "=", "[", "]", "for", "step_data", "in", "new_step_definitions", ":", "matches", "=", "[", "s", "for", "s", "in", "old_step_definitions", "if", "s", "==", "step_data", "]", "if", "len", "(", "matches", ")", ">", "0", ":", "index", "=", "old_step_definitions", ".", "index", "(", "matches", "[", "0", "]", ")", "self", ".", "steps", ".", "append", "(", "old_steps", "[", "index", "]", ")", "else", ":", "self", ".", "add_step", "(", "step_data", ")", "self", ".", "last_modified", "=", "time", ".", "time", "(", ")", "return", "True" ]
35.764706
23.607843
def _add_parser_arguments_git(self, subparsers): """Create a sub-parsers for git subcommands. """ subparsers.add_parser( "git-clone", help="Clone all defined data repositories if they dont exist.") subparsers.add_parser( "git-push", help="Add all files to data repositories, commit, and push.") subparsers.add_parser( "git-pull", help="'Pull' all data repositories.") subparsers.add_parser( "git-reset-local", help="Hard reset all data repositories using local 'HEAD'.") subparsers.add_parser( "git-reset-origin", help="Hard reset all data repositories using 'origin/master'.") subparsers.add_parser( "git-status", help="Get the 'git status' of all data repositories.") return
[ "def", "_add_parser_arguments_git", "(", "self", ",", "subparsers", ")", ":", "subparsers", ".", "add_parser", "(", "\"git-clone\"", ",", "help", "=", "\"Clone all defined data repositories if they dont exist.\"", ")", "subparsers", ".", "add_parser", "(", "\"git-push\"", ",", "help", "=", "\"Add all files to data repositories, commit, and push.\"", ")", "subparsers", ".", "add_parser", "(", "\"git-pull\"", ",", "help", "=", "\"'Pull' all data repositories.\"", ")", "subparsers", ".", "add_parser", "(", "\"git-reset-local\"", ",", "help", "=", "\"Hard reset all data repositories using local 'HEAD'.\"", ")", "subparsers", ".", "add_parser", "(", "\"git-reset-origin\"", ",", "help", "=", "\"Hard reset all data repositories using 'origin/master'.\"", ")", "subparsers", ".", "add_parser", "(", "\"git-status\"", ",", "help", "=", "\"Get the 'git status' of all data repositories.\"", ")", "return" ]
31.107143
21
def _get_date_facet_counts(self, timespan, date_field, start_date=None, end_date=None):
    '''
    Returns Range Facet counts for the source and destination collections,
    as a ``(source_facet, dest_facet)`` tuple keyed by ``date_field``.

    timespan: facet gap increment; only values containing 'DAY' are
        supported at this time.
    date_field: name of the date field to facet on.
    start_date: optional inclusive start day (YYYY-MM-DD); when omitted it
        is derived from the earliest document in the source collection.
    end_date: optional end day (YYYY-MM-DD); when omitted it is derived
        from the latest document and extended by one day.
    '''
    if 'DAY' not in timespan:
        raise ValueError("At this time, only DAY date range increment is supported. Aborting..... ")

    #Need to do this a bit better later. Don't like the string and date concatenations.
    if not start_date:
        # Derive the first day from the collection's oldest document.
        start_date = self._get_edge_date(date_field, 'asc')
        start_date = datetime.strptime(start_date,'%Y-%m-%dT%H:%M:%S.%fZ').date().isoformat()+'T00:00:00.000Z'
    else:
        start_date = start_date+'T00:00:00.000Z'

    if not end_date:
        # Derive the last day from the newest document and push the bound
        # one day forward so that day is fully included in the range.
        end_date = self._get_edge_date(date_field, 'desc')
        end_date = datetime.strptime(end_date,'%Y-%m-%dT%H:%M:%S.%fZ').date()
        end_date += timedelta(days=1)
        end_date = end_date.isoformat()+'T00:00:00.000Z'
    else:
        end_date = end_date+'T00:00:00.000Z'

    self.log.info("Processing Items from {} to {}".format(start_date, end_date))

    #Get facet counts for source and destination collections
    source_facet = self._source.query(self._source_coll, self._get_date_range_query(
        timespan=timespan, start_date=start_date, end_date=end_date)
                                        ).get_facets_ranges()[date_field]
    dest_facet = self._dest.query(
        self._dest_coll, self._get_date_range_query(
        timespan=timespan, start_date=start_date, end_date=end_date
        )).get_facets_ranges()[date_field]

    return source_facet, dest_facet
[ "def", "_get_date_facet_counts", "(", "self", ",", "timespan", ",", "date_field", ",", "start_date", "=", "None", ",", "end_date", "=", "None", ")", ":", "if", "'DAY'", "not", "in", "timespan", ":", "raise", "ValueError", "(", "\"At this time, only DAY date range increment is supported. Aborting..... \"", ")", "#Need to do this a bit better later. Don't like the string and date concatenations.\r", "if", "not", "start_date", ":", "start_date", "=", "self", ".", "_get_edge_date", "(", "date_field", ",", "'asc'", ")", "start_date", "=", "datetime", ".", "strptime", "(", "start_date", ",", "'%Y-%m-%dT%H:%M:%S.%fZ'", ")", ".", "date", "(", ")", ".", "isoformat", "(", ")", "+", "'T00:00:00.000Z'", "else", ":", "start_date", "=", "start_date", "+", "'T00:00:00.000Z'", "if", "not", "end_date", ":", "end_date", "=", "self", ".", "_get_edge_date", "(", "date_field", ",", "'desc'", ")", "end_date", "=", "datetime", ".", "strptime", "(", "end_date", ",", "'%Y-%m-%dT%H:%M:%S.%fZ'", ")", ".", "date", "(", ")", "end_date", "+=", "timedelta", "(", "days", "=", "1", ")", "end_date", "=", "end_date", ".", "isoformat", "(", ")", "+", "'T00:00:00.000Z'", "else", ":", "end_date", "=", "end_date", "+", "'T00:00:00.000Z'", "self", ".", "log", ".", "info", "(", "\"Processing Items from {} to {}\"", ".", "format", "(", "start_date", ",", "end_date", ")", ")", "#Get facet counts for source and destination collections\r", "source_facet", "=", "self", ".", "_source", ".", "query", "(", "self", ".", "_source_coll", ",", "self", ".", "_get_date_range_query", "(", "timespan", "=", "timespan", ",", "start_date", "=", "start_date", ",", "end_date", "=", "end_date", ")", ")", ".", "get_facets_ranges", "(", ")", "[", "date_field", "]", "dest_facet", "=", "self", ".", "_dest", ".", "query", "(", "self", ".", "_dest_coll", ",", "self", ".", "_get_date_range_query", "(", "timespan", "=", "timespan", ",", "start_date", "=", "start_date", ",", "end_date", "=", "end_date", ")", ")", ".", 
"get_facets_ranges", "(", ")", "[", "date_field", "]", "return", "source_facet", ",", "dest_facet" ]
47.882353
27.823529
def merge(*args):
    """Implements the 'merge' operator for merging lists.

    Each argument that is a list or tuple is flattened into the result;
    any other value is appended as a single element.

    Returns:
        list: the merged elements, in argument order.
    """
    ret = []
    for arg in args:
        # Single isinstance call with a type tuple instead of two
        # chained checks.
        if isinstance(arg, (list, tuple)):
            ret.extend(arg)
        else:
            ret.append(arg)
    return ret
[ "def", "merge", "(", "*", "args", ")", ":", "ret", "=", "[", "]", "for", "arg", "in", "args", ":", "if", "isinstance", "(", "arg", ",", "list", ")", "or", "isinstance", "(", "arg", ",", "tuple", ")", ":", "ret", "+=", "list", "(", "arg", ")", "else", ":", "ret", ".", "append", "(", "arg", ")", "return", "ret" ]
27.777778
18.666667
def whitespace_around_operator(logical_line):
    """
    Avoid extraneous whitespace in the following situations:

    - More than one space around an assignment (or other) operator to
      align it with another.
    """
    line = logical_line
    for operator in operators:
        # Probe the offending patterns in the original order and report
        # the first one present on the line.
        checks = (
            (' ' + operator, "E221 multiple spaces before operator"),
            (operator + ' ', "E222 multiple spaces after operator"),
            ('\t' + operator, "E223 tab before operator"),
            (operator + '\t', "E224 tab after operator"),
        )
        for needle, message in checks:
            offset = line.find(needle)
            if offset > -1:
                return offset, message
[ "def", "whitespace_around_operator", "(", "logical_line", ")", ":", "line", "=", "logical_line", "for", "operator", "in", "operators", ":", "found", "=", "line", ".", "find", "(", "' '", "+", "operator", ")", "if", "found", ">", "-", "1", ":", "return", "found", ",", "\"E221 multiple spaces before operator\"", "found", "=", "line", ".", "find", "(", "operator", "+", "' '", ")", "if", "found", ">", "-", "1", ":", "return", "found", ",", "\"E222 multiple spaces after operator\"", "found", "=", "line", ".", "find", "(", "'\\t'", "+", "operator", ")", "if", "found", ">", "-", "1", ":", "return", "found", ",", "\"E223 tab before operator\"", "found", "=", "line", ".", "find", "(", "operator", "+", "'\\t'", ")", "if", "found", ">", "-", "1", ":", "return", "found", ",", "\"E224 tab after operator\"" ]
35.952381
13.47619
def _backsearch(self):
    """Re-inspect the peaks seen since the last detected QRS peak
    (if any), testing each against the lower backsearch threshold.
    """
    # Nothing to re-examine until at least one QRS peak was found.
    if self.last_qrs_peak_num is None:
        return
    first = self.last_qrs_peak_num + 1
    for candidate in range(first, self.peak_num + 1):
        if self._is_qrs(peak_num=candidate, backsearch=True):
            self._update_qrs(peak_num=candidate, backsearch=True)
[ "def", "_backsearch", "(", "self", ")", ":", "if", "self", ".", "last_qrs_peak_num", "is", "not", "None", ":", "for", "peak_num", "in", "range", "(", "self", ".", "last_qrs_peak_num", "+", "1", ",", "self", ".", "peak_num", "+", "1", ")", ":", "if", "self", ".", "_is_qrs", "(", "peak_num", "=", "peak_num", ",", "backsearch", "=", "True", ")", ":", "self", ".", "_update_qrs", "(", "peak_num", "=", "peak_num", ",", "backsearch", "=", "True", ")" ]
41.4
20.6
def getServiceEndpoints(self, yadis_url, service_element):
    """Return the endpoint objects produced by the filter functions.

    Expands ``service_element`` into its (type URIs, URI) combinations,
    wraps each in a ``BasicServiceEndpoint``, and keeps only those that
    survive ``applyFilters``.
    """
    results = []
    # Expand the service element by xrd:Type and xrd:URI.
    for type_uris, uri, _unused in expandService(service_element):
        # Basic endpoint representing this yadis_url / Service /
        # Type / URI combination.
        candidate = BasicServiceEndpoint(
            yadis_url, type_uris, uri, service_element)
        filtered = self.applyFilters(candidate)
        if filtered is not None:
            results.append(filtered)
    return results
[ "def", "getServiceEndpoints", "(", "self", ",", "yadis_url", ",", "service_element", ")", ":", "endpoints", "=", "[", "]", "# Do an expansion of the service element by xrd:Type and xrd:URI", "for", "type_uris", ",", "uri", ",", "_", "in", "expandService", "(", "service_element", ")", ":", "# Create a basic endpoint object to represent this", "# yadis_url, Service, Type, URI combination", "endpoint", "=", "BasicServiceEndpoint", "(", "yadis_url", ",", "type_uris", ",", "uri", ",", "service_element", ")", "e", "=", "self", ".", "applyFilters", "(", "endpoint", ")", "if", "e", "is", "not", "None", ":", "endpoints", ".", "append", "(", "e", ")", "return", "endpoints" ]
36.722222
19.277778