text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def get_default_config(self):
    """
    Returns the default collector settings.

    Returns
    -------
    dict
        The parent collector's defaults updated with TokuMX-specific
        connection and filtering options.
    """
    config = super(TokuMXCollector, self).get_default_config()
    config.update({
        'path': 'mongo',
        'hosts': ['localhost'],
        'user': None,
        'passwd': None,
        'databases': '.*',
        # Raw string: the backslashes are regex escapes, not string escapes.
        # Same value as before, but avoids invalid-escape-sequence warnings.
        'ignore_collections': r'^tmp\.mr\.',
        'network_timeout': None,
        'simple': 'False',
        'translate_collections': 'False',
    })
    return config
[ "def", "get_default_config", "(", "self", ")", ":", "config", "=", "super", "(", "TokuMXCollector", ",", "self", ")", ".", "get_default_config", "(", ")", "config", ".", "update", "(", "{", "'path'", ":", "'mongo'", ",", "'hosts'", ":", "[", "'localhost'",...
31.647059
10
def Runtime_releaseObjectGroup(self, objectGroup):
    """
    Function path: Runtime.releaseObjectGroup
    Domain: Runtime
    Method name: releaseObjectGroup

    Parameters:
        Required arguments:
            'objectGroup' (type: string) -> Symbolic object group name.
    No return value.

    Description: Releases all remote objects that belong to a given group.
    """
    assert isinstance(objectGroup, (str,)), (
        "Argument 'objectGroup' must be of type '['str']'. Received type: '%s'"
        % type(objectGroup))
    # Delegate to the synchronous command dispatcher and hand back its result.
    return self.synchronous_command(
        'Runtime.releaseObjectGroup', objectGroup=objectGroup)
[ "def", "Runtime_releaseObjectGroup", "(", "self", ",", "objectGroup", ")", ":", "assert", "isinstance", "(", "objectGroup", ",", "(", "str", ",", ")", ")", ",", "\"Argument 'objectGroup' must be of type '['str']'. Received type: '%s'\"", "%", "type", "(", "objectGroup",...
32.578947
19.421053
def indicator_summary_table():
    """Export a table listing all NUTS2 regions with their (most current) data.

    Output is a CSV file and an Excel file, saved as
    'nuts2_values.csv/.xlsx' in the output directory.
    """
    # a database client/session to run queries in
    cl = client.get_client()
    session = cl.create_session()
    # Query regions and indicators separately, join them in pandas
    query = session.query(models.NUTS2Region.name, models.NUTS2Region.key,
                          models.NUTS2Region.id)
    df = cl.df_query(query).set_index('id')
    # For each EuroStat indicator, query latest available data year
    # (varies between indicators)
    indicators = session.query(models.EuroStatIndicator.description,
                               models.EuroStatIndicator.id).all()
    for description, indicator_id in indicators:
        latest_year = session.query(sa.func.max(models.EuroStatValue.year)) \
            .filter(models.EuroStatValue.indicator_id == indicator_id) \
            .scalar()
        query = session.query(models.EuroStatValue.value,
                              models.EuroStatValue.region_id) \
            .filter(models.EuroStatValue.indicator_id == indicator_id) \
            .filter(models.EuroStatValue.year == latest_year)
        values = cl.df_query(query).set_index('region_id')['value']
        # rename series to description + year, join to data table
        values.name = description + ' ' + str(latest_year)
        df = df.join(values, how='left')
    # Query and join in weather indicators
    query = session.query(models.ClimateValue.region_id,
                          models.ClimateValue.value,
                          models.ClimateIndicator.description) \
        .join(models.ClimateIndicator)
    weather = cl.df_query(query).dropna(how='any')
    # pivot different indicators to columns, join to data table
    weather = weather.set_index(['region_id', 'description'])['value'].unstack()
    df = df.join(weather, how='left')
    # write output as both CSV and Excel; do not include index column
    df.to_csv(path.join(out_dir, "nuts2_values.csv"), encoding='utf-8',
              index=False)
    df.to_excel(path.join(out_dir, "nuts2_values.xlsx"), encoding='utf-8',
                index=False)
    session.close()
[ "def", "indicator_summary_table", "(", ")", ":", "# a database client/session to run queries in", "cl", "=", "client", ".", "get_client", "(", ")", "session", "=", "cl", ".", "create_session", "(", ")", "# Query regions and indicators separately, join them in pandas", "quer...
45.04
23.88
def create_parameter_group(name, db_parameter_group_family, description,
                           tags=None, region=None, key=None, keyid=None,
                           profile=None):
    '''
    Create an RDS parameter group

    CLI example to create an RDS parameter group::

        salt myminion boto_rds.create_parameter_group my-param-group mysql5.6 \
        "group description"
    '''
    # Short-circuit if a parameter group with this name already exists.
    res = __salt__['boto_rds.parameter_group_exists'](name, tags, region,
                                                      key, keyid, profile)
    if res.get('exists'):
        return {'exists': bool(res)}
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        if not conn:
            # Could not obtain a boto3 RDS connection.
            return {'results': bool(conn)}
        # Convert the tags mapping into the AWS tag-document list format.
        taglist = _tag_doc(tags)
        rds = conn.create_db_parameter_group(
            DBParameterGroupName=name,
            DBParameterGroupFamily=db_parameter_group_family,
            Description=description,
            Tags=taglist)
        if not rds:
            return {'created': False,
                    'message': 'Failed to create RDS parameter group {0}'.format(name)}
        return {'exists': bool(rds),
                'message': 'Created RDS parameter group {0}'.format(name)}
    except ClientError as e:
        # Surface AWS API errors in the standard salt boto3 error format.
        return {'error': __utils__['boto3.get_error'](e)}
[ "def", "create_parameter_group", "(", "name", ",", "db_parameter_group_family", ",", "description", ",", "tags", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "res", "=...
41.588235
24.529412
def _xdr_read_address(unpacker): """Reads a stellar address and returns the string representing the address This method assumes the encoded address is a public address (starting with G) """ # First 4 bytes are the address type address_type = unpacker.unpack_uint() if address_type != 0: raise ValueError("Unsupported address type") return address_from_public_key(unpacker.unpack_fopaque(32))
[ "def", "_xdr_read_address", "(", "unpacker", ")", ":", "# First 4 bytes are the address type", "address_type", "=", "unpacker", ".", "unpack_uint", "(", ")", "if", "address_type", "!=", "0", ":", "raise", "ValueError", "(", "\"Unsupported address type\"", ")", "return...
41.9
14
def update_iteration_num_suggestions(self, num_suggestions):
    """Update iteration's num_suggestions."""
    # Fetch the current iteration config, mutate it, then persist it.
    config = self.experiment_group.iteration_config
    config.num_suggestions = num_suggestions
    self._update_config(config)
[ "def", "update_iteration_num_suggestions", "(", "self", ",", "num_suggestions", ")", ":", "iteration_config", "=", "self", ".", "experiment_group", ".", "iteration_config", "iteration_config", ".", "num_suggestions", "=", "num_suggestions", "self", ".", "_update_config", ...
46.166667
18
def info(self):
    """
    Return information of chord to display
    """
    parts = (self._chord, self._root, self._quality, self._appended, self._on)
    return """{}
root={}
quality={}
appended={}
on={}""".format(*parts)
[ "def", "info", "(", "self", ")", ":", "return", "\"\"\"{}\nroot={}\nquality={}\nappended={}\non={}\"\"\"", ".", "format", "(", "self", ".", "_chord", ",", "self", ".", "_root", ",", "self", ".", "_quality", ",", "self", ".", "_appended", ",", "self", ".", "_...
28.285714
16.714286
def add_script_sequence(self):
    """
    creates a script sequence based on the script iterator type selected and
    the selected scripts and sends it to the tree self.tree_loaded
    """
    def empty_tree(tree_model):
        # Drain the Qt tree model: collect the display text of each top-level
        # child, clear the model, and return the names in on-screen order.
        def add_children_to_list(item, somelist):
            if item.hasChildren():
                for rownum in range(0, item.rowCount()):
                    somelist.append(str(item.child(rownum, 0).text()))
        output_list = []
        root = tree_model.invisibleRootItem()
        add_children_to_list(root, output_list)
        tree_model.clear()
        return output_list
    name = str(self.txt_script_sequence_name.text())
    new_script_list = empty_tree(self.tree_script_sequence_model)
    # Resolve each selected script name against the known element registries.
    new_script_dict = {}
    for script in new_script_list:
        if script in self.elements_old:
            new_script_dict.update({script: self.elements_old[script]})
        elif script in self.elements_from_file:
            new_script_dict.update({script: self.elements_from_file[script]})
    # Preserve the on-screen ordering of the scripts via an index map.
    new_script_parameter_dict = {}
    for index, script in enumerate(new_script_list):
        new_script_parameter_dict.update({script: index})
    # QtGui.QTextEdit.toPlainText()
    # get the module of the current dialogue
    package = get_python_package(inspect.getmodule(self).__file__)
    assert package is not None  # check that we actually find a module
    # class_name = Script.set_up_dynamic_script(factory_scripts, new_script_parameter_list, self.cmb_looping_variable.currentText() == 'Parameter Sweep')
    # Wrap everything into a ScriptIterator definition keyed by the sequence
    # name chosen in the dialog.
    new_script_dict = {name: {'class': 'ScriptIterator',
                              'package': package,
                              'scripts': new_script_dict,
                              'info': str(self.txt_info.toPlainText()),
                              'settings': {'script_order': new_script_parameter_dict,
                                           'iterator_type': str(self.cmb_looping_variable.currentText())}}}
    self.selected_element_name = name
    self.fill_tree(self.tree_loaded, new_script_dict)
    self.elements_from_file.update(new_script_dict)
[ "def", "add_script_sequence", "(", "self", ")", ":", "def", "empty_tree", "(", "tree_model", ")", ":", "# COMMENT_ME", "def", "add_children_to_list", "(", "item", ",", "somelist", ")", ":", "if", "item", ".", "hasChildren", "(", ")", ":", "for", "rownum", ...
44.755102
26.265306
def ast_scan_file(filename, re_fallback=True):
    '''Scans a file for imports using AST.

    In addition to normal imports, try to get imports via `__import__` or
    `import_module` calls. The AST parser should be able to resolve simple
    variable assignments in cases where these functions are called with
    variables instead of strings.

    :param filename: path of the Python source file to scan
    :param re_fallback: fall back to the regex scanner on a parse error
    :return: ``(scope, imports)`` tuple, or ``(None, None)`` on failure
    '''
    try:
        with io.open(filename, 'rb') as fp:
            try:
                root = ast.parse(fp.read(), filename=filename)
            except (SyntaxError, IndentationError):
                if re_fallback:
                    log.debug('Falling back to regex scanner')
                    return _ast_scan_file_re(filename)
                else:
                    log.error('Could not parse file: %s', filename)
                    log.info('Exception:', exc_info=True)
                    return None, None
        log.debug('Starting AST Scan: %s', filename)
        ast_visitor.reset(filename)
        ast_visitor.visit(root)
        log.debug('Project path: %s', ast_visitor.import_root)
        return ast_visitor.scope, ast_visitor.imports
    except IOError:
        # Logger.warn is a deprecated alias; warning() is the supported name.
        log.warning('Could not open file: %s', filename)
        return None, None
[ "def", "ast_scan_file", "(", "filename", ",", "re_fallback", "=", "True", ")", ":", "try", ":", "with", "io", ".", "open", "(", "filename", ",", "'rb'", ")", "as", "fp", ":", "try", ":", "root", "=", "ast", ".", "parse", "(", "fp", ".", "read", "...
41.275862
18.655172
def Parse(self, cmd, args, stdout, stderr, return_val, time_taken,
          knowledge_base):
    """Parse the yum output into SoftwarePackages results."""
    _ = stderr, time_taken, args, knowledge_base  # Unused.
    self.CheckReturn(cmd, return_val)
    packages = []
    for line in stdout.decode("utf-8").splitlines()[1:]:  # Ignore first line
        cols = line.split()
        name_arch, version, source = cols
        # Split on the LAST dot only: package names may themselves contain
        # dots (e.g. "python3.6.x86_64"), so a plain split(".") would raise.
        name, arch = name_arch.rsplit(".", 1)
        status = rdf_client.SoftwarePackage.InstallState.INSTALLED
        packages.append(
            rdf_client.SoftwarePackage(
                name=name,
                publisher=source,
                version=version,
                architecture=arch,
                install_state=status))
    if packages:
        yield rdf_client.SoftwarePackages(packages=packages)
[ "def", "Parse", "(", "self", ",", "cmd", ",", "args", ",", "stdout", ",", "stderr", ",", "return_val", ",", "time_taken", ",", "knowledge_base", ")", ":", "_", "=", "stderr", ",", "time_taken", ",", "args", ",", "knowledge_base", "# Unused.", "self", "."...
34.909091
15.954545
def export(self, file_path=None, export_format=None):
    """ Write the users to a file. """
    with io.open(file_path, mode='w', encoding="utf-8") as export_file:
        if export_format == 'yaml':
            # yaml is only needed for this format, so import lazily.
            import yaml
            yaml.safe_dump(self.to_dict(), export_file,
                           default_flow_style=False)
        elif export_format == 'json':
            payload = json.dumps(self.to_dict(), ensure_ascii=False)
            export_file.write(text_type(payload))
    return True
[ "def", "export", "(", "self", ",", "file_path", "=", "None", ",", "export_format", "=", "None", ")", ":", "with", "io", ".", "open", "(", "file_path", ",", "mode", "=", "'w'", ",", "encoding", "=", "\"utf-8\"", ")", "as", "export_file", ":", "if", "e...
53
19.666667
def coherence_spectrogram(self, other, stride, fftlength=None, overlap=None,
                          window='hann', nproc=1):
    """Calculate the coherence spectrogram between this `TimeSeries` and
    other.

    Parameters
    ----------
    other : `TimeSeries`
        the second `TimeSeries` in this CSD calculation
    stride : `float`
        number of seconds in single PSD (column of spectrogram)
    fftlength : `float`
        number of seconds in single FFT
    overlap : `float`, optional
        number of seconds of overlap between FFTs, defaults to the
        recommended overlap for the given window (if given), or 0
    window : `str`, `numpy.ndarray`, optional
        window function to apply to timeseries prior to FFT, see
        :func:`scipy.signal.get_window` for details on acceptable formats
    nproc : `int`
        number of parallel processes to use when calculating individual
        coherence spectra.

    Returns
    -------
    spectrogram : `~gwpy.spectrogram.Spectrogram`
        time-frequency coherence spectrogram as generated from the input
        time-series.
    """
    from ..spectrogram.coherence import from_timeseries
    fft_opts = dict(fftlength=fftlength, overlap=overlap, window=window,
                    nproc=nproc)
    return from_timeseries(self, other, stride, **fft_opts)
[ "def", "coherence_spectrogram", "(", "self", ",", "other", ",", "stride", ",", "fftlength", "=", "None", ",", "overlap", "=", "None", ",", "window", "=", "'hann'", ",", "nproc", "=", "1", ")", ":", "from", ".", ".", "spectrogram", ".", "coherence", "im...
36.897436
22.051282
def transform(self, X, y=None, sample_weight=None):
    '''
    Transforms the time series data with linear direct value interpolation
    If y is a time series and passed, it will be transformed as well
    The time dimension is removed from the data

    Parameters
    ----------
    X : array-like, shape [n_series, ...]
        Time series data and (optionally) contextual data
    y : array-like shape [n_series], default = None
        target vector
    sample_weight : array-like shape [n_series], default = None
        sample weights

    Returns
    -------
    X_new : array-like, shape [n_series, ]
        transformed time series data
    y_new : array-like, shape [n_series]
        expanded target vector
    sample_weight_new : array-like or None
        None is returned if target is changed. Otherwise it is returned
        unchanged.
    '''
    check_ts_data(X, y)
    xt, xc = get_ts_data_parts(X)
    yt = y
    swt = sample_weight
    # number of data channels
    # assumes each sample row is [timestamp, identifier, ch1, ch2, ...]
    # — TODO confirm against check_ts_data's contract
    d = xt[0][0].shape[0] - 2
    # number of series
    N = len(xt)
    # retrieve the unique identifiers
    # NOTE(review): identifiers are taken from the FIRST series only and
    # assumed to be present in every series — confirm
    s = np.unique(xt[0][:, 1])
    x_new = []
    t_lin = []
    # transform x
    for i in np.arange(N):
        # splits series into a list for each variable
        xs = [xt[i][xt[i][:, 1] == s[j]] for j in np.arange(len(s))]
        # find latest/earliest sample time for each identifier's first/last
        # time sample time (intersection of all identifiers' time ranges)
        t_min = np.max([np.min(xs[j][:, 0]) for j in np.arange(len(s))])
        t_max = np.min([np.max(xs[j][:, 0]) for j in np.arange(len(s))])
        # Generate a regular series of timestamps starting at tStart and
        # tEnd for sample_period
        t_lin.append(np.arange(t_min, t_max, self.sample_period))
        # Interpolate for the new regular sample times
        if d == 1:
            x_new.append(
                np.column_stack(
                    [self._interp(t_lin[i], xs[j][:, 0], xs[j][:, 2],
                                  kind=self.kind)
                     for j in np.arange(len(s))]))
        elif d > 1:
            xd = []
            for j in np.arange(len(s)):
                # stack the columns of each variable by dimension d after
                # interpolation to new regular sample times
                temp = np.column_stack(
                    [(self._interp(t_lin[i], xs[j][:, 0], xs[j][:, k],
                                   kind=self.kind))
                     for k in np.arange(2, 2 + d)])
                xd.append(temp)
            # column stack each of the sensors s -- resulting in s*d columns
            x_new.append(np.column_stack(xd))
    # transform y
    if yt is not None and len(np.atleast_1d(yt[0])) > 1:
        # y is a time series: resample it onto the same regular grid;
        # sample weights are invalidated by the resampling
        swt = None
        if self.categorical_target is True:
            yt = [self._interp(t_lin[i], xt[i][:, 0], yt[i], kind='nearest')
                  for i in np.arange(N)]
        else:
            yt = [self._interp(t_lin[i], xt[i][:, 0], yt[i], kind=self.kind)
                  for i in np.arange(N)]
    else:
        # y is static - leave y alone
        pass
    if xc is not None:
        x_new = TS_Data(x_new, xc)
    return x_new, yt, swt
[ "def", "transform", "(", "self", ",", "X", ",", "y", "=", "None", ",", "sample_weight", "=", "None", ")", ":", "check_ts_data", "(", "X", ",", "y", ")", "xt", ",", "xc", "=", "get_ts_data_parts", "(", "X", ")", "yt", "=", "y", "swt", "=", "sample...
37.454545
22.863636
def load():
    """
    Loads the built-in operators into the global test engine.
    """
    for entry in operators:
        module_name, symbols = entry[0], entry[1:]
        path = 'grappa.operators.{}'.format(module_name)
        # Dynamically import the operator module
        mod = __import__(path, None, None, symbols)
        # Register each exported operator in the test engine
        for symbol in symbols:
            Engine.register(getattr(mod, symbol))
[ "def", "load", "(", ")", ":", "for", "operator", "in", "operators", ":", "module", ",", "symbols", "=", "operator", "[", "0", "]", ",", "operator", "[", "1", ":", "]", "path", "=", "'grappa.operators.{}'", ".", "format", "(", "module", ")", "# Dynamica...
31.5
15.214286
def check_constraint(column, lenum, **kwargs):
    """
    Returns a SQL CHECK constraint string given a column name and a
    :class:`~coaster.utils.classes.LabeledEnum`.

    Alembic may not detect the CHECK constraint when autogenerating
    migrations, so you may need to do this manually using the Python console
    to extract the SQL string::

        from coaster.sqlalchemy import StateManager
        from your_app.models import YOUR_ENUM
        print str(StateManager.check_constraint('your_column', YOUR_ENUM).sqltext)

    :param str column: Column name
    :param LabeledEnum lenum: :class:`~coaster.utils.classes.LabeledEnum`
        to retrieve valid values from
    :param kwargs: Additional options passed to CheckConstraint
    """
    # Build the IN(...) clause, render it with literal values, then wrap it.
    clause = column_constructor(column).in_(lenum.keys())
    rendered = clause.compile(compile_kwargs={'literal_binds': True})
    return CheckConstraint(str(rendered), **kwargs)
[ "def", "check_constraint", "(", "column", ",", "lenum", ",", "*", "*", "kwargs", ")", ":", "return", "CheckConstraint", "(", "str", "(", "column_constructor", "(", "column", ")", ".", "in_", "(", "lenum", ".", "keys", "(", ")", ")", ".", "compile", "("...
44.809524
25.666667
def get_result(self):
    """Perform the call and return a string with the time in hh:mm:ss"""
    response = get_json_from_remote_server(self.call)
    # An empty/falsy response means no duration data: report zero seconds.
    if response:
        seconds = self._get_average_duration(response)
    else:
        seconds = 0
    return stringify_seconds(seconds)
[ "def", "get_result", "(", "self", ")", ":", "result", "=", "get_json_from_remote_server", "(", "self", ".", "call", ")", "seconds", "=", "self", ".", "_get_average_duration", "(", "result", ")", "if", "result", "else", "0", "return", "stringify_seconds", "(", ...
52.4
12.8
def get_from_archive(self, feed_id):
    """
    Retrieves feed that was persisted as .xml file by its id (= filename)

    Note: No check on feed validity. file content is assumed correct

    :param feed_id: identifier of the archived feed (used as the filename)
    :return: the atom feed as string
    :raises FeedArchiveNotFound: when no file exists for this id
    """
    archive_path = self.feed_repository + '/' + str(feed_id) + '.xml'
    if not os.path.isfile(archive_path):
        raise FeedArchiveNotFound()
    with open(archive_path, 'r') as archive_file:
        return archive_file.read()
[ "def", "get_from_archive", "(", "self", ",", "feed_id", ")", ":", "file_path", "=", "self", ".", "feed_repository", "+", "'/'", "+", "str", "(", "feed_id", ")", "+", "'.xml'", "if", "not", "os", ".", "path", ".", "isfile", "(", "file_path", ")", ":", ...
41.666667
11.166667
def _construct_state_machines(self):
    """
    :return: dict in format <state_machine_common_name: instance_of_the_state_machine>
    """
    machines = [
        StateMachineRecomputing(self.logger, self),
        StateMachineContinuous(self.logger, self),
        StateMachineDiscrete(self.logger, self),
        StateMachineFreerun(self.logger),
    ]
    # Key each instance by its declared name.
    return {machine.name: machine for machine in machines}
[ "def", "_construct_state_machines", "(", "self", ")", ":", "state_machines", "=", "dict", "(", ")", "for", "state_machine", "in", "[", "StateMachineRecomputing", "(", "self", ".", "logger", ",", "self", ")", ",", "StateMachineContinuous", "(", "self", ".", "lo...
59.444444
18.333333
async def create_collection(db, model_class: MongoCollectionMixin):
    '''
    Creates a MongoDB collection and all the declared indices in the model's
    ``Meta`` class

    :param db: A database handle
    :type db: motor.motor_asyncio.AsyncIOMotorClient
    :param model_class: The model to create
    :type model_class: Subclass of ``Model`` mixed with ``MongoCollectionMixin``
    :return: the collection handle, or ``None`` if the model declares no
        collection name
    '''
    name = model_class.get_collection_name()
    if name:
        try:
            # create collection
            coll = await db.create_collection(name, **model_class._meta.creation_args)
        except CollectionInvalid:
            # collection already exists
            coll = db[name]
        # create indices
        if hasattr(model_class._meta, 'indices') and isinstance(model_class._meta.indices, list):
            for index in model_class._meta.indices:
                try:
                    # default index name: underscore-joined field names
                    index_kwargs = {
                        'name': index.get('name', '_'.join([x[0] for x in index['fields']])),
                        'unique': index.get('unique', False),
                        'sparse': index.get('sparse', False),
                        'expireAfterSeconds': index.get('expireAfterSeconds', None),
                        'background': True
                    }
                    if 'partialFilterExpression' in index:
                        index_kwargs['partialFilterExpression'] = index.get('partialFilterExpression', {})
                    await db[name].create_index(
                        index['fields'],
                        **index_kwargs
                    )
                except OperationFailure as ex:
                    pass  # index already exists ? TODO: do something with this
        return coll
    return None
[ "async", "def", "create_collection", "(", "db", ",", "model_class", ":", "MongoCollectionMixin", ")", ":", "name", "=", "model_class", ".", "get_collection_name", "(", ")", "if", "name", ":", "try", ":", "# create collection", "coll", "=", "await", "db", ".", ...
40.511628
24
def predict(self, df_data, graph=None, **kwargs):
    """Orient a graph using the method defined by the arguments.

    Depending on the type of `graph`, this function process to execute
    different functions:

    1. If ``graph`` is a ``networkx.DiGraph``, then
       ``self.orient_directed_graph`` is executed.
    2. If ``graph`` is a ``networkx.Graph``, then
       ``self.orient_undirected_graph`` is executed.
    3. If ``graph`` is a ``None``, then ``self.create_graph_from_data`` is
       executed.

    Args:
        df_data (pandas.DataFrame): DataFrame containing the observational
            data.
        graph (networkx.DiGraph or networkx.Graph or None): Prior knowledge
            on the causal graph.

    .. warning::
        Requirement : Name of the nodes in the graph must correspond to the
        name of the variables in df_data

    Raises:
        ValueError: If ``graph`` is neither ``None`` nor a networkx graph.
    """
    if graph is None:
        return self.create_graph_from_data(df_data, **kwargs)
    elif isinstance(graph, nx.DiGraph):
        return self.orient_directed_graph(df_data, graph, **kwargs)
    elif isinstance(graph, nx.Graph):
        return self.orient_undirected_graph(df_data, graph, **kwargs)
    else:
        # Raise with the message attached instead of printing it and
        # raising a bare, message-less ValueError.
        raise ValueError('Unknown Graph type')
[ "def", "predict", "(", "self", ",", "df_data", ",", "graph", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "graph", "is", "None", ":", "return", "self", ".", "create_graph_from_data", "(", "df_data", ",", "*", "*", "kwargs", ")", "elif", "isi...
46.37037
26.703704
def _rest_post(self, suburi, request_headers, request_body): """REST POST operation. The response body after the operation could be the new resource, or ExtendedError, or it could be empty. """ return self._rest_op('POST', suburi, request_headers, request_body)
[ "def", "_rest_post", "(", "self", ",", "suburi", ",", "request_headers", ",", "request_body", ")", ":", "return", "self", ".", "_rest_op", "(", "'POST'", ",", "suburi", ",", "request_headers", ",", "request_body", ")" ]
42.285714
19.142857
def preview():  # pragma: no coverage
    """launch an HTTP to preview the website"""
    handler_cls = http.server.SimpleHTTPRequestHandler
    # Allow quick restarts without waiting for the TIME_WAIT socket state.
    socketserver.TCPServer.allow_reuse_address = True
    port = CONFIG['http_port']
    httpd = socketserver.TCPServer(("", port), handler_cls)
    # Serve files from the generated output directory.
    os.chdir(CONFIG['output_to'])
    try:
        logger.info("and ready to test at "
                    "http://127.0.0.1:%d" % CONFIG['http_port'])
        logger.info("Hit Ctrl+C to exit")
        httpd.serve_forever()
    except KeyboardInterrupt:
        httpd.shutdown()
[ "def", "preview", "(", ")", ":", "# pragma: no coverage", "Handler", "=", "http", ".", "server", ".", "SimpleHTTPRequestHandler", "socketserver", ".", "TCPServer", ".", "allow_reuse_address", "=", "True", "port", "=", "CONFIG", "[", "'http_port'", "]", "httpd", ...
38.785714
11.142857
def _querystring(self): """Additional keyword arguments""" kw = {} if self.status: kw["status"] = "all" if self.links: kw["links"] = "all" if self.include is not None: kw["include"] = self.include if self.subject_key_type != "SubjectName": kw["subjectKeyType"] = self.subject_key_type return kw
[ "def", "_querystring", "(", "self", ")", ":", "kw", "=", "{", "}", "if", "self", ".", "status", ":", "kw", "[", "\"status\"", "]", "=", "\"all\"", "if", "self", ".", "links", ":", "kw", "[", "\"links\"", "]", "=", "\"all\"", "if", "self", ".", "i...
27.642857
16.214286
def transitive_reduction(G):
    """
    Returns a transitive reduction of a graph.  The original graph
    is not modified.

    A transitive reduction H of G has a path from x to y if and
    only if there was a path from x to y in G.  Deleting any edge
    of H destroys this property.  A transitive reduction is not
    unique in general.  A transitive reduction has the same
    transitive closure as the original graph.

    A transitive reduction of a complete graph is a tree.  A
    transitive reduction of a tree is itself.

    >>> G = nx.DiGraph([(1, 2), (1, 3), (2, 3), (2, 4), (3, 4)])
    >>> H = transitive_reduction(G)
    >>> H.edges()
    [(1, 2), (2, 3), (3, 4)]
    """
    reduced = G.copy()
    for u, v, attrs in G.edges_iter(data=True):
        # Tentatively delete the edge; if a path between its endpoints
        # survives, the edge was redundant, otherwise restore it.
        reduced.remove_edge(u, v)
        if not nx.has_path(reduced, u, v):
            reduced.add_edge(u, v, attrs)
    return reduced
[ "def", "transitive_reduction", "(", "G", ")", ":", "H", "=", "G", ".", "copy", "(", ")", "for", "a", ",", "b", ",", "w", "in", "G", ".", "edges_iter", "(", "data", "=", "True", ")", ":", "# Try deleting the edge, see if we still have a path", "# between th...
35.518519
18.407407
def call_hook(self, hook, *args, **kwargs):
    """ Calls each registered hook """
    # Invoke every callback registered under this hook name, in order.
    for callback in self.hooks[hook]:
        callback(*args, **kwargs)
[ "def", "call_hook", "(", "self", ",", "hook", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "function", "in", "self", ".", "hooks", "[", "hook", "]", ":", "function", ".", "__call__", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
31
2.666667
def validate_url(self, original_string):
    """Validate *original_string* as a URL (or a bare path when
    ``self.path_only`` is set).

    Returns the ``urlparse`` result on success; raises an
    ``ArgumentError`` if the input is not valid.
    """
    # nipped from stack overflow: http://stackoverflow.com/questions/827557/how-do-you-validate-a-url-with-a-regular-expression-in-python
    # I preferred this to the thorough regex approach for simplicity and
    # readability
    pieces = urlparse.urlparse(original_string)
    try:
        if self.path_only:
            assert not any([pieces.scheme, pieces.netloc])
            assert pieces.path
        else:
            assert all([pieces.scheme, pieces.netloc])
            # string.ascii_letters exists on both Python 2 and 3;
            # string.letters was Python-2-only and locale-dependent.
            valid_chars = set(string.ascii_letters + string.digits + ":-_.")
            assert set(pieces.netloc) <= valid_chars
            assert pieces.scheme in ['http', 'https']
    except AssertionError:
        raise ArgumentError(self.item_name,
                            "The input you've provided is not a valid URL.")
    return pieces
[ "def", "validate_url", "(", "self", ",", "original_string", ")", ":", "# nipped from stack overflow: http://stackoverflow.com/questions/827557/how-do-you-validate-a-url-with-a-regular-expression-in-python", "# I preferred this to the thorough regex approach for simplicity and", "# readability", ...
43.956522
21.478261
def makePics(self):
    """convert every .image we find to a ./swhlab/ image"""
    rescanNeeded = False
    for fname in smartSort(self.fnames):
        if fname in self.fnames2:
            # already present in the output folder listing; skip
            continue
        ext = os.path.splitext(fname)[1].lower()
        if ext in [".jpg", ".png"]:
            # plain images are copied as-is into the ./swhlab/ folder
            # NOTE(review): this tests substring membership in the folder
            # PATH string; probably meant `self.fnames2` — confirm
            if not fname in self.abfFolder2:
                self.log.debug("copying %s", fname)
                shutil.copy(os.path.join(self.abfFolder, fname),
                            os.path.join(self.abfFolder2, fname))
                rescanNeeded = True
        if ext in [".tif", ".tiff"]:
            # TIFFs are converted to JPG, saved as "<name>.tif.jpg"
            if not fname + ".jpg" in self.fnames2:
                self.log.debug("converting %s", fname)
                swhlab.swh_image.TIF_to_jpg(
                    os.path.join(self.abfFolder, fname),
                    saveAs=os.path.join(self.abfFolder2, fname + ".jpg"))
                rescanNeeded = True
    if rescanNeeded:
        # new files were produced, so the cached folder listing is stale
        self.log.debug("new pics, so a rescan is needed...")
        self.log.debug("REBUILDING ALL RECOMMENDED!!!!!!!!!!!")
        self.folderScan()
[ "def", "makePics", "(", "self", ")", ":", "rescanNeeded", "=", "False", "for", "fname", "in", "smartSort", "(", "self", ".", "fnames", ")", ":", "if", "fname", "in", "self", ".", "fnames2", ":", "continue", "ext", "=", "os", ".", "path", ".", "splite...
49.761905
17.238095
def cloneInto(self, newStore, avatars):
    """
    Create a copy of this LoginAccount and all associated LoginMethods
    in a different Store.  Return the copied LoginAccount.
    """
    clone = LoginAccount(store=newStore,
                         password=self.password,
                         avatars=avatars,
                         disabled=self.disabled)
    # Recreate each of this account's login methods against the new store,
    # pointing them at the cloned account.
    for method in self.store.query(LoginMethod,
                                   LoginMethod.account == self):
        LoginMethod(store=newStore,
                    localpart=method.localpart,
                    domain=method.domain,
                    internal=method.internal,
                    protocol=method.protocol,
                    verified=method.verified,
                    account=clone)
    return clone
[ "def", "cloneInto", "(", "self", ",", "newStore", ",", "avatars", ")", ":", "la", "=", "LoginAccount", "(", "store", "=", "newStore", ",", "password", "=", "self", ".", "password", ",", "avatars", "=", "avatars", ",", "disabled", "=", "self", ".", "dis...
42.9
12.9
def cudnnSetFilter4dDescriptor(wDesc, dataType, format, k, c, h, w):
    """Initialize a filter descriptor.

    This function initializes a previously created filter descriptor object
    into a 4D filter. Filters layout must be contiguous in memory.

    Parameters
    ----------
    wDesc : cudnnFilterDescriptor
        Handle to a previously created filter descriptor.
    dataType : cudnnDataType
        Data type.
    format : cudnnTensorFormat
        Tensor format
    k : int
        Number of output feature maps.
    c : int
        Number of input feature maps.
    h : int
        Height of each filter.
    w : int
        Width of each filter.
    """
    status = _libcudnn.cudnnSetFilter4dDescriptor(wDesc, dataType, format,
                                                  k, c, h, w)
    cudnnCheckStatus(status)
[ "def", "cudnnSetFilter4dDescriptor", "(", "wDesc", ",", "dataType", ",", "format", ",", "k", ",", "c", ",", "h", ",", "w", ")", ":", "status", "=", "_libcudnn", ".", "cudnnSetFilter4dDescriptor", "(", "wDesc", ",", "dataType", ",", "format", ",", "k", ",...
28.333333
20.518519
def get_component(id):
    """ Gets a Cachet component by id """
    # `id` shadows the builtin but is part of the public signature; kept.
    api = cachet.Components(endpoint=ENDPOINT)
    payload = json.loads(api.get(id=id))
    return payload['data']
[ "def", "get_component", "(", "id", ")", ":", "components", "=", "cachet", ".", "Components", "(", "endpoint", "=", "ENDPOINT", ")", "component", "=", "json", ".", "loads", "(", "components", ".", "get", "(", "id", "=", "id", ")", ")", "return", "compon...
28.428571
8.428571
def interactive_console(self):
    """
    Opens an interactive console

    Returns:
        lago.utils.CommandStatus: result of the virsh command execution
    """
    # A console only makes sense on a live domain.
    if not self.running():
        raise RuntimeError('VM %s is not running' % self._libvirt_.name)
    command = [
        "virsh",
        "-c",
        config.get('libvirt_url'),
        "console",
        self._libvirt_name(),
    ]
    return utils.run_interactive_command(command=command, )
[ "def", "interactive_console", "(", "self", ")", ":", "if", "not", "self", ".", "running", "(", ")", ":", "raise", "RuntimeError", "(", "'VM %s is not running'", "%", "self", ".", "_libvirt_", ".", "name", ")", "virsh_command", "=", "[", "\"virsh\"", ",", "...
30.470588
17.882353
def get_record(self, dns_type, name):
    """
    Get a dns record

    :param dns_type: record type to match (e.g. 'A', 'TXT')
    :param name: record name to match
    :return: the first matching record dict
    :raises RecordNotFound: when no record matches
    """
    matches = [record for record in self.dns_records
               if record['type'] == dns_type and record['name'] == name]
    if not matches:
        raise RecordNotFound(
            'Cannot find the specified dns record in domain {domain}'
            .format(domain=name))
    return matches[0]
[ "def", "get_record", "(", "self", ",", "dns_type", ",", "name", ")", ":", "try", ":", "record", "=", "[", "record", "for", "record", "in", "self", ".", "dns_records", "if", "record", "[", "'type'", "]", "==", "dns_type", "and", "record", "[", "'name'",...
32.4
16.266667
def parse_skypos(ra, dec): """ Function to parse RA and Dec input values and turn them into decimal degrees Input formats could be: ["nn","nn","nn.nn"] "nn nn nn.nnn" "nn:nn:nn.nn" "nnH nnM nn.nnS" or "nnD nnM nn.nnS" nn.nnnnnnnn "nn.nnnnnnn" """ rval = make_val_float(ra) dval = make_val_float(dec) if rval is None: rval, dval = radec_hmstodd(ra, dec) return rval, dval
[ "def", "parse_skypos", "(", "ra", ",", "dec", ")", ":", "rval", "=", "make_val_float", "(", "ra", ")", "dval", "=", "make_val_float", "(", "dec", ")", "if", "rval", "is", "None", ":", "rval", ",", "dval", "=", "radec_hmstodd", "(", "ra", ",", "dec", ...
23.473684
17.157895
def get_rule_by_pk(self, id_rule): """ Get a rule by its identifier :param id_rule: Rule identifier. :return: Seguinte estrutura :: { 'rule': {'id': < id >, 'environment': < Environment Object >, 'content': < content >, 'name': < name >, 'custom': < custom > }} :raise AmbienteNaoExisteError: Ambiente não cadastrado. :raise InvalidValueError: Invalid parameter. :raise UserNotAuthorizedError: Permissão negada. :raise DataBaseError: Falha na networkapi ao acessar o banco de dados. :raise XMLError: Falha na networkapi ao ler o XML de requisição ou gerar o XML de resposta. """ url = 'rule/get_by_id/' + str(id_rule) code, xml = self.submit(None, 'GET', url) return self.response(code, xml)
[ "def", "get_rule_by_pk", "(", "self", ",", "id_rule", ")", ":", "url", "=", "'rule/get_by_id/'", "+", "str", "(", "id_rule", ")", "code", ",", "xml", "=", "self", ".", "submit", "(", "None", ",", "'GET'", ",", "url", ")", "return", "self", ".", "resp...
34.08
17.6
def algorithm(G, method_name, **kwargs): """ Apply a ``method`` from NetworkX to all :ref:`networkx.Graph <networkx:graph>` objects in the :class:`.GraphCollection` ``G``. For options, see the `list of algorithms <http://networkx.github.io/documentation/networkx-1.9/reference/algorithms.html>`_ in the NetworkX documentation. Not all of these have been tested. Parameters ---------- G : :class:`.GraphCollection` The :class:`.GraphCollection` to analyze. The specified method will be applied to each graph in ``G``. method_name : string Name of a method in NetworkX to execute on graph collection. **kwargs A list of keyword arguments that should correspond to the parameters of the specified method. Returns ------- results : dict Indexed by element (node or edge) and graph index (e.g. ``date``). Raises ------ ValueError If no such method exists. Examples -------- *Betweenness centrality:* (``G`` is a :class:`.GraphCollection`\) .. code-block:: python >>> from tethne.analyze import collection >>> BC = collection.algorithm(G, 'betweenness_centrality') >>> print BC[0] {1999: 0.010101651117889644, 2000: 0.0008689093723107329, 2001: 0.010504898852426189, 2002: 0.009338654511194512, 2003: 0.007519105636349891} """ warnings.warn("To be removed in 0.8. Use GraphCollection.analyze instead.", DeprecationWarning) return G.analyze(method_name, **kwargs)
[ "def", "algorithm", "(", "G", ",", "method_name", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "\"To be removed in 0.8. Use GraphCollection.analyze instead.\"", ",", "DeprecationWarning", ")", "return", "G", ".", "analyze", "(", "method_name", ...
30.313725
23.137255
def add_directory(self, path, ignore=None): """Add ``*.py`` files under the directory ``path`` to the archive. """ for root, dirs, files in os.walk(path): arc_prefix = os.path.relpath(root, os.path.dirname(path)) # py3 remove pyc cache dirs. if '__pycache__' in dirs: dirs.remove('__pycache__') for f in files: dest_path = os.path.join(arc_prefix, f) # ignore specific files if ignore and ignore(dest_path): continue if f.endswith('.pyc') or f.endswith('.c'): continue f_path = os.path.join(root, f) self.add_file(f_path, dest_path)
[ "def", "add_directory", "(", "self", ",", "path", ",", "ignore", "=", "None", ")", ":", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "path", ")", ":", "arc_prefix", "=", "os", ".", "path", ".", "relpath", "(", "root", "...
37
12.85
def load(self, name): """Construct an object from a registered factory. Parameters ---------- name : str Name with which the factory was registered. """ try: return self._factories[name]() except KeyError: raise ValueError( "no %s factory registered under name %r, options are: %r" % (self.interface.__name__, name, sorted(self._factories)), )
[ "def", "load", "(", "self", ",", "name", ")", ":", "try", ":", "return", "self", ".", "_factories", "[", "name", "]", "(", ")", "except", "KeyError", ":", "raise", "ValueError", "(", "\"no %s factory registered under name %r, options are: %r\"", "%", "(", "sel...
31.066667
19.466667
def _get_pretty_body(headers, body): """ Return a pretty printed body using the Content-Type header information :param headers: Headers for the request/response (dict) :param body: Body to pretty print (string) :return: Body pretty printed (string) """ if HEADER_CONTENT_TYPE in headers: if HEADER_REPRESENTATION_XML == headers[HEADER_CONTENT_TYPE]: xml_parsed = parseString(body) pretty_xml_as_string = xml_parsed.toprettyxml() return pretty_xml_as_string else: if HEADER_REPRESENTATION_JSON in headers[HEADER_CONTENT_TYPE]: parsed = json.loads(body) return json.dumps(parsed, sort_keys=True, indent=4) else: return body else: return body
[ "def", "_get_pretty_body", "(", "headers", ",", "body", ")", ":", "if", "HEADER_CONTENT_TYPE", "in", "headers", ":", "if", "HEADER_REPRESENTATION_XML", "==", "headers", "[", "HEADER_CONTENT_TYPE", "]", ":", "xml_parsed", "=", "parseString", "(", "body", ")", "pr...
37.333333
15.904762
def generate_signature(secret, verb, url, nonce, data): """Generate a request signature compatible with BitMEX.""" # Parse the url so we can remove the base and extract just the path. parsedURL = urllib.parse.urlparse(url) path = parsedURL.path if parsedURL.query: path = path + '?' + parsedURL.query # print "Computing HMAC: %s" % verb + path + str(nonce) + data message = bytes(verb + path + str(nonce) + data, 'utf-8') signature = hmac.new(secret.encode('utf-8'), message, digestmod=hashlib.sha256).hexdigest() return signature
[ "def", "generate_signature", "(", "secret", ",", "verb", ",", "url", ",", "nonce", ",", "data", ")", ":", "# Parse the url so we can remove the base and extract just the path.", "parsedURL", "=", "urllib", ".", "parse", ".", "urlparse", "(", "url", ")", "path", "=...
40.8
17.866667
def magnetic_deformation(structure_A, structure_B): """ Calculates 'magnetic deformation proxy', a measure of deformation (norm of finite strain) between 'non-magnetic' (non-spin-polarized) and ferromagnetic structures. Adapted from Bocarsly et al. 2017, doi: 10.1021/acs.chemmater.6b04729 :param structure_A: Structure :param structure_B: Structure :return: """ # retrieve orderings of both input structures ordering_a = CollinearMagneticStructureAnalyzer( structure_A, overwrite_magmom_mode="none" ).ordering ordering_b = CollinearMagneticStructureAnalyzer( structure_B, overwrite_magmom_mode="none" ).ordering # get a type string, this is either 'NM-FM' for between non-magnetic # and ferromagnetic, as in Bocarsly paper, or e.g. 'FM-AFM' type_str = "{}-{}".format(ordering_a.value, ordering_b.value) lattice_a = structure_A.lattice.matrix.T lattice_b = structure_B.lattice.matrix.T lattice_a_inv = np.linalg.inv(lattice_a) p = np.dot(lattice_a_inv, lattice_b) eta = 0.5 * (np.dot(p.T, p) - np.identity(3)) w, v = np.linalg.eig(eta) deformation = 100 * (1.0 / 3.0) * np.sqrt(w[0] ** 2 + w[1] ** 2 + w[2] ** 2) MagneticDeformation = namedtuple("MagneticDeformation", "type deformation") return MagneticDeformation(deformation=deformation, type=type_str)
[ "def", "magnetic_deformation", "(", "structure_A", ",", "structure_B", ")", ":", "# retrieve orderings of both input structures", "ordering_a", "=", "CollinearMagneticStructureAnalyzer", "(", "structure_A", ",", "overwrite_magmom_mode", "=", "\"none\"", ")", ".", "ordering", ...
35.605263
18.394737
def get_offset(cls, info): """Calculate the offset to the Xing header from the start of the MPEG header including sync based on the MPEG header's content. """ assert info.layer == 3 if info.version == 1: if info.mode != 3: return 36 else: return 21 else: if info.mode != 3: return 21 else: return 13
[ "def", "get_offset", "(", "cls", ",", "info", ")", ":", "assert", "info", ".", "layer", "==", "3", "if", "info", ".", "version", "==", "1", ":", "if", "info", ".", "mode", "!=", "3", ":", "return", "36", "else", ":", "return", "21", "else", ":", ...
26.176471
17.529412
def get_docker_network(self, container_id, all_stats): """Return the container network usage using the Docker API (v1.0 or higher). Input: id is the full container id Output: a dict {'time_since_update': 3000, 'rx': 10, 'tx': 65}. with: time_since_update: number of seconds elapsed between the latest grab rx: Number of byte received tx: Number of byte transmited """ # Init the returned dict network_new = {} # Read the rx/tx stats (in bytes) try: netcounters = all_stats["networks"] except KeyError as e: # all_stats do not have NETWORK information logger.debug("docker plugin - Cannot grab NET usage for container {} ({})".format(container_id, e)) logger.debug(all_stats) # No fallback available... return network_new # Previous network interface stats are stored in the network_old variable if not hasattr(self, 'inetcounters_old'): # First call, we init the network_old var self.netcounters_old = {} try: self.netcounters_old[container_id] = netcounters except (IOError, UnboundLocalError): pass if container_id not in self.netcounters_old: try: self.netcounters_old[container_id] = netcounters except (IOError, UnboundLocalError): pass else: # By storing time data we enable Rx/s and Tx/s calculations in the # XML/RPC API, which would otherwise be overly difficult work # for users of the API try: network_new['time_since_update'] = getTimeSinceLastUpdate('docker_net_{}'.format(container_id)) network_new['rx'] = netcounters["eth0"]["rx_bytes"] - self.netcounters_old[container_id]["eth0"]["rx_bytes"] network_new['tx'] = netcounters["eth0"]["tx_bytes"] - self.netcounters_old[container_id]["eth0"]["tx_bytes"] network_new['cumulative_rx'] = netcounters["eth0"]["rx_bytes"] network_new['cumulative_tx'] = netcounters["eth0"]["tx_bytes"] except KeyError as e: # all_stats do not have INTERFACE information logger.debug("docker plugin - Cannot grab network interface usage for container {} ({})".format(container_id, e)) logger.debug(all_stats) # Save stats to compute next bitrate 
self.netcounters_old[container_id] = netcounters # Return the stats return network_new
[ "def", "get_docker_network", "(", "self", ",", "container_id", ",", "all_stats", ")", ":", "# Init the returned dict", "network_new", "=", "{", "}", "# Read the rx/tx stats (in bytes)", "try", ":", "netcounters", "=", "all_stats", "[", "\"networks\"", "]", "except", ...
45.754386
24.157895
def load_scrap(self, path): """ Load scraper settings from file :param path: Path to file :type path: str :rtype: None :raises WEBFileException: Failed to load settings :raises WEBParameterException: Missing parameters in file """ try: conf = self.load_settings(path) except: # Should only be IOError self.exception("Failed to load file") raise WEBFileException("Failed to load from {}".format(path)) if "scheme" not in conf: raise WEBParameterException("Missing scheme definition") if "url" not in conf: raise WEBParameterException("Missing url definition") version = conf.get('version', None) if version != "1.0": raise WEBParameterException( "Unsupported version {}".format(version) ) self.scheme = conf['scheme'] self.url = conf['url'] self.timeout = conf.get('timeout', self.timeout) if conf.get('html2text'): self._set_html2text(conf['html2text'])
[ "def", "load_scrap", "(", "self", ",", "path", ")", ":", "try", ":", "conf", "=", "self", ".", "load_settings", "(", "path", ")", "except", ":", "# Should only be IOError", "self", ".", "exception", "(", "\"Failed to load file\"", ")", "raise", "WEBFileExcepti...
35.258065
14.806452
def set_limits(self, limits, coord='data'): """Set the bounding box of the viewer extents. Parameters ---------- limits : tuple or None A tuple setting the extents of the viewer in the form of ``(ll_pt, ur_pt)``. """ if limits is not None: if len(limits) != 2: raise ValueError("limits takes a 2 tuple, or None") # convert to data coordinates crdmap = self.get_coordmap(coord) limits = crdmap.to_data(limits) self.t_.set(limits=limits)
[ "def", "set_limits", "(", "self", ",", "limits", ",", "coord", "=", "'data'", ")", ":", "if", "limits", "is", "not", "None", ":", "if", "len", "(", "limits", ")", "!=", "2", ":", "raise", "ValueError", "(", "\"limits takes a 2 tuple, or None\"", ")", "# ...
31.388889
15.222222
def sha1(s): """ Returns a sha1 of the given string """ h = hashlib.new('sha1') h.update(s) return h.hexdigest()
[ "def", "sha1", "(", "s", ")", ":", "h", "=", "hashlib", ".", "new", "(", "'sha1'", ")", "h", ".", "update", "(", "s", ")", "return", "h", ".", "hexdigest", "(", ")" ]
21.166667
13.666667
def connect(self): """Connects to the Graphite server if not already connected.""" if self.sock is not None: return backoff = 0.01 while True: try: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.settimeout(5) sock.connect((self.host, self.port)) self.sock = sock return except socket.error: time.sleep(random.uniform(0, 2.0*backoff)) backoff = min(backoff*2.0, 5.0)
[ "def", "connect", "(", "self", ")", ":", "if", "self", ".", "sock", "is", "not", "None", ":", "return", "backoff", "=", "0.01", "while", "True", ":", "try", ":", "sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", "....
30.4
16.466667
def register_message_callback(self, type_, from_, cb): """ Register a callback to be called when a message is received. :param type_: Message type to listen for, or :data:`None` for a wildcard match. :type type_: :class:`~.MessageType` or :data:`None` :param from_: Sender JID to listen for, or :data:`None` for a wildcard match. :type from_: :class:`~aioxmpp.JID` or :data:`None` :param cb: Callback function to call :raises ValueError: if another function is already registered for the same ``(type_, from_)`` pair. :raises ValueError: if `type_` is not a valid :class:`~.MessageType` (and cannot be cast to a :class:`~.MessageType`) `cb` will be called whenever a message stanza matching the `type_` and `from_` is received, according to the wildcarding rules below. More specific callbacks win over less specific callbacks, and the match on the `from_` address takes precedence over the match on the `type_`. See :meth:`.SimpleStanzaDispatcher.register_callback` for the exact wildcarding rules. .. versionchanged:: 0.7 The `type_` argument is now supposed to be a :class:`~.MessageType` member. .. deprecated:: 0.7 Passing a :class:`str` as `type_` argument is deprecated and will raise a :class:`TypeError` as of the 1.0 release. See the Changelog for :ref:`api-changelog-0.7` for further details on how to upgrade your code efficiently. .. deprecated:: 0.9 This method has been deprecated in favour of and is now implemented in terms of the :class:`aioxmpp.dispatcher.SimpleMessageDispatcher` service. It is equivalent to call :meth:`~.SimpleStanzaDispatcher.register_callback`, except that the latter is not deprecated. """ if type_ is not None: type_ = self._coerce_enum(type_, structs.MessageType) warnings.warn( "register_message_callback is deprecated; use " "aioxmpp.dispatcher.SimpleMessageDispatcher instead", DeprecationWarning, stacklevel=2 ) self._xxx_message_dispatcher.register_callback( type_, from_, cb, )
[ "def", "register_message_callback", "(", "self", ",", "type_", ",", "from_", ",", "cb", ")", ":", "if", "type_", "is", "not", "None", ":", "type_", "=", "self", ".", "_coerce_enum", "(", "type_", ",", "structs", ".", "MessageType", ")", "warnings", ".", ...
40.433333
24.266667
def parse(self, s): """Parses the input string, and returns a reference to the created AST's root""" with self.lock: try: return self.parser.parse(s, lexer=self.lexer) except InvalidIEMLObjectArgument as e: raise CannotParse(s, str(e)) except CannotParse as e: e.s = s raise e
[ "def", "parse", "(", "self", ",", "s", ")", ":", "with", "self", ".", "lock", ":", "try", ":", "return", "self", ".", "parser", ".", "parse", "(", "s", ",", "lexer", "=", "self", ".", "lexer", ")", "except", "InvalidIEMLObjectArgument", "as", "e", ...
38.3
13.5
def to_event(self, event_type, field_name=None, depth=None): """Constructs an IonEvent from this _IonNature value. Args: event_type (IonEventType): The type of the resulting event. field_name (Optional[text]): The field name associated with this value, if any. depth (Optional[int]): The depth of this value. Returns: An IonEvent with the properties from this value. """ if self.ion_event is None: value = self if isinstance(self, IonPyNull): value = None self.ion_event = IonEvent(event_type, ion_type=self.ion_type, value=value, field_name=field_name, annotations=self.ion_annotations, depth=depth) return self.ion_event
[ "def", "to_event", "(", "self", ",", "event_type", ",", "field_name", "=", "None", ",", "depth", "=", "None", ")", ":", "if", "self", ".", "ion_event", "is", "None", ":", "value", "=", "self", "if", "isinstance", "(", "self", ",", "IonPyNull", ")", "...
44.055556
24.055556
def notify(self, new_issues, existing_issues, fixed_issues): """Send notifications (email, slack, etc.) for any issues that are currently open or has just been closed Args: new_issues (`list` of :obj:`DomainHijackIssue`): List of newly discovered issues existing_issues (`list` of :obj:`DomainHijackIssue`): List of existing open issues fixed_issues (`list` of `dict`): List of fixed issues Returns: None """ if len(new_issues + existing_issues + fixed_issues) > 0: maxlen = max(len(x['properties']['source']) for x in (new_issues + existing_issues + fixed_issues)) + 2 text_tmpl = get_template('domain_hijacking.txt') html_tmpl = get_template('domain_hijacking.html') issues_text = text_tmpl.render( new_issues=new_issues, existing_issues=existing_issues, fixed_issues=fixed_issues, maxlen=maxlen ) issues_html = html_tmpl.render( new_issues=new_issues, existing_issues=existing_issues, fixed_issues=fixed_issues, maxlen=maxlen ) try: send_notification( subsystem=self.name, recipients=[NotificationContact('email', addr) for addr in self.recipients], subject=self.subject, body_html=issues_html, body_text=issues_text ) except Exception as ex: self.log.exception('Failed sending notification email: {}'.format(ex))
[ "def", "notify", "(", "self", ",", "new_issues", ",", "existing_issues", ",", "fixed_issues", ")", ":", "if", "len", "(", "new_issues", "+", "existing_issues", "+", "fixed_issues", ")", ">", "0", ":", "maxlen", "=", "max", "(", "len", "(", "x", "[", "'...
43.526316
19.894737
def switch_to(self, newstate): """Switch to a new state newstate : int or string Can be either the value of the state, or the label of the state""" if type(newstate) is int: if newstate in self._states: self._curcode = newstate else: raise Exception("The state value " + str(newstate) + " does not exist.") else: if newstate in self._scodes: self._curcode = self._scodes[newstate] else: raise Exception("The state " + newstate + " does not exist.")
[ "def", "switch_to", "(", "self", ",", "newstate", ")", ":", "if", "type", "(", "newstate", ")", "is", "int", ":", "if", "newstate", "in", "self", ".", "_states", ":", "self", ".", "_curcode", "=", "newstate", "else", ":", "raise", "Exception", "(", "...
41.666667
15.933333
def diet(filename, configuration): ''' Squeeze files if there is a pipeline defined for them or leave them be otherwise. :param filename: filename of the file to process :param configuration: configuration dict describing commands and pipelines :type configuration: dict :return: has file changed :rtype: bool ''' changed = False if not isfile(filename): raise NotFileDietException('Passed filename does not point to a file') conf = copy.deepcopy(DEFAULT_CONFIG) if not configuration.get('parsed'): new_config = parse_configuration(configuration) else: new_config = configuration update_configuration(conf, new_config) filetype = determine_type(filename) squeeze_cmd = conf['pipelines'].get(filetype) if squeeze_cmd: tmpbackup_ext = 'diet_internal' ext = conf.get('backup', tmpbackup_ext) backup = backup_file(filename, ext) size = os.stat(filename).st_size new_size = squeeze(squeeze_cmd, filename, backup) if not conf.get('keep_processed', False) and new_size > size: copy_if_different(backup, filename) # Delete backup, if it was internal if not conf.get('backup'): os.remove(backup) changed = True return changed
[ "def", "diet", "(", "filename", ",", "configuration", ")", ":", "changed", "=", "False", "if", "not", "isfile", "(", "filename", ")", ":", "raise", "NotFileDietException", "(", "'Passed filename does not point to a file'", ")", "conf", "=", "copy", ".", "deepcop...
29.744186
20.209302
def init_database(self): """Initialize database, if it has not been initialized yet.""" with contextlib.closing(self.database.cursor()) as cursor: cursor.execute(""" CREATE TABLE IF NOT EXISTS users( id INTEGER PRIMARY KEY AUTOINCREMENT, name VARCHAR(32) ) """)
[ "def", "init_database", "(", "self", ")", ":", "with", "contextlib", ".", "closing", "(", "self", ".", "database", ".", "cursor", "(", ")", ")", "as", "cursor", ":", "cursor", ".", "execute", "(", "\"\"\"\n CREATE TABLE IF NOT EXISTS users(\n ...
39.222222
10.555556
def _winreg_getShellFolder( name ): """Get a shell folder by string name from the registry""" k = _winreg.OpenKey( _winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" ) try: # should check that it's valid? How? return _winreg.QueryValueEx( k, name )[0] finally: _winreg.CloseKey( k )
[ "def", "_winreg_getShellFolder", "(", "name", ")", ":", "k", "=", "_winreg", ".", "OpenKey", "(", "_winreg", ".", "HKEY_CURRENT_USER", ",", "r\"Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders\"", ")", "try", ":", "# should check that it's valid? How?", ...
34.181818
16.454545
def rsa_enc(data, modulus, exponent): """ Simple RAW RSA encryption method, returns byte string. Returns byte string of the same size as the modulus (left padded with 0) :param data: :param modulus: :param exponent: :return: """ modulus = to_long(modulus) exponent = to_long(exponent) data = to_long(data) return long_to_bytes(pow(data, exponent, modulus), long_byte_size(modulus))
[ "def", "rsa_enc", "(", "data", ",", "modulus", ",", "exponent", ")", ":", "modulus", "=", "to_long", "(", "modulus", ")", "exponent", "=", "to_long", "(", "exponent", ")", "data", "=", "to_long", "(", "data", ")", "return", "long_to_bytes", "(", "pow", ...
27.8
20.2
def x_rolls(self, number, count=0, func=sum): '''Iterator of number dice rolls. :param count: [0] Return list of ``count`` sums :param func: [sum] Apply func to list of individual die rolls func([]) ''' for x in range(number): yield self.roll(count, func)
[ "def", "x_rolls", "(", "self", ",", "number", ",", "count", "=", "0", ",", "func", "=", "sum", ")", ":", "for", "x", "in", "range", "(", "number", ")", ":", "yield", "self", ".", "roll", "(", "count", ",", "func", ")" ]
43
13.857143
def neg(x, context=None): """ Return -x. """ return _apply_function_in_current_context( BigFloat, mpfr.mpfr_neg, (BigFloat._implicit_convert(x),), context, )
[ "def", "neg", "(", "x", ",", "context", "=", "None", ")", ":", "return", "_apply_function_in_current_context", "(", "BigFloat", ",", "mpfr", ".", "mpfr_neg", ",", "(", "BigFloat", ".", "_implicit_convert", "(", "x", ")", ",", ")", ",", "context", ",", ")...
18.181818
17.090909
def get_host_ip(hostname, fallback=None): """ Resolves the IP for a given hostname, or returns the input if it is already an IP. """ if is_ip(hostname): return hostname ip_addr = ns_query(hostname) if not ip_addr: try: ip_addr = socket.gethostbyname(hostname) except Exception: log("Failed to resolve hostname '%s'" % (hostname), level=WARNING) return fallback return ip_addr
[ "def", "get_host_ip", "(", "hostname", ",", "fallback", "=", "None", ")", ":", "if", "is_ip", "(", "hostname", ")", ":", "return", "hostname", "ip_addr", "=", "ns_query", "(", "hostname", ")", "if", "not", "ip_addr", ":", "try", ":", "ip_addr", "=", "s...
27.529412
14.235294
def sersic_constant(self): """ A parameter derived from Sersic index which ensures that effective radius contains 50% of the profile's total integrated light. """ return (2 * self.sersic_index) - (1. / 3.) + (4. / (405. * self.sersic_index)) + ( 46. / (25515. * self.sersic_index ** 2)) + (131. / (1148175. * self.sersic_index ** 3)) - ( 2194697. / (30690717750. * self.sersic_index ** 4))
[ "def", "sersic_constant", "(", "self", ")", ":", "return", "(", "2", "*", "self", ".", "sersic_index", ")", "-", "(", "1.", "/", "3.", ")", "+", "(", "4.", "/", "(", "405.", "*", "self", ".", "sersic_index", ")", ")", "+", "(", "46.", "/", "(",...
64.857143
24.857143
def Unpack(self, msg): """Unpacks the current Any message into specified message.""" descriptor = msg.DESCRIPTOR if not self.Is(descriptor): return False msg.ParseFromString(self.value) return True
[ "def", "Unpack", "(", "self", ",", "msg", ")", ":", "descriptor", "=", "msg", ".", "DESCRIPTOR", "if", "not", "self", ".", "Is", "(", "descriptor", ")", ":", "return", "False", "msg", ".", "ParseFromString", "(", "self", ".", "value", ")", "return", ...
31
12.571429
def __open(self, url, headers=None, data=None, baseurl=""): """Use raw urlopen command.""" headers = headers or {} if not baseurl: baseurl = self.baseurl req = Request("%s%s" % (baseurl, url), headers=headers) _LOGGER.debug(url) try: req.data = urlencode(data).encode('utf-8') except TypeError: pass opener = build_opener() try: resp = opener.open(req) charset = resp.info().get('charset', 'utf-8') data = json.loads(resp.read().decode(charset)) opener.close() _LOGGER.debug(json.dumps(data)) return data except HTTPError as exception_: if exception_.code == 408: _LOGGER.debug("%s", exception_) return False raise TeslaException(exception_.code)
[ "def", "__open", "(", "self", ",", "url", ",", "headers", "=", "None", ",", "data", "=", "None", ",", "baseurl", "=", "\"\"", ")", ":", "headers", "=", "headers", "or", "{", "}", "if", "not", "baseurl", ":", "baseurl", "=", "self", ".", "baseurl", ...
33.346154
15.038462
def bar(self, subset=None, axis=0, color='#d65f5f', width=100, align='left', vmin=None, vmax=None): """ Draw bar chart in the cell backgrounds. Parameters ---------- subset : IndexSlice, optional A valid slice for `data` to limit the style application to. axis : {0 or 'index', 1 or 'columns', None}, default 0 apply to each column (``axis=0`` or ``'index'``), to each row (``axis=1`` or ``'columns'``), or to the entire DataFrame at once with ``axis=None``. color : str or 2-tuple/list If a str is passed, the color is the same for both negative and positive numbers. If 2-tuple/list is used, the first element is the color_negative and the second is the color_positive (eg: ['#d65f5f', '#5fba7d']). width : float, default 100 A number between 0 or 100. The largest value will cover `width` percent of the cell's width. align : {'left', 'zero',' mid'}, default 'left' How to align the bars with the cells. - 'left' : the min value starts at the left of the cell. - 'zero' : a value of zero is located at the center of the cell. - 'mid' : the center of the cell is at (max-min)/2, or if values are all negative (positive) the zero is aligned at the right (left) of the cell. .. versionadded:: 0.20.0 vmin : float, optional Minimum bar value, defining the left hand limit of the bar drawing range, lower values are clipped to `vmin`. When None (default): the minimum value of the data will be used. .. versionadded:: 0.24.0 vmax : float, optional Maximum bar value, defining the right hand limit of the bar drawing range, higher values are clipped to `vmax`. When None (default): the maximum value of the data will be used. .. 
versionadded:: 0.24.0 Returns ------- self : Styler """ if align not in ('left', 'zero', 'mid'): raise ValueError("`align` must be one of {'left', 'zero',' mid'}") if not (is_list_like(color)): color = [color, color] elif len(color) == 1: color = [color[0], color[0]] elif len(color) > 2: raise ValueError("`color` must be string or a list-like" " of length 2: [`color_neg`, `color_pos`]" " (eg: color=['#d65f5f', '#5fba7d'])") subset = _maybe_numeric_slice(self.data, subset) subset = _non_reducing_slice(subset) self.apply(self._bar, subset=subset, axis=axis, align=align, colors=color, width=width, vmin=vmin, vmax=vmax) return self
[ "def", "bar", "(", "self", ",", "subset", "=", "None", ",", "axis", "=", "0", ",", "color", "=", "'#d65f5f'", ",", "width", "=", "100", ",", "align", "=", "'left'", ",", "vmin", "=", "None", ",", "vmax", "=", "None", ")", ":", "if", "align", "n...
41.304348
21.246377
def destroy(self, request, pk=None): '''For DELETE actions, archive the organization, don't delete.''' org = self.get_object() org.archived = True org.save() return Response(status=status.HTTP_204_NO_CONTENT)
[ "def", "destroy", "(", "self", ",", "request", ",", "pk", "=", "None", ")", ":", "org", "=", "self", ".", "get_object", "(", ")", "org", ".", "archived", "=", "True", "org", ".", "save", "(", ")", "return", "Response", "(", "status", "=", "status",...
40.5
16.5
def _json_to_term_model(term_data):
    """
    Return a TermModel created from the passed json data.

    param: term_data loaded json data
    """
    strptime = datetime.strptime
    datetime_format = "%Y-%m-%dT%H:%M:%S"
    term = TermModel()
    term.year = term_data["Year"]
    term.quarter = term_data["Quarter"]
    term.last_day_add = parse_sws_date(term_data["LastAddDay"])
    term.first_day_quarter = parse_sws_date(term_data["FirstDay"])
    term.last_day_instruction = parse_sws_date(term_data["LastDayOfClasses"])
    term.last_day_drop = parse_sws_date(term_data["LastDropDay"])
    term.census_day = parse_sws_date(term_data["CensusDay"])
    if term_data["ATermLastDay"] is not None:
        term.aterm_last_date = parse_sws_date(term_data["ATermLastDay"])
    if term_data["BTermFirstDay"] is not None:
        term.bterm_first_date = parse_sws_date(term_data["BTermFirstDay"])
    if term_data["LastAddDayATerm"] is not None:
        term.aterm_last_day_add = parse_sws_date(term_data["LastAddDayATerm"])
    if term_data["LastAddDayBTerm"] is not None:
        term.bterm_last_day_add = parse_sws_date(term_data["LastAddDayBTerm"])
    term.last_final_exam_date = parse_sws_date(term_data["LastFinalExamDay"])

    # Grading timestamps fall back to fixed times on the last final exam day
    # when the upstream value is missing (TypeError) or malformed (ValueError).
    try:
        term.grading_period_open = strptime(
            term_data["GradingPeriodOpen"], datetime_format)
    except (TypeError, ValueError):
        # logger.warn() is deprecated; warning() is the supported spelling.
        logger.warning('Malformed term_data["GradingPeriodOpen"]: {}'.format(
            term_data["GradingPeriodOpen"]))
        term.grading_period_open = strptime(
            '{}T08:00:00'.format(term_data['LastFinalExamDay']),
            datetime_format)

    if term_data["GradingPeriodOpenATerm"] is not None:
        term.aterm_grading_period_open = strptime(
            term_data["GradingPeriodOpenATerm"], datetime_format)

    try:
        term.grading_period_close = strptime(
            term_data["GradingPeriodClose"], datetime_format)
    except (TypeError, ValueError):
        logger.warning('Malformed term_data["GradingPeriodClose"]: {}'.format(
            term_data["GradingPeriodClose"]))
        term.grading_period_close = strptime(
            '{}T17:00:00'.format(term_data['LastFinalExamDay']),
            datetime_format)

    try:
        term.grade_submission_deadline = strptime(
            term_data["GradeSubmissionDeadline"], datetime_format)
    except (TypeError, ValueError):
        logger.warning(
            'Malformed term_data["GradeSubmissionDeadline"]: {}'.format(
                term_data["GradeSubmissionDeadline"]))
        term.grade_submission_deadline = strptime(
            '{}T17:00:00'.format(term_data['LastFinalExamDay']),
            datetime_format)

    if term_data["RegistrationServicesStart"] is not None:
        term.registration_services_start = parse_sws_date(
            term_data["RegistrationServicesStart"])

    # The three registration periods map to numbered attributes
    # (registration_period1_start, ...). Explicit indexing (not slicing)
    # preserves the original IndexError on short upstream lists.
    for i in range(3):
        reg_period = term_data["RegistrationPeriods"][i]
        if reg_period["StartDate"] is not None:
            setattr(term, "registration_period{}_start".format(i + 1),
                    parse_sws_date(reg_period["StartDate"]))
        if reg_period["EndDate"] is not None:
            setattr(term, "registration_period{}_end".format(i + 1),
                    parse_sws_date(reg_period["EndDate"]))

    term.time_schedule_construction = {}
    for campus in term_data["TimeScheduleConstruction"]:
        term.time_schedule_construction[campus.lower()] = bool(
            term_data["TimeScheduleConstruction"][campus])

    term.time_schedule_published = {}
    for campus in term_data["TimeSchedulePublished"]:
        term.time_schedule_published[campus.lower()] = bool(
            term_data["TimeSchedulePublished"][campus])

    term.clean_fields()
    return term
[ "def", "_json_to_term_model", "(", "term_data", ")", ":", "strptime", "=", "datetime", ".", "strptime", "day_format", "=", "\"%Y-%m-%d\"", "datetime_format", "=", "\"%Y-%m-%dT%H:%M:%S\"", "term", "=", "TermModel", "(", ")", "term", ".", "year", "=", "term_data", ...
39.070796
22.787611
def collect_namespaces(metadata):
    # type: (Mapping[Text, Any]) -> Dict[Text, Text]
    """Walk through the metadata object, collecting namespace declarations."""
    namespaces = {}  # type: Dict[Text, Text]
    # Recurse into imported documents first, then apply this document's own
    # declarations on top.
    if "$import_metadata" in metadata:
        for nested_metadata in metadata["$import_metadata"].values():
            add_namespaces(collect_namespaces(nested_metadata), namespaces)
    if "$namespaces" in metadata:
        add_namespaces(metadata["$namespaces"], namespaces)
    return namespaces
[ "def", "collect_namespaces", "(", "metadata", ")", ":", "# type: (Mapping[Text, Any]) -> Dict[Text, Text]", "namespaces", "=", "{", "}", "# type: Dict[Text, Text]", "if", "\"$import_metadata\"", "in", "metadata", ":", "for", "value", "in", "metadata", "[", "\"$import_meta...
48.3
11.5
def _clear_ignore(endpoint_props):
    '''
    Both _clear_dict and _ignore_keys in a single iteration.
    '''
    # Drop None values and fields excluded from comparison in one pass.
    return {
        key: value
        for key, value in six.iteritems(endpoint_props)
        if key not in _DO_NOT_COMPARE_FIELDS and value is not None
    }
[ "def", "_clear_ignore", "(", "endpoint_props", ")", ":", "return", "dict", "(", "(", "prop_name", ",", "prop_val", ")", "for", "prop_name", ",", "prop_val", "in", "six", ".", "iteritems", "(", "endpoint_props", ")", "if", "prop_name", "not", "in", "_DO_NOT_C...
33
24.555556
def logical_or(self, other):
    """logical_or(t) = self(t) or other(t)."""
    def combine(first, second):
        # `or` keeps Python's short-circuit value; int() normalizes it.
        return int(first or second)
    return self.operation(other, combine)
[ "def", "logical_or", "(", "self", ",", "other", ")", ":", "return", "self", ".", "operation", "(", "other", ",", "lambda", "x", ",", "y", ":", "int", "(", "x", "or", "y", ")", ")" ]
46.666667
11.333333
def contains_content_items(self, request, pk, course_run_ids, program_uuids):
    """
    Return whether or not the specified content is available to the EnterpriseCustomer.

    Multiple course_run_ids and/or program_uuids query parameters can be
    sent to this view to check for their existence in the
    EnterpriseCustomerCatalogs associated with this EnterpriseCustomer.
    At least one course run key or program UUID value must be included in
    the request.
    """
    enterprise_customer = self.get_object()

    # Maintain plus characters in course key.
    course_run_ids = [unquote(quote_plus(run_id)) for run_id in course_run_ids]

    # A catalog matches when it contains every requested course run AND every
    # requested program; any() short-circuits on the first matching catalog.
    contains_content_items = any(
        (not course_run_ids or catalog.contains_courses(course_run_ids))
        and (not program_uuids or catalog.contains_programs(program_uuids))
        for catalog in enterprise_customer.enterprise_customer_catalogs.all()
    )

    return Response({'contains_content_items': contains_content_items})
[ "def", "contains_content_items", "(", "self", ",", "request", ",", "pk", ",", "course_run_ids", ",", "program_uuids", ")", ":", "enterprise_customer", "=", "self", ".", "get_object", "(", ")", "# Maintain plus characters in course key.", "course_run_ids", "=", "[", ...
54.227273
33.318182
def path_args(self):
    """the path converted to list (eg /foo/bar becomes [foo, bar])"""
    # filter(None, ...) drops empty segments from leading/trailing/double
    # slashes.
    path_args = list(filter(None, self.path.split('/')))
    # Cache the computed value. The original assigned an empty list here
    # and never updated it, so the cached attribute never matched the
    # returned result.
    self._path_args = path_args
    return path_args
[ "def", "path_args", "(", "self", ")", ":", "self", ".", "_path_args", "=", "[", "]", "path", "=", "self", ".", "path", "path_args", "=", "list", "(", "filter", "(", "None", ",", "path", ".", "split", "(", "'/'", ")", ")", ")", "return", "path_args"...
37.333333
13.166667
def _lookup_handler(name):
    """
    Look up the implementation of a named handler.  Broken out for
    testing purposes.

    :param name: The name of the handler to look up.

    :returns: A factory function for the log handler.
    """
    # Scan the 'bark.handler' entry points for the first loadable factory.
    for entry_point in pkg_resources.iter_entry_points('bark.handler', name):
        try:
            return entry_point.load()
        except (ImportError, pkg_resources.UnknownExtra):
            # This candidate could not be loaded; try the next one.
            continue

    raise ImportError("Unknown log file handler %r" % name)
[ "def", "_lookup_handler", "(", "name", ")", ":", "# Look up and load the handler factory", "for", "ep", "in", "pkg_resources", ".", "iter_entry_points", "(", "'bark.handler'", ",", "name", ")", ":", "try", ":", "# Load and return the handler factory", "return", "ep", ...
30
19.3
def pic_loggedrequiredremoterelease_v1(self):
    """Update the receiver link sequence."""
    # Copy the first receiver value into the first log slot.
    logs = self.sequences.logs.fastaccess
    receivers = self.sequences.receivers.fastaccess
    logs.loggedrequiredremoterelease[0] = receivers.d[0]
[ "def", "pic_loggedrequiredremoterelease_v1", "(", "self", ")", ":", "log", "=", "self", ".", "sequences", ".", "logs", ".", "fastaccess", "rec", "=", "self", ".", "sequences", ".", "receivers", ".", "fastaccess", "log", ".", "loggedrequiredremoterelease", "[", ...
44.6
3.8
def get_attrib(xml, name, tag=None, cast=str, default=None):
    """Return the named attribute of an XML element.

    :arg xml: the XMLElement instance to get the attribute from.
    :arg name: the name of the attribute in the xml.attrib dictionary.
    :arg tag: the name of the tag to display with the ValueError if the
        attribute is missing.
    :arg cast: callable applied to the raw attribute value before returning.
    :arg default: value returned when the attribute is absent.

    A ValueError is raised only when the attribute is missing, no default
    is given, and ``tag`` is provided; otherwise a missing attribute
    yields ``default`` (which may be None).
    """
    try:
        raw = xml.attrib[name]
    except KeyError:
        pass
    else:
        return cast(raw)
    if default is not None:
        return default
    if tag is not None:
        raise ValueError(
            "'{}' is a required attribute of <{}> tag.".format(name, tag))
[ "def", "get_attrib", "(", "xml", ",", "name", ",", "tag", "=", "None", ",", "cast", "=", "str", ",", "default", "=", "None", ")", ":", "if", "name", "in", "xml", ".", "attrib", ":", "return", "cast", "(", "xml", ".", "attrib", "[", "name", "]", ...
44.928571
19.928571
def post_structure(entry, site):
    """
    A post structure with extensions.
    """
    author = entry.authors.all()[0]
    structure = {
        'title': entry.title,
        'description': six.text_type(entry.html_content),
        'link': '%s://%s%s' % (PROTOCOL, site.domain,
                               entry.get_absolute_url()),
    }
    # Basic Extensions
    structure.update({
        'permaLink': '%s://%s%s' % (PROTOCOL, site.domain,
                                    entry.get_absolute_url()),
        'categories': [cat.title for cat in entry.categories.all()],
        'dateCreated': DateTime(entry.creation_date.isoformat()),
        'postid': entry.pk,
        'userid': author.get_username(),
    })
    # Useful Movable Type Extensions
    structure.update({
        'mt_excerpt': entry.excerpt,
        'mt_allow_comments': int(entry.comment_enabled),
        'mt_allow_pings': (int(entry.pingback_enabled)
                           or int(entry.trackback_enabled)),
        'mt_keywords': entry.tags,
    })
    # Useful Wordpress Extensions
    structure.update({
        'wp_author': author.get_username(),
        'wp_author_id': author.pk,
        'wp_author_display_name': author.__str__(),
        'wp_password': entry.password,
        'wp_slug': entry.slug,
        'sticky': entry.featured,
    })
    return structure
[ "def", "post_structure", "(", "entry", ",", "site", ")", ":", "author", "=", "entry", ".", "authors", ".", "all", "(", ")", "[", "0", "]", "return", "{", "'title'", ":", "entry", ".", "title", ",", "'description'", ":", "six", ".", "text_type", "(", ...
44.896552
10.965517
def get_locations(self, url):
    """Get valid location header values from responses.

    :param url: a URL address. If a HEAD request sent to it fails because
        the address has invalid schema, times out or there is a connection
        error, the generator yields nothing.

    :returns: valid redirection addresses. If a request for a redirection
        address fails, and the address is still a valid URL string, it's
        included as the last yielded value. If it's not, the previous value
        is the last one.

    :raises ValueError: if the argument is not a valid URL
    """
    if not is_valid_url(url):
        raise InvalidURLError('{} is not a valid URL'.format(url))
    try:
        response = self.session.head(url)
    except (ConnectionError, InvalidSchema, Timeout):
        # PEP 479: raising StopIteration inside a generator becomes a
        # RuntimeError on Python 3.7+; a plain return ends iteration.
        return
    try:
        generator = self.session.resolve_redirects(
            response,
            response.request
        )
        for response in generator:
            yield response.url
    except InvalidURL:
        pass
    except (ConnectionError, InvalidSchema, Timeout) as error:
        last_url = response.headers['location']
        # Timeouts may still point at a reachable address; otherwise only
        # yield the final location when it is a well-formed URL.
        if isinstance(error, Timeout) or is_valid_url(last_url):
            yield last_url
[ "def", "get_locations", "(", "self", ",", "url", ")", ":", "if", "not", "is_valid_url", "(", "url", ")", ":", "raise", "InvalidURLError", "(", "'{} is not a valid URL'", ".", "format", "(", "url", ")", ")", "try", ":", "response", "=", "self", ".", "sess...
42.483871
17.580645
def PMIS(S):
    """C/F splitting using the Parallel Modified Independent Set method.

    Parameters
    ----------
    S : csr_matrix
        Strength of connection matrix indicating the strength between nodes
        i and j (S_ij)

    Returns
    -------
    splitting : ndarray
        Array of length of S of ones (coarse) and zeros (fine)

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.classical import PMIS
    >>> S = poisson((7,), format='csr') # 1D mesh with 7 vertices
    >>> splitting = PMIS(S)

    See Also
    --------
    MIS

    References
    ----------
    .. [6] Hans De Sterck, Ulrike M Yang, and Jeffrey J Heys
       "Reducing complexity in parallel algebraic multigrid preconditioners"
       SIAM Journal on Matrix Analysis and Applications 2006; 27:1019-1039.

    """
    stripped = remove_diagonal(S)
    # preprocess also returns the (unused here) modified matrix and T.
    weights, graph, _, _ = preprocess(stripped)
    return MIS(graph, weights)
[ "def", "PMIS", "(", "S", ")", ":", "S", "=", "remove_diagonal", "(", "S", ")", "weights", ",", "G", ",", "S", ",", "T", "=", "preprocess", "(", "S", ")", "return", "MIS", "(", "G", ",", "weights", ")" ]
25.6
24.428571
def clear(  # type: ignore
    self,
    ch: int = ord(" "),
    fg: Tuple[int, int, int] = ...,
    bg: Tuple[int, int, int] = ...,
) -> None:
    """Reset all values in this console to a single value.

    `ch` is the character to clear the console with.  Defaults to the space
    character.

    `fg` and `bg` are the colors to clear the console with.  Defaults to
    white-on-black if the console defaults are untouched.

    .. note::
        If `fg`/`bg` are not set, they will default to
        :any:`default_fg`/:any:`default_bg`.
        However, default values other than white-on-back are deprecated.

    .. versionchanged:: 8.5
        Added the `ch`, `fg`, and `bg` parameters.
        Non-white-on-black default values are deprecated.
    """
    # Ellipsis is the "not given" sentinel: fall back to the console
    # defaults and warn when those defaults are non-standard (deprecated).
    if fg is Ellipsis:
        fg = self.default_fg
        if fg != (255, 255, 255):
            self.__clear_warning("fg", fg)
    if bg is Ellipsis:
        bg = self.default_bg
        if bg != (0, 0, 0):
            self.__clear_warning("bg", bg)
    rgba_fg = (*fg, 255)
    rgba_bg = (*bg, 255)
    self._tiles[...] = ch, rgba_fg, rgba_bg
[ "def", "clear", "(", "# type: ignore", "self", ",", "ch", ":", "int", "=", "ord", "(", "\" \"", ")", ",", "fg", ":", "Tuple", "[", "int", ",", "int", ",", "int", "]", "=", "...", ",", "bg", ":", "Tuple", "[", "int", ",", "int", ",", "int", "]...
35.28125
18.125
def update_fw(self, nids, fw_type, fw_ver, fw_path=None):
    """Update firmware of all node ids in nids."""
    firmware = load_fw(fw_path) if fw_path else None
    if not firmware:
        # Nothing to flash: no path given or the firmware failed to load.
        return
    self.ota.make_update(nids, fw_type, fw_ver, firmware)
[ "def", "update_fw", "(", "self", ",", "nids", ",", "fw_type", ",", "fw_ver", ",", "fw_path", "=", "None", ")", ":", "fw_bin", "=", "None", "if", "fw_path", ":", "fw_bin", "=", "load_fw", "(", "fw_path", ")", "if", "not", "fw_bin", ":", "return", "sel...
36.875
13.875
def AddHash(self, sha1, md5, crc, file_name, file_size, product_code_list,
            op_system_code_list, special_code):
    """Adds a new file from the NSRL hash database.

    We create a new subject in: aff4:/files/nsrl/<sha1> with all the other
    arguments as attributes.

    Args:
      sha1: SHA1 digest as a hex encoded string.
      md5: MD5 digest as a hex encoded string.
      crc: File CRC as an integer.
      file_name: Filename.
      file_size: Size of file.
      product_code_list: List of products this file is part of.
      op_system_code_list: List of operating systems this file is part of.
      special_code: Special code (malicious/special/normal file).
    """
    # The new subject lives under the NSRL base path, keyed by SHA1.
    file_store_urn = self.PATH.Add(sha1)

    # Map the raw special code onto a known file type, falling back to the
    # default entry ("") for unrecognized codes.
    special_code = self.FILE_TYPES.get(special_code, self.FILE_TYPES[""])

    with aff4.FACTORY.Create(
            file_store_urn, NSRLFile, mode="w", token=self.token) as fd:
        # NOTE(review): str.decode("hex") is Python 2 only; Python 3 would
        # need binascii.unhexlify -- confirm the targeted interpreter.
        fd.Set(
            fd.Schema.NSRL(
                sha1=sha1.decode("hex"),
                md5=md5.decode("hex"),
                crc32=crc,
                file_name=file_name,
                file_size=file_size,
                product_code=product_code_list,
                op_system_code=op_system_code_list,
                file_type=special_code))
[ "def", "AddHash", "(", "self", ",", "sha1", ",", "md5", ",", "crc", ",", "file_name", ",", "file_size", ",", "product_code_list", ",", "op_system_code_list", ",", "special_code", ")", ":", "file_store_urn", "=", "self", ".", "PATH", ".", "Add", "(", "sha1"...
36.147059
16.205882
def find_types_added_to_unions(
    old_schema: GraphQLSchema, new_schema: GraphQLSchema
) -> List[DangerousChange]:
    """Find types added to union.

    Given two schemas, returns a list containing descriptions of any dangerous
    changes in the new_schema related to adding types to a union type.
    """
    old_type_map = old_schema.type_map
    additions = []
    for union_name, candidate in new_schema.type_map.items():
        previous = old_type_map.get(union_name)
        # Only consider names that are unions in both schemas.
        if not (is_union_type(previous) and is_union_type(candidate)):
            continue
        previous = cast(GraphQLUnionType, previous)
        candidate = cast(GraphQLUnionType, candidate)
        known_members = {member.name for member in previous.types}
        for member in candidate.types:
            if member.name not in known_members:
                additions.append(
                    DangerousChange(
                        DangerousChangeType.TYPE_ADDED_TO_UNION,
                        f"{member.name} was added to union type {union_name}.",
                    )
                )
    return additions
[ "def", "find_types_added_to_unions", "(", "old_schema", ":", "GraphQLSchema", ",", "new_schema", ":", "GraphQLSchema", ")", "->", "List", "[", "DangerousChange", "]", ":", "old_type_map", "=", "old_schema", ".", "type_map", "new_type_map", "=", "new_schema", ".", ...
41.275862
16.586207
def filter_cols(model, *filtered_columns):
    """Return column names for a model except named ones.

    Useful for defer() for example to retain only columns of interest
    """
    mapper = sa.orm.class_mapper(model)
    # Only mapper properties backed by actual columns have a .columns attr.
    column_keys = {
        prop.key for prop in mapper.iterate_properties
        if hasattr(prop, "columns")
    }
    return list(column_keys - set(filtered_columns))
[ "def", "filter_cols", "(", "model", ",", "*", "filtered_columns", ")", ":", "m", "=", "sa", ".", "orm", ".", "class_mapper", "(", "model", ")", "return", "list", "(", "{", "p", ".", "key", "for", "p", "in", "m", ".", "iterate_properties", "if", "hasa...
31.727273
20.090909
def reverseDict(d):
    """
    Helper for generating fullToTag
    Makes dict of value to key
    """
    # On duplicate values the last key wins, matching insertion order.
    return {value: key for key, value in d.items()}
[ "def", "reverseDict", "(", "d", ")", ":", "retD", "=", "{", "}", "for", "k", "in", "d", ":", "retD", "[", "d", "[", "k", "]", "]", "=", "k", "return", "retD" ]
18.111111
14.555556
def inference_q(self, next_action_arr):
    '''
    Inference Q-Value.

    Args:
        next_action_arr:    `np.ndarray` of action.

    Returns:
        `np.ndarray` of Q-Values.
    '''
    flattened = next_action_arr.reshape((next_action_arr.shape[0], -1))
    self.__q_arr_list.append(flattened)
    # Keep a sliding window of exactly `seq_len` frames: trim the oldest
    # entries, then pad by repeating the newest frame while history is short.
    while len(self.__q_arr_list) > self.__seq_len:
        self.__q_arr_list = self.__q_arr_list[1:]
    while len(self.__q_arr_list) < self.__seq_len:
        self.__q_arr_list.append(self.__q_arr_list[-1])
    # (seq, batch, feat) -> (batch, seq, feat) for the LSTM.
    sequence = np.array(self.__q_arr_list).transpose((1, 0, 2))
    predicted = self.__lstm_model.inference(sequence)
    # Q-value of the last step in each sequence, as a column vector.
    return predicted[:, -1].reshape((predicted.shape[0], 1))
[ "def", "inference_q", "(", "self", ",", "next_action_arr", ")", ":", "q_arr", "=", "next_action_arr", ".", "reshape", "(", "(", "next_action_arr", ".", "shape", "[", "0", "]", ",", "-", "1", ")", ")", "self", ".", "__q_arr_list", ".", "append", "(", "q...
35.47619
17.666667
def characterize(cls, record):
    """Load the record in a concrete subclass of this type.
    """
    # Candidates are the direct subclasses, plus this class itself when it
    # is concrete.
    candidates = list(cls.__subclasses__())
    if not isabstract(cls):
        candidates.append(cls)
    for candidate in candidates:
        instance = candidate(record)
        if instance.is_valid():
            return instance
    raise RuntimeError("could not find the type for '{}'".format(record.id))
[ "def", "characterize", "(", "cls", ",", "record", ")", ":", "classes", "=", "list", "(", "cls", ".", "__subclasses__", "(", ")", ")", "if", "not", "isabstract", "(", "cls", ")", ":", "classes", ".", "append", "(", "cls", ")", "for", "subclass", "in",...
38.272727
9.181818
def ajax_upload(request, folder_id=None):
    """
    Receives an upload from the uploader. Receives only one file at a time.

    Returns a JsonResponse describing the stored file (thumbnail, label,
    file_id, ...) on success, or a JsonResponse with an 'error' key
    (status 500 for upload/form failures) otherwise.
    """
    folder = None
    if folder_id:
        try:
            # Get folder
            folder = Folder.objects.get(pk=folder_id)
        except Folder.DoesNotExist:
            return JsonResponse({'error': NO_FOLDER_ERROR})

    # check permissions
    if folder and not folder.has_add_children_permission(request):
        return JsonResponse({'error': NO_PERMISSIONS_FOR_FOLDER})
    try:
        if len(request.FILES) == 1:
            # dont check if request is ajax or not, just grab the file
            upload, filename, is_raw = handle_request_files_upload(request)
        else:
            # else process the request as usual
            upload, filename, is_raw = handle_upload(request)
        # TODO: Deprecated/refactor
        # Get clipboad
        # clipboard = Clipboard.objects.get_or_create(user=request.user)[0]

        # find the file type
        for filer_class in filer_settings.FILER_FILE_MODELS:
            FileSubClass = load_model(filer_class)
            # TODO: What if there are more than one that qualify?
            if FileSubClass.matches_file_type(filename, upload, request):
                FileForm = modelform_factory(
                    model=FileSubClass,
                    fields=('original_filename', 'owner', 'file')
                )
                break
        # NOTE(review): if no FILER_FILE_MODELS entry matches, FileForm is
        # never bound and the next line raises NameError -- confirm a
        # catch-all model is always configured.
        uploadform = FileForm({'original_filename': filename,
                               'owner': request.user.pk},
                              {'file': upload})
        if uploadform.is_valid():
            file_obj = uploadform.save(commit=False)
            # Enforce the FILER_IS_PUBLIC_DEFAULT
            file_obj.is_public = filer_settings.FILER_IS_PUBLIC_DEFAULT
            file_obj.folder = folder
            file_obj.save()
            # TODO: Deprecated/refactor
            # clipboard_item = ClipboardItem(
            #     clipboard=clipboard, file=file_obj)
            # clipboard_item.save()

            # Try to generate thumbnails.
            if not file_obj.icons:
                # There is no point to continue, as we can't generate
                # thumbnails for this file. Usual reasons: bad format or
                # filename.
                file_obj.delete()
                # This would be logged in BaseImage._generate_thumbnails()
                # if FILER_ENABLE_LOGGING is on.
                return JsonResponse(
                    {'error': 'failed to generate icons for file'},
                    status=500,
                )
            thumbnail = None
            # Backwards compatibility: try to get specific icon size (32px)
            # first. Then try medium icon size (they are already sorted),
            # fallback to the first (smallest) configured icon.
            for size in (['32'] + filer_settings.FILER_ADMIN_ICON_SIZES[1::-1]):
                try:
                    thumbnail = file_obj.icons[size]
                    break
                except KeyError:
                    continue
            data = {
                'thumbnail': thumbnail,
                'alt_text': '',
                'label': str(file_obj),
                'file_id': file_obj.pk,
            }
            # prepare preview thumbnail
            if type(file_obj) == Image:
                thumbnail_180_options = {
                    'size': (180, 180),
                    'crop': True,
                    'upscale': True,
                }
                thumbnail_180 = file_obj.file.get_thumbnail(
                    thumbnail_180_options)
                data['thumbnail_180'] = thumbnail_180.url
                data['original_image'] = file_obj.url
            return JsonResponse(data)
        else:
            # Collapse per-field form errors into one readable message.
            form_errors = '; '.join(['%s: %s' % (
                field,
                ', '.join(errors)) for field, errors in list(
                    uploadform.errors.items())
            ])
            raise UploadException(
                "AJAX request not valid: form invalid '%s'" % (
                    form_errors,))
    except UploadException as e:
        return JsonResponse({'error': str(e)}, status=500)
[ "def", "ajax_upload", "(", "request", ",", "folder_id", "=", "None", ")", ":", "folder", "=", "None", "if", "folder_id", ":", "try", ":", "# Get folder", "folder", "=", "Folder", ".", "objects", ".", "get", "(", "pk", "=", "folder_id", ")", "except", "...
40.368932
16
def isSurrounded(self):
    """ Returns if the object is separating and applying to a
    malefic considering bad aspects.

    """
    # Bad aspects: conjunction, square and opposition.
    bad_aspects = [0, 90, 180]
    return self.__sepApp([const.MARS, const.SATURN], aspList=bad_aspects)
[ "def", "isSurrounded", "(", "self", ")", ":", "malefics", "=", "[", "const", ".", "MARS", ",", "const", ".", "SATURN", "]", "return", "self", ".", "__sepApp", "(", "malefics", ",", "aspList", "=", "[", "0", ",", "90", ",", "180", "]", ")" ]
36.142857
10.857143
def requires_user(fn):
    """
    Requires that the calling Subject be *either* authenticated *or*
    remembered via RememberMe services before allowing access.

    This method essentially ensures that subject.identifiers IS NOT None
    """
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        subject = WebYosai.get_current_subject()

        # Anyone with identifiers (authenticated or remembered) passes.
        if subject.identifiers is not None:
            return fn(*args, **kwargs)

        msg = ("Attempting to perform a user-only operation.  The "
               "current Subject is NOT a user (they haven't been "
               "authenticated or remembered from a previous login). "
               "ACCESS DENIED.")
        raise WebYosai.get_current_webregistry().raise_unauthorized(msg)
    return wrapper
[ "def", "requires_user", "(", "fn", ")", ":", "@", "functools", ".", "wraps", "(", "fn", ")", "def", "wrap", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "subject", "=", "WebYosai", ".", "get_current_subject", "(", ")", "if", "subject", ".", ...
40.9
21.7
def match_rule_patterns(fixed_text, cur=0):
    """Matches given text at cursor position with rule patterns

    Returns a dictionary of four elements:

    - "matched" - Bool: depending on if match found
    - "found" - string/None: Value of matched pattern's 'find' key or none
    - "replaced": string Replaced string if match found else input string at
      cursor
    - "rules": dict/None: A dict of rules or None if no match found
    """
    pattern = exact_find_in_pattern(fixed_text, cur, RULE_PATTERNS)
    # if len(pattern) == 1:
    if pattern:
        first = pattern[0]
        return {
            "matched": True,
            "found": first['find'],
            "replaced": first['replace'],
            "rules": first['rules'],
        }
    return {
        "matched": False,
        "found": None,
        "replaced": fixed_text[cur],
        "rules": None,
    }
[ "def", "match_rule_patterns", "(", "fixed_text", ",", "cur", "=", "0", ")", ":", "pattern", "=", "exact_find_in_pattern", "(", "fixed_text", ",", "cur", ",", "RULE_PATTERNS", ")", "# if len(pattern) == 1:", "if", "len", "(", "pattern", ")", ">", "0", ":", "r...
40.4
21.9
def plot_hdd(HDD, B, M, s):
    """
    Function to make hysteresis, deltaM and DdeltaM plots

    Parameters
    _______________
    Input
        HDD : dictionary with figure numbers for the keys:
            'hyst' : hysteresis plot  normalized to maximum value
            'deltaM' : Delta M plot
            'DdeltaM' : differential of Delta M plot
        B : list of field values in tesla
        M : list of magnetizations in arbitrary units
        s : specimen name string
    Output
        hpars : dictionary of hysteresis parameters with keys:
            'hysteresis_xhf', 'hysteresis_ms_moment',
            'hysteresis_mr_moment', 'hysteresis_bc'
    """
    hpars, deltaM, Bdm = plot_hys(
        HDD['hyst'], B, M, s)  # Moff is the "fixed" loop data
    DdeltaM = []
    Mhalf = ""
    # Centered two-point difference of deltaM over Bdm.
    for k in range(2, len(Bdm)):  # differnential
        DdeltaM.append(
            old_div(abs(deltaM[k] - deltaM[k - 2]), (Bdm[k] - Bdm[k - 2])))
    # Index where deltaM first falls below half its initial value.
    for k in range(len(deltaM)):
        if old_div(deltaM[k], deltaM[0]) < 0.5:
            Mhalf = k
            break
    try:
        Bhf = Bdm[Mhalf - 1:Mhalf + 1]
        Mhf = deltaM[Mhalf - 1:Mhalf + 1]
        # best fit line through two bounding points
        poly = np.polyfit(Bhf, Mhf, 1)
        Bcr = old_div((.5 * deltaM[0] - poly[1]), poly[0])
        hpars['hysteresis_bcr'] = '%8.3e' % (Bcr)
        hpars['magic_method_codes'] = "LP-BCR-HDM"
        if HDD['deltaM'] != 0:
            plot_delta_m(HDD['deltaM'], Bdm, deltaM, Bcr, s)
            plt.axhline(0, color='k')
            plt.axvline(0, color='k')
            plot_d_delta_m(HDD['DdeltaM'], Bdm, DdeltaM, s)
    except Exception:
        # Best-effort fallback when no half-value index was found (slicing
        # with Mhalf == "" raises TypeError) or the fit fails. The previous
        # bare ``except:`` also swallowed KeyboardInterrupt/SystemExit;
        # catching Exception keeps the fallback without hiding exits.
        hpars['hysteresis_bcr'] = '0'
        hpars['magic_method_codes'] = ""
    return hpars
[ "def", "plot_hdd", "(", "HDD", ",", "B", ",", "M", ",", "s", ")", ":", "hpars", ",", "deltaM", ",", "Bdm", "=", "plot_hys", "(", "HDD", "[", "'hyst'", "]", ",", "B", ",", "M", ",", "s", ")", "# Moff is the \"fixed\" loop data", "DdeltaM", "=", "[",...
36
15.702128
def _check_for_cycle(self, variable, period):
    """
    Raise an exception in the case of a circular definition, where evaluating
    a variable for a given period loops around to evaluating the same
    variable/period pair. Also guards, as a heuristic, against "quasicircles",
    where the evaluation of a variable at a period involves the same variable
    at a different period.
    """
    name = variable.name
    period_key = str(period)
    # Periods at which this same variable is already being computed.
    previous_periods = [
        stacked_period
        for (stacked_name, stacked_period) in self.computation_stack
        if stacked_name == name
    ]
    self.computation_stack.append((name, period_key))
    if period_key in previous_periods:
        raise CycleError(
            "Circular definition detected on formula {}@{}".format(
                name, period))
    if len(previous_periods) >= self.max_spiral_loops:
        self.invalidate_spiral_variables(variable)
        message = (
            "Quasicircular definition detected on formula {}@{} involving {}"
            .format(name, period, self.computation_stack))
        raise SpiralError(message, name)
[ "def", "_check_for_cycle", "(", "self", ",", "variable", ",", "period", ")", ":", "previous_periods", "=", "[", "_period", "for", "(", "_name", ",", "_period", ")", "in", "self", ".", "computation_stack", "if", "_name", "==", "variable", ".", "name", "]", ...
66.375
32.75
def sign_file(self, filepath):
    """Signs file if possible"""
    def show_status(statustext):
        # Post to the status bar; ignore the call when the window is gone.
        try:
            post_command_event(self.main_window, self.StatusBarMsg,
                               text=statustext)
        except TypeError:
            # The main window does not exist any more
            pass

    if not GPG_PRESENT:
        return

    signed_data = sign(filepath)
    signature = signed_data.data

    if not signature:
        show_status(_('Error signing file. ') + signed_data.stderr)
        return

    with open(filepath + '.sig', 'wb') as signfile:
        signfile.write(signature)

    # Statustext differs if a save has occurred
    if self.code_array.safe_mode:
        show_status(_('File saved and signed'))
    else:
        show_status(_('File signed'))
[ "def", "sign_file", "(", "self", ",", "filepath", ")", ":", "if", "not", "GPG_PRESENT", ":", "return", "signed_data", "=", "sign", "(", "filepath", ")", "signature", "=", "signed_data", ".", "data", "if", "signature", "is", "None", "or", "not", "signature"...
29.638889
20.305556
def quantileclip(arrays, masks=None, dtype=None, out=None,
                 zeros=None, scales=None,
                 weights=None, fclip=0.10):
    """Combine arrays using the sigma-clipping, with masks.

    Inputs and masks are a list of array objects. All input arrays
    have the same shape. If present, the masks have the same shape
    also.

    The function returns an array with one more dimension than the
    inputs and with size (3, shape). out[0] contains the mean,
    out[1] the variance and out[2] the number of points used.

    :param arrays: a list of arrays
    :param masks: a list of mask arrays, True values are masked
    :param dtype: data type of the output
    :param out: optional output, with one more axis than the input arrays
    :param fclip: fraction of points removed on both ends. Maximum is 0.4
        (80% of points rejected)
    :return: mean, variance of the mean and number of points stored
    """
    # Build the clipping method once, then delegate to the generic combiner.
    method = intl_combine.quantileclip_method(fclip)
    return generic_combine(method, arrays, masks=masks, dtype=dtype, out=out,
                           zeros=zeros, scales=scales, weights=weights)
[ "def", "quantileclip", "(", "arrays", ",", "masks", "=", "None", ",", "dtype", "=", "None", ",", "out", "=", "None", ",", "zeros", "=", "None", ",", "scales", "=", "None", ",", "weights", "=", "None", ",", "fclip", "=", "0.10", ")", ":", "return", ...
48.782609
23.913043
def getHostCaPath(self, name):
    '''
    Gets the path to the CA certificate that issued a given host keypair.

    Args:
        name (str): The name of the host keypair.

    Examples:
        Get the path to the CA cert which issue the cert for "myhost":

            mypath = cdir.getHostCaPath('myhost')

    Returns:
        str: The path if exists.
    '''
    cert = self.getHostCert(name)
    return None if cert is None else self._getCaPath(cert)
[ "def", "getHostCaPath", "(", "self", ",", "name", ")", ":", "cert", "=", "self", ".", "getHostCert", "(", "name", ")", "if", "cert", "is", "None", ":", "return", "None", "return", "self", ".", "_getCaPath", "(", "cert", ")" ]
25.55
24.15
def ibatch(size, iterable=None, rest=False): """ add example :param size: :param iterable: :param rest: :return: """ @iterflow def exact_size(it): it = iter(it) while True: yield [it.next() for _ in xrange(size)] @iterflow def at_most(it): it = iter(it) while True: data = [] for _ in xrange(size): try: data.append(it.next()) except StopIteration: if data: yield data raise StopIteration yield data ibatchit = at_most if rest else exact_size return ibatchit if iterable is None else ibatchit(iterable)
[ "def", "ibatch", "(", "size", ",", "iterable", "=", "None", ",", "rest", "=", "False", ")", ":", "@", "iterflow", "def", "exact_size", "(", "it", ")", ":", "it", "=", "iter", "(", "it", ")", "while", "True", ":", "yield", "[", "it", ".", "next", ...
22.59375
18.21875
def push(self, index=None): """Push built documents to ElasticSearch. If ``index`` is specified, only that index will be pushed. """ for ind in self.indexes: if index and not isinstance(ind, index): continue ind.push()
[ "def", "push", "(", "self", ",", "index", "=", "None", ")", ":", "for", "ind", "in", "self", ".", "indexes", ":", "if", "index", "and", "not", "isinstance", "(", "ind", ",", "index", ")", ":", "continue", "ind", ".", "push", "(", ")" ]
31.444444
14.777778
def parse(cls, fptr, offset, length): """Parse data reference box. Parameters ---------- fptr : file Open file object. offset : int Start position of box in bytes. length : int Length of the box in bytes. Returns ------- DataReferenceBox Instance of the current data reference box. """ num_bytes = offset + length - fptr.tell() read_buffer = fptr.read(num_bytes) # Read the number of data references ndr, = struct.unpack_from('>H', read_buffer, offset=0) # Need to keep track of where the next url box starts. box_offset = 2 data_entry_url_box_list = [] for j in range(ndr): # Create an in-memory binary stream for each URL box. box_fptr = io.BytesIO(read_buffer[box_offset:]) box_buffer = box_fptr.read(8) (box_length, box_id) = struct.unpack_from('>I4s', box_buffer, offset=0) box = DataEntryURLBox.parse(box_fptr, 0, box_length) # Need to adjust the box start to that of the "real" file. box.offset = offset + 8 + box_offset data_entry_url_box_list.append(box) # Point to the next embedded URL box. box_offset += box_length return cls(data_entry_url_box_list, length=length, offset=offset)
[ "def", "parse", "(", "cls", ",", "fptr", ",", "offset", ",", "length", ")", ":", "num_bytes", "=", "offset", "+", "length", "-", "fptr", ".", "tell", "(", ")", "read_buffer", "=", "fptr", ".", "read", "(", "num_bytes", ")", "# Read the number of data ref...
32.545455
19.840909
def cycle_list(self,*args): ''' return the list of cycles contained if the dataset ''' noargs = len(args) == 0 return np.unique(self.cycle) if noargs else np.unique(self.cycle.compress(args[0]))
[ "def", "cycle_list", "(", "self", ",", "*", "args", ")", ":", "noargs", "=", "len", "(", "args", ")", "==", "0", "return", "np", ".", "unique", "(", "self", ".", "cycle", ")", "if", "noargs", "else", "np", ".", "unique", "(", "self", ".", "cycle"...
39
24.333333
def get_table_metadata(self, resource, resource_class): """ Get metadata for a given resource: class :param resource: The name of the resource :param resource_class: The name of the class to get metadata from :return: list """ return self._make_metadata_request(meta_id=resource + ':' + resource_class, metadata_type='METADATA-TABLE')
[ "def", "get_table_metadata", "(", "self", ",", "resource", ",", "resource_class", ")", ":", "return", "self", ".", "_make_metadata_request", "(", "meta_id", "=", "resource", "+", "':'", "+", "resource_class", ",", "metadata_type", "=", "'METADATA-TABLE'", ")" ]
47.875
19.875
def get_default_config(self): """ Return the default config for the handler """ config = super(MySQLHandler, self).get_default_config() config.update({ }) return config
[ "def", "get_default_config", "(", "self", ")", ":", "config", "=", "super", "(", "MySQLHandler", ",", "self", ")", ".", "get_default_config", "(", ")", "config", ".", "update", "(", "{", "}", ")", "return", "config" ]
21.7
18.9
def neighbors(self, node_id): """Find all the nodes where there is an edge from the specified node to that node. Returns a list of node ids.""" node = self.get_node(node_id) flattened_nodes_list = [] for a, b in [self.get_edge(edge_id)['vertices'] for edge_id in node['edges']]: flattened_nodes_list.append(a) flattened_nodes_list.append(b) node_set = set(flattened_nodes_list) if node_id in node_set: node_set.remove(node_id) return [nid for nid in node_set]
[ "def", "neighbors", "(", "self", ",", "node_id", ")", ":", "node", "=", "self", ".", "get_node", "(", "node_id", ")", "flattened_nodes_list", "=", "[", "]", "for", "a", ",", "b", "in", "[", "self", ".", "get_edge", "(", "edge_id", ")", "[", "'vertice...
45.666667
7.333333
def get_instance_by_bin_uuid(model, bin_uuid): """Get an instance by binary uuid. :param model: a string, model name in rio.models. :param bin_uuid: a 16-bytes binary string. :return: None or a SQLAlchemy instance. """ try: model = get_model(model) except ImportError: return None return model.query.filter_by(**{'bin_uuid': bin_uuid}).first()
[ "def", "get_instance_by_bin_uuid", "(", "model", ",", "bin_uuid", ")", ":", "try", ":", "model", "=", "get_model", "(", "model", ")", "except", "ImportError", ":", "return", "None", "return", "model", ".", "query", ".", "filter_by", "(", "*", "*", "{", "...
29.307692
16.307692