text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def run(self):
    """Run the main loop: watch sources for changes and emit plugin output.

    Starts all plugins, then loops forever: whenever the config file or
    plugin directory is modified, all worker threads are killed and the
    plugins reloaded; each cycle the plugins' collected JSON output is
    written to the bar without polling the threads.
    """
    self.run_plugins()
    while True:
        # Lazily compare recorded mtimes against the files on disk;
        # any() short-circuits just like the original `or` chain.
        watched = (
            (self._config_mod_time, self._config_file_path),
            (self._plugin_mod_time, self._plugin_path),
        )
        if any(mtime != os.path.getmtime(path) for mtime, path in watched):
            # Tear everything down and start fresh from the new sources.
            self.thread_manager.kill_all_threads()
            self.output_dict.clear()
            self.reload()
            self.run_plugins()
        self.output_to_bar(json.dumps(self._remove_empty_output()))
        time.sleep(self.config.general['interval'])
[ "def", "run", "(", "self", ")", ":", "self", ".", "run_plugins", "(", ")", "while", "True", ":", "# Reload plugins and config if either the config file or plugin", "# directory are modified.", "if", "self", ".", "_config_mod_time", "!=", "os", ".", "path", ".", "getmtime", "(", "self", ".", "_config_file_path", ")", "or", "self", ".", "_plugin_mod_time", "!=", "os", ".", "path", ".", "getmtime", "(", "self", ".", "_plugin_path", ")", ":", "self", ".", "thread_manager", ".", "kill_all_threads", "(", ")", "self", ".", "output_dict", ".", "clear", "(", ")", "self", ".", "reload", "(", ")", "self", ".", "run_plugins", "(", ")", "self", ".", "output_to_bar", "(", "json", ".", "dumps", "(", "self", ".", "_remove_empty_output", "(", ")", ")", ")", "time", ".", "sleep", "(", "self", ".", "config", ".", "general", "[", "'interval'", "]", ")" ]
46.941176
19.882353
def side_effect(self, func, *args, **kwargs):
    """Register *func* (with *args*/*kwargs*) as this spy's side effect.

    Returns the spy itself so configuration calls can be chained.
    """
    self._spy_side_effect_kwargs = kwargs
    self._spy_side_effect_args = args
    self._spy_side_effect = func
    return self
[ "def", "side_effect", "(", "self", ",", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_spy_side_effect", "=", "func", "self", ".", "_spy_side_effect_args", "=", "args", "self", ".", "_spy_side_effect_kwargs", "=", "kwargs", "return", "self" ]
30.5
12.25
def add_resource_context(
    app: web.Application,
    module=None,
    url_prefix: str=None,
    name_prefix: str=None,
    make_resource=lambda cls: cls()
):
    """Context manager yielding a ``route`` function that registers multiple
    resources on *app* using ``ResourceRouter``.

    Usage::

        with add_resource_context(app, url_prefix='/api/') as route:
            route('/articles/', views.ArticleList())
            route('/articles/{pk}', views.ArticleDetail())

    ``module`` may be an import path (str) or a module object; resource
    class names may then be passed as strings and are instantiated through
    ``make_resource`` (default: no-argument construction).

    :param app: Application to add routes to.
    :param module: Import path (str) or module object containing the
        resource classes.
    :param url_prefix: Prefix to prepend to all route paths.
    :param name_prefix: Prefix to prepend to all route names.
    :param make_resource: Function which receives a resource class and
        returns a resource instance.
    """
    assert isinstance(app.router, ResourceRouter), 'app must be using ResourceRouter'
    if isinstance(module, (str, bytes)):
        module = importlib.import_module(module)

    def _base_name(resource, method_name, names):
        # Explicit names win; otherwise fall back to the router default.
        default = app.router.get_default_handler_name(resource, method_name)
        return names.get(method_name, default)

    default_make_resource = make_resource

    def add_route(
        path: str,
        resource,
        names: Mapping=None,
        make_resource=None
    ):
        resolved_make = make_resource or default_make_resource
        route_names = names or {}
        if isinstance(resource, (str, bytes)):
            if not module:
                raise ValueError(
                    'Must pass module to add_route_context if passing resource name strings.'
                )
            resource = resolved_make(getattr(module, resource))
        full_path = make_path(path, url_prefix)
        if name_prefix:
            # Build the prefixed name for every supported HTTP method,
            # consulting the caller-supplied names first.
            route_names = {
                method: '.'.join(
                    (name_prefix, _base_name(resource, method, names=route_names))
                )
                for method in get_supported_method_names(resource)
            }
        return app.router.add_resource_object(full_path, resource, names=route_names)

    yield add_route
[ "def", "add_resource_context", "(", "app", ":", "web", ".", "Application", ",", "module", "=", "None", ",", "url_prefix", ":", "str", "=", "None", ",", "name_prefix", ":", "str", "=", "None", ",", "make_resource", "=", "lambda", "cls", ":", "cls", "(", ")", ")", ":", "assert", "isinstance", "(", "app", ".", "router", ",", "ResourceRouter", ")", ",", "'app must be using ResourceRouter'", "if", "isinstance", "(", "module", ",", "(", "str", ",", "bytes", ")", ")", ":", "module", "=", "importlib", ".", "import_module", "(", "module", ")", "def", "get_base_name", "(", "resource", ",", "method_name", ",", "names", ")", ":", "return", "names", ".", "get", "(", "method_name", ",", "app", ".", "router", ".", "get_default_handler_name", "(", "resource", ",", "method_name", ")", ")", "default_make_resource", "=", "make_resource", "def", "add_route", "(", "path", ":", "str", ",", "resource", ",", "names", ":", "Mapping", "=", "None", ",", "make_resource", "=", "None", ")", ":", "make_resource", "=", "make_resource", "or", "default_make_resource", "names", "=", "names", "or", "{", "}", "if", "isinstance", "(", "resource", ",", "(", "str", ",", "bytes", ")", ")", ":", "if", "not", "module", ":", "raise", "ValueError", "(", "'Must pass module to add_route_context if passing resource name strings.'", ")", "resource_cls", "=", "getattr", "(", "module", ",", "resource", ")", "resource", "=", "make_resource", "(", "resource_cls", ")", "path", "=", "make_path", "(", "path", ",", "url_prefix", ")", "if", "name_prefix", ":", "supported_method_names", "=", "get_supported_method_names", "(", "resource", ")", "names", "=", "{", "method_name", ":", "'.'", ".", "join", "(", "(", "name_prefix", ",", "get_base_name", "(", "resource", ",", "method_name", ",", "names", "=", "names", ")", ")", ")", "for", "method_name", "in", "supported_method_names", "}", "return", "app", ".", "router", ".", "add_resource_object", "(", "path", ",", "resource", ",", "names", "=", "names", ")", "yield", 
"add_route" ]
36.885714
22.647619
def report(mount):
    '''
    Report on quotas for the specified volume

    CLI Example:

    .. code-block:: bash

        salt '*' quota.report /media/data
    '''
    # User quotas are queried first (-u), then group quotas (-g),
    # preserving the original call order.
    return {
        mount: {
            'User Quotas': _parse_quota(mount, '-u'),
            'Group Quotas': _parse_quota(mount, '-g'),
        }
    }
[ "def", "report", "(", "mount", ")", ":", "ret", "=", "{", "mount", ":", "{", "}", "}", "ret", "[", "mount", "]", "[", "'User Quotas'", "]", "=", "_parse_quota", "(", "mount", ",", "'-u'", ")", "ret", "[", "mount", "]", "[", "'Group Quotas'", "]", "=", "_parse_quota", "(", "mount", ",", "'-g'", ")", "return", "ret" ]
21.785714
23.642857
def conjugate(self):
    """Return the element-wise complex conjugate of the matrix.

    This is defined only when every entry provides a ``conjugate``
    method. A matrix of operators does not: only an element-wise
    adjoint would apply there, which is mathematically different from
    complex conjugation.

    Raises:
        NoConjugateMatrix: if any entries have no `conjugate` method
    """
    try:
        conjugated = np_conjugate(self.matrix)
    except AttributeError:
        raise NoConjugateMatrix(
            "Matrix %s contains entries that have no defined "
            "conjugate" % str(self))
    return Matrix(conjugated)
[ "def", "conjugate", "(", "self", ")", ":", "try", ":", "return", "Matrix", "(", "np_conjugate", "(", "self", ".", "matrix", ")", ")", "except", "AttributeError", ":", "raise", "NoConjugateMatrix", "(", "\"Matrix %s contains entries that have no defined \"", "\"conjugate\"", "%", "str", "(", "self", ")", ")" ]
41.444444
21.388889
def get_file_info(hash, context=None):
    """Return ``(filename, media_type)`` for the file identified by ``hash``.

    If `context` (an ident-hash) is supplied, the lookup is scoped to
    that context. Raises ``FileNotFound`` when no matching row exists.
    """
    if context is None:
        stmt = _get_sql('get-file-info.sql')
        args = dict(hash=hash)
    else:
        id, version = get_id_n_version(context)
        stmt = _get_sql('get-file-info-in-context.sql')
        args = dict(hash=hash, id=id, version=version)
    with db_connect() as db_conn, db_conn.cursor() as cursor:
        cursor.execute(stmt, args)
        try:
            filename, media_type = cursor.fetchone()
        except TypeError:
            # fetchone() returned None (no row) -> unpacking raises TypeError.
            raise FileNotFound(hash)
    return filename, media_type
[ "def", "get_file_info", "(", "hash", ",", "context", "=", "None", ")", ":", "if", "context", "is", "None", ":", "stmt", "=", "_get_sql", "(", "'get-file-info.sql'", ")", "args", "=", "dict", "(", "hash", "=", "hash", ")", "else", ":", "stmt", "=", "_get_sql", "(", "'get-file-info-in-context.sql'", ")", "id", ",", "version", "=", "get_id_n_version", "(", "context", ")", "args", "=", "dict", "(", "hash", "=", "hash", ",", "id", "=", "id", ",", "version", "=", "version", ")", "with", "db_connect", "(", ")", "as", "db_conn", ":", "with", "db_conn", ".", "cursor", "(", ")", "as", "cursor", ":", "cursor", ".", "execute", "(", "stmt", ",", "args", ")", "try", ":", "filename", ",", "media_type", "=", "cursor", ".", "fetchone", "(", ")", "except", "TypeError", ":", "raise", "FileNotFound", "(", "hash", ")", "return", "filename", ",", "media_type" ]
34.863636
12.727273
def _parse_scalars(scalars):
    """Parse the scalars from the YAML file content to a dictionary of
    ScalarType(s).

    :return: A dictionary { 'full.scalar.label': ScalarType }
    """
    parsed = {}
    # Definitions use a fixed two-level hierarchy: the category name at
    # the first level, the probe name at the second
    # (e.g. "category.name: probe: ...").
    for category_name in scalars:
        for probe_name in scalars[category_name]:
            definition = scalars[category_name][probe_name]
            # strict_type_checks=False: no server-side validation, which
            # also skips the required-key checks.
            scalar = ScalarType(category_name, probe_name, definition,
                                strict_type_checks=False)
            parsed[scalar.label] = scalar
    return parsed
[ "def", "_parse_scalars", "(", "scalars", ")", ":", "scalar_dict", "=", "{", "}", "# Scalars are defined in a fixed two-level hierarchy within the definition file.", "# The first level contains the category name, while the second level contains the", "# probe name (e.g. \"category.name: probe: ...\").", "for", "category_name", "in", "scalars", ":", "category", "=", "scalars", "[", "category_name", "]", "for", "probe_name", "in", "category", ":", "# We found a scalar type. Go ahead and parse it.", "scalar_definition", "=", "category", "[", "probe_name", "]", "# We pass |strict_type_checks=False| as we don't want to do any check", "# server side. This includes skipping the checks for the required keys.", "scalar_info", "=", "ScalarType", "(", "category_name", ",", "probe_name", ",", "scalar_definition", ",", "strict_type_checks", "=", "False", ")", "scalar_dict", "[", "scalar_info", ".", "label", "]", "=", "scalar_info", "return", "scalar_dict" ]
50.045455
24.227273
def split_by_files(self, valid_names:'ItemList')->'ItemLists':
    "Split the data by using the names in `valid_names` for validation."
    # Path items expose .name directly; anything else goes through
    # os.path.basename on its string form.
    if isinstance(self.items[0], Path):
        name_of = lambda o: o.name
    else:
        name_of = lambda o: os.path.basename(o)
    return self.split_by_valid_func(lambda o: name_of(o) in valid_names)
[ "def", "split_by_files", "(", "self", ",", "valid_names", ":", "'ItemList'", ")", "->", "'ItemLists'", ":", "if", "isinstance", "(", "self", ".", "items", "[", "0", "]", ",", "Path", ")", ":", "return", "self", ".", "split_by_valid_func", "(", "lambda", "o", ":", "o", ".", "name", "in", "valid_names", ")", "else", ":", "return", "self", ".", "split_by_valid_func", "(", "lambda", "o", ":", "os", ".", "path", ".", "basename", "(", "o", ")", "in", "valid_names", ")" ]
84.25
44.25
def match_file(apikey, path, metadata=None):
    """Decode the audio file at *path* with the audioread library and
    submit it for matching.
    """
    import audioread
    with audioread.audio_open(path) as audio:
        duration = int(audio.duration)
        return match(apikey, iter(audio), audio.samplerate, duration,
                     audio.channels, metadata)
[ "def", "match_file", "(", "apikey", ",", "path", ",", "metadata", "=", "None", ")", ":", "import", "audioread", "with", "audioread", ".", "audio_open", "(", "path", ")", "as", "f", ":", "return", "match", "(", "apikey", ",", "iter", "(", "f", ")", ",", "f", ".", "samplerate", ",", "int", "(", "f", ".", "duration", ")", ",", "f", ".", "channels", ",", "metadata", ")" ]
41.857143
7.857143
def _instantiateFont(self, path): """ Return a instance of a font object with all the given subclasses """ return self._fontClass(path, libClass=self._libClass, kerningClass=self._kerningClass, groupsClass=self._groupsClass, infoClass=self._infoClass, featuresClass=self._featuresClass, glyphClass=self._glyphClass, glyphContourClass=self._glyphContourClass, glyphPointClass=self._glyphPointClass, glyphComponentClass=self._glyphComponentClass, glyphAnchorClass=self._glyphAnchorClass)
[ "def", "_instantiateFont", "(", "self", ",", "path", ")", ":", "return", "self", ".", "_fontClass", "(", "path", ",", "libClass", "=", "self", ".", "_libClass", ",", "kerningClass", "=", "self", ".", "_kerningClass", ",", "groupsClass", "=", "self", ".", "_groupsClass", ",", "infoClass", "=", "self", ".", "_infoClass", ",", "featuresClass", "=", "self", ".", "_featuresClass", ",", "glyphClass", "=", "self", ".", "_glyphClass", ",", "glyphContourClass", "=", "self", ".", "_glyphContourClass", ",", "glyphPointClass", "=", "self", ".", "_glyphPointClass", ",", "glyphComponentClass", "=", "self", ".", "_glyphComponentClass", ",", "glyphAnchorClass", "=", "self", ".", "_glyphAnchorClass", ")" ]
39.375
5.5
def create_closure_model(cls):
    """Creates a <Model>Closure model in the same module as the model.

    The closure table stores (parent, child, depth) edges for the
    transitive closure of *cls*'s tree; the created model is attached to
    the class as ``_closure_model`` and returned.
    """
    meta_vals = {'unique_together': (("parent", "child"),)}
    # Mirror a custom db_table, if the source model declares one.
    if getattr(cls._meta, 'db_table', None):
        meta_vals['db_table'] = '%sclosure' % getattr(cls._meta, 'db_table')
    attrs = {
        'parent': models.ForeignKey(
            cls.__name__,
            related_name=cls.closure_parentref()
        ),
        'child': models.ForeignKey(
            cls.__name__,
            related_name=cls.closure_childref()
        ),
        'depth': models.IntegerField(),
        '__module__': cls.__module__,
        '__unicode__': _closure_model_unicode,
        'Meta': type('Meta', (object,), meta_vals),
    }
    model = type('%sClosure' % cls.__name__, (models.Model,), attrs)
    setattr(cls, "_closure_model", model)
    return model
[ "def", "create_closure_model", "(", "cls", ")", ":", "meta_vals", "=", "{", "'unique_together'", ":", "(", "(", "\"parent\"", ",", "\"child\"", ")", ",", ")", "}", "if", "getattr", "(", "cls", ".", "_meta", ",", "'db_table'", ",", "None", ")", ":", "meta_vals", "[", "'db_table'", "]", "=", "'%sclosure'", "%", "getattr", "(", "cls", ".", "_meta", ",", "'db_table'", ")", "model", "=", "type", "(", "'%sClosure'", "%", "cls", ".", "__name__", ",", "(", "models", ".", "Model", ",", ")", ",", "{", "'parent'", ":", "models", ".", "ForeignKey", "(", "cls", ".", "__name__", ",", "related_name", "=", "cls", ".", "closure_parentref", "(", ")", ")", ",", "'child'", ":", "models", ".", "ForeignKey", "(", "cls", ".", "__name__", ",", "related_name", "=", "cls", ".", "closure_childref", "(", ")", ")", ",", "'depth'", ":", "models", ".", "IntegerField", "(", ")", ",", "'__module__'", ":", "cls", ".", "__module__", ",", "'__unicode__'", ":", "_closure_model_unicode", ",", "'Meta'", ":", "type", "(", "'Meta'", ",", "(", "object", ",", ")", ",", "meta_vals", ")", ",", "}", ")", "setattr", "(", "cls", ",", "\"_closure_model\"", ",", "model", ")", "return", "model" ]
36.173913
14.478261
def make(self, analyte):
    """Build the combined filter for the specified analyte(s).

    The filter is composed from the components enabled in
    ``filt.switches``.

    Parameters
    ----------
    analyte : str or array_like
        Name or list of names of analytes.

    Returns
    -------
    array_like
        boolean filter
    """
    if analyte is None:
        analytes = self.analytes
    elif isinstance(analyte, str):
        analytes = [analyte]
    else:
        analytes = analyte
    # A component appears once per analyte for which its switch is on.
    active = [f for f in self.components.keys()
              for a in analytes
              if self.switches[a][f]]
    key = ' & '.join(sorted(active))
    # Remember the composed key for every requested analyte.
    for a in analytes:
        self.keys[a] = key
    return self.make_fromkey(key)
[ "def", "make", "(", "self", ",", "analyte", ")", ":", "if", "analyte", "is", "None", ":", "analyte", "=", "self", ".", "analytes", "elif", "isinstance", "(", "analyte", ",", "str", ")", ":", "analyte", "=", "[", "analyte", "]", "out", "=", "[", "]", "for", "f", "in", "self", ".", "components", ".", "keys", "(", ")", ":", "for", "a", "in", "analyte", ":", "if", "self", ".", "switches", "[", "a", "]", "[", "f", "]", ":", "out", ".", "append", "(", "f", ")", "key", "=", "' & '", ".", "join", "(", "sorted", "(", "out", ")", ")", "for", "a", "in", "analyte", ":", "self", ".", "keys", "[", "a", "]", "=", "key", "return", "self", ".", "make_fromkey", "(", "key", ")" ]
24.7
14.233333
def remove_backslash_r(filename, encoding):
    """Remove carriage-return characters from *filename*, in place.

    The whole file is read into memory and the file is then overwritten
    with the cleaned contents.

    :param filename: path of the file to rewrite
    :param encoding: text encoding used to read and write the file
    :return: None
    """
    # newline='' disables universal-newline translation so that '\r'
    # characters actually reach us. The original passed newline=r'\n'
    # (the two characters backslash-n), which open() rejects with
    # ValueError -- only None, '', '\n', '\r' and '\r\n' are accepted.
    with open(filename, 'r', encoding=encoding, newline='') as reader:
        contents = reader.read()
    contents = contents.replace('\r', '')
    # Write back with translation disabled too, otherwise '\n' would be
    # re-expanded to '\r\n' on Windows; also keep the same encoding
    # (the original rewrote in the platform default). Mode 'w' already
    # truncates, so no explicit truncate() is needed.
    with open(filename, 'w', encoding=encoding, newline='') as writer:
        writer.write(contents)
[ "def", "remove_backslash_r", "(", "filename", ",", "encoding", ")", ":", "with", "open", "(", "filename", ",", "'r'", ",", "encoding", "=", "encoding", ",", "newline", "=", "r'\\n'", ")", "as", "filereader", ":", "contents", "=", "filereader", ".", "read", "(", ")", "contents", "=", "re", ".", "sub", "(", "r'\\r'", ",", "''", ",", "contents", ")", "with", "open", "(", "filename", ",", "\"w\"", ")", "as", "filewriter", ":", "filewriter", ".", "truncate", "(", ")", "filewriter", ".", "write", "(", "contents", ")" ]
30
17.8
def encoded(string, encoding='utf-8'):
    """Cast string to binary_type.

    :param string: six.binary_type or six.text_type
    :param encoding: encoding which the object is forced to
    :return: six.binary_type
    """
    assert isinstance(string, string_types) or isinstance(string, binary_type)
    if isinstance(string, text_type):
        return string.encode(encoding)
    try:
        # Probe whether the bytes are already valid in the target encoding.
        string.decode(encoding)
    except UnicodeDecodeError:
        # They are not: decode as latin1 (best guess, never fails) and
        # re-encode in the requested encoding.
        return string.decode('latin1').encode(encoding)
    else:
        return string
[ "def", "encoded", "(", "string", ",", "encoding", "=", "'utf-8'", ")", ":", "assert", "isinstance", "(", "string", ",", "string_types", ")", "or", "isinstance", "(", "string", ",", "binary_type", ")", "if", "isinstance", "(", "string", ",", "text_type", ")", ":", "return", "string", ".", "encode", "(", "encoding", ")", "try", ":", "# make sure the string can be decoded in the specified encoding ...", "string", ".", "decode", "(", "encoding", ")", "return", "string", "except", "UnicodeDecodeError", ":", "# ... if not use latin1 as best guess to decode the string before encoding as", "# specified.", "return", "string", ".", "decode", "(", "'latin1'", ")", ".", "encode", "(", "encoding", ")" ]
38.611111
17.333333
def __setLock(self, command):
    """Set lock on requests."""
    # Map the command onto the operation class being locked.
    if command in (TURN_ON, TURN_OFF):
        operation = command
    elif command in INV_SOURCES:
        operation = SOURCE
    else:
        operation = ALL
    self._operation = operation
    self._isLocked = True
    self._timer = time.time()
[ "def", "__setLock", "(", "self", ",", "command", ")", ":", "if", "command", "in", "(", "TURN_ON", ",", "TURN_OFF", ")", ":", "self", ".", "_operation", "=", "command", "elif", "command", "in", "INV_SOURCES", ":", "self", ".", "_operation", "=", "SOURCE", "else", ":", "self", ".", "_operation", "=", "ALL", "self", ".", "_isLocked", "=", "True", "self", ".", "_timer", "=", "time", ".", "time", "(", ")" ]
32.3
7.6
def send_worker_queue_message(self, *, batch_id, job_name, entry_point, worker_args, retry_count=0):
    """Send a message to the `worker_queue` for a worker to execute the requested job

    Args:
        batch_id (`str`): Unique ID of the batch the job belongs to
        job_name (`str`): Non-unique ID of the job. This is used to ensure that the same job
        is only scheduled a single time per batch
        entry_point (`dict`): A dictionary providing the entry point information for the worker
        to load the class
        worker_args (`dict`): A dictionary with the arguments required by the worker class (if
        any, can be an empty dictionary)
        retry_count (`int`): The number of times this one job has been attempted to be executed.
        If a job fails to execute after 3 retries it will be marked as failed

    Returns:
        `None`
    """
    try:
        job_id = str(uuid4())
        self.job_queue.send_message(
            MessageBody=json.dumps({
                'batch_id': batch_id,
                'job_id': job_id,
                'job_name': job_name,
                'entry_point': entry_point,
                'worker_args': worker_args,
            }),
            MessageDeduplicationId=job_id,
            MessageGroupId=batch_id,
            MessageAttributes={
                'RetryCount': {
                    'StringValue': str(retry_count),
                    'DataType': 'Number'
                }
            }
        )
        # Only record the job on first submission; retries reuse the row.
        if retry_count == 0:
            job = SchedulerJob()
            job.job_id = job_id
            job.batch_id = batch_id
            job.status = SchedulerStatus.PENDING
            job.data = worker_args
            db.session.add(job)
            db.session.commit()
    except Exception:
        # Was a bare `except:`, which would also swallow SystemExit and
        # KeyboardInterrupt; narrow it so only real errors are logged.
        self.log.exception('Error when processing worker task')
[ "def", "send_worker_queue_message", "(", "self", ",", "*", ",", "batch_id", ",", "job_name", ",", "entry_point", ",", "worker_args", ",", "retry_count", "=", "0", ")", ":", "try", ":", "job_id", "=", "str", "(", "uuid4", "(", ")", ")", "self", ".", "job_queue", ".", "send_message", "(", "MessageBody", "=", "json", ".", "dumps", "(", "{", "'batch_id'", ":", "batch_id", ",", "'job_id'", ":", "job_id", ",", "'job_name'", ":", "job_name", ",", "'entry_point'", ":", "entry_point", ",", "'worker_args'", ":", "worker_args", ",", "}", ")", ",", "MessageDeduplicationId", "=", "job_id", ",", "MessageGroupId", "=", "batch_id", ",", "MessageAttributes", "=", "{", "'RetryCount'", ":", "{", "'StringValue'", ":", "str", "(", "retry_count", ")", ",", "'DataType'", ":", "'Number'", "}", "}", ")", "if", "retry_count", "==", "0", ":", "job", "=", "SchedulerJob", "(", ")", "job", ".", "job_id", "=", "job_id", "job", ".", "batch_id", "=", "batch_id", "job", ".", "status", "=", "SchedulerStatus", ".", "PENDING", "job", ".", "data", "=", "worker_args", "db", ".", "session", ".", "add", "(", "job", ")", "db", ".", "session", ".", "commit", "(", ")", "except", ":", "self", ".", "log", ".", "exception", "(", "'Error when processing worker task'", ")" ]
40.708333
20.395833
def by_median_household_income(self, lower=-1, upper=2 ** 31, zipcode_type=ZipcodeType.Standard, sort_by=SimpleZipcode.median_household_income.name, ascending=False, returns=DEFAULT_LIMIT):
    """
    Search zipcode information by median household income.
    """
    # Assemble the query criteria explicitly, then delegate.
    criteria = dict(
        median_household_income_lower=lower,
        median_household_income_upper=upper,
        sort_by=sort_by,
        zipcode_type=zipcode_type,
        ascending=ascending,
        returns=returns,
    )
    return self.query(**criteria)
[ "def", "by_median_household_income", "(", "self", ",", "lower", "=", "-", "1", ",", "upper", "=", "2", "**", "31", ",", "zipcode_type", "=", "ZipcodeType", ".", "Standard", ",", "sort_by", "=", "SimpleZipcode", ".", "median_household_income", ".", "name", ",", "ascending", "=", "False", ",", "returns", "=", "DEFAULT_LIMIT", ")", ":", "return", "self", ".", "query", "(", "median_household_income_lower", "=", "lower", ",", "median_household_income_upper", "=", "upper", ",", "sort_by", "=", "sort_by", ",", "zipcode_type", "=", "zipcode_type", ",", "ascending", "=", "ascending", ",", "returns", "=", "returns", ",", ")" ]
44.5
14.25
def random(self, num_bytes):
    """
    Get random bytes from YubiHSM.

    The random data is DRBG_CTR seeded on each startup by a hardware
    TRNG, so it should be of very good quality.

    @type num_bytes: integer

    @return: Bytes with random data
    @rtype: string

    @see: L{pyhsm.basic_cmd.YHSM_Cmd_Random}
    """
    cmd = pyhsm.basic_cmd.YHSM_Cmd_Random(self.stick, num_bytes)
    return cmd.execute()
[ "def", "random", "(", "self", ",", "num_bytes", ")", ":", "return", "pyhsm", ".", "basic_cmd", ".", "YHSM_Cmd_Random", "(", "self", ".", "stick", ",", "num_bytes", ")", ".", "execute", "(", ")" ]
28.733333
19.4
def _extract(self, in_tile=None, in_data=None, out_tile=None):
    """Extract data from tile."""
    # Delegate the subset extraction to the configured output driver.
    tile_pairs = [(in_tile, in_data)]
    return self.config.output.extract_subset(
        input_data_tiles=tile_pairs,
        out_tile=out_tile
    )
[ "def", "_extract", "(", "self", ",", "in_tile", "=", "None", ",", "in_data", "=", "None", ",", "out_tile", "=", "None", ")", ":", "return", "self", ".", "config", ".", "output", ".", "extract_subset", "(", "input_data_tiles", "=", "[", "(", "in_tile", ",", "in_data", ")", "]", ",", "out_tile", "=", "out_tile", ")" ]
39.333333
13.833333
def do_list(self, arg):
    """l(ist) [first [,last] | .]

    List source code for the current file.  Without arguments,
    list 11 lines around the current line or continue the previous
    listing.  With . as argument, list 11 lines around the current
    line.  With one argument, list 11 lines starting at that line.
    With two arguments, list the given range; if the second argument is
    less than the first, it is a count.

    The current line in the current frame is indicated by "->".  If an
    exception is being debugged, the line where the exception was
    originally raised or propagated is indicated by ">>", if it differs
    from the current line.
    """
    # Remember the command so a bare Enter continues the listing.
    self.lastcmd = 'list'
    last = None
    if arg and arg != '.':
        try:
            if ',' in arg:
                # Two arguments: explicit first,last range.
                first, last = arg.split(',')
                first = int(first.strip())
                last = int(last.strip())
                if last < first:
                    # assume it's a count
                    last = first + last
            else:
                # One argument: center an 11-line window on it.
                first = int(arg.strip())
                first = max(1, first - 5)
        except ValueError:
            self.error('Error in argument: %r' % arg)
            return
    elif self.lineno is None or arg == '.':
        # No previous listing (or explicit '.'): center on current line.
        first = max(1, self.curframe.f_lineno - 5)
    else:
        # Continue where the previous listing stopped.
        first = self.lineno + 1
    if last is None:
        last = first + 10
    filename = self.curframe.f_code.co_filename
    breaklist = self.get_file_breaks(filename)
    try:
        lines = linecache.getlines(filename, self.curframe.f_globals)
        self._print_lines(lines[first-1:last], first, breaklist, self.curframe)
        # Record the last printed line so the next 'list' continues.
        self.lineno = min(last, len(lines))
        if len(lines) < last:
            self.message('[EOF]')
    except KeyboardInterrupt:
        # Allow Ctrl-C to abort a long listing quietly.
        pass
[ "def", "do_list", "(", "self", ",", "arg", ")", ":", "self", ".", "lastcmd", "=", "'list'", "last", "=", "None", "if", "arg", "and", "arg", "!=", "'.'", ":", "try", ":", "if", "','", "in", "arg", ":", "first", ",", "last", "=", "arg", ".", "split", "(", "','", ")", "first", "=", "int", "(", "first", ".", "strip", "(", ")", ")", "last", "=", "int", "(", "last", ".", "strip", "(", ")", ")", "if", "last", "<", "first", ":", "# assume it's a count", "last", "=", "first", "+", "last", "else", ":", "first", "=", "int", "(", "arg", ".", "strip", "(", ")", ")", "first", "=", "max", "(", "1", ",", "first", "-", "5", ")", "except", "ValueError", ":", "self", ".", "error", "(", "'Error in argument: %r'", "%", "arg", ")", "return", "elif", "self", ".", "lineno", "is", "None", "or", "arg", "==", "'.'", ":", "first", "=", "max", "(", "1", ",", "self", ".", "curframe", ".", "f_lineno", "-", "5", ")", "else", ":", "first", "=", "self", ".", "lineno", "+", "1", "if", "last", "is", "None", ":", "last", "=", "first", "+", "10", "filename", "=", "self", ".", "curframe", ".", "f_code", ".", "co_filename", "breaklist", "=", "self", ".", "get_file_breaks", "(", "filename", ")", "try", ":", "lines", "=", "linecache", ".", "getlines", "(", "filename", ",", "self", ".", "curframe", ".", "f_globals", ")", "self", ".", "_print_lines", "(", "lines", "[", "first", "-", "1", ":", "last", "]", ",", "first", ",", "breaklist", ",", "self", ".", "curframe", ")", "self", ".", "lineno", "=", "min", "(", "last", ",", "len", "(", "lines", ")", ")", "if", "len", "(", "lines", ")", "<", "last", ":", "self", ".", "message", "(", "'[EOF]'", ")", "except", "KeyboardInterrupt", ":", "pass" ]
40.469388
15.489796
def create_random_seq(character, action_metadata, direction, length=8):
    """Create a sequence that begins at a uniformly random start index."""
    # Draw a scalar start position in [0, action_metadata[1]).
    random_start = tf.random.uniform(
        [], maxval=action_metadata[1], dtype=tf.int32)
    return create_seq(character, action_metadata, direction, length,
                      random_start)
[ "def", "create_random_seq", "(", "character", ",", "action_metadata", ",", "direction", ",", "length", "=", "8", ")", ":", "start", "=", "tf", ".", "random", ".", "uniform", "(", "[", "]", ",", "maxval", "=", "action_metadata", "[", "1", "]", ",", "dtype", "=", "tf", ".", "int32", ")", "return", "create_seq", "(", "character", ",", "action_metadata", ",", "direction", ",", "length", ",", "start", ")" ]
63
24.5
def command(cmd):
    """Execute command and raise an exception upon an error.

    >>> 'README' in command('ls')
    True
    >>> command('nonexistingcommand') #doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    SdistCreationError
    """
    status, out = commands.getstatusoutput(cmd)
    # Fixed: `status is not 0` tested object identity and only worked by
    # accident of CPython's small-int caching; compare by value instead.
    if status != 0:
        logger.error("Something went wrong:")
        logger.error(out)
        raise SdistCreationError()
    return out
[ "def", "command", "(", "cmd", ")", ":", "status", ",", "out", "=", "commands", ".", "getstatusoutput", "(", "cmd", ")", "if", "status", "is", "not", "0", ":", "logger", ".", "error", "(", "\"Something went wrong:\"", ")", "logger", ".", "error", "(", "out", ")", "raise", "SdistCreationError", "(", ")", "return", "out" ]
26.470588
16.529412
def convert_shortcut_quick_reply(items):
    """
    support shortcut
    [{'title':'title', 'payload':'payload'}]
    """
    # Anything that is not a list (including None) passes through untouched.
    if items is None or not isinstance(items, list):
        return items
    converted = []
    for entry in items:
        if isinstance(entry, QuickReply):
            converted.append(entry)
        elif isinstance(entry, dict):
            converted.append(QuickReply(title=entry.get('title'),
                                        payload=entry.get('payload')))
        else:
            raise ValueError('Invalid quick_replies variables')
    return converted
[ "def", "convert_shortcut_quick_reply", "(", "items", ")", ":", "if", "items", "is", "not", "None", "and", "isinstance", "(", "items", ",", "list", ")", ":", "result", "=", "[", "]", "for", "item", "in", "items", ":", "if", "isinstance", "(", "item", ",", "QuickReply", ")", ":", "result", ".", "append", "(", "item", ")", "elif", "isinstance", "(", "item", ",", "dict", ")", ":", "result", ".", "append", "(", "QuickReply", "(", "title", "=", "item", ".", "get", "(", "'title'", ")", ",", "payload", "=", "item", ".", "get", "(", "'payload'", ")", ")", ")", "else", ":", "raise", "ValueError", "(", "'Invalid quick_replies variables'", ")", "return", "result", "else", ":", "return", "items" ]
38.8125
15.5625
def load_lines(filename):
    """
    Load a text file as an array of lines.

    Args:
        filename: Path to the input file.

    Returns:
        An array of strings, each representing an individual line.
    """
    with open(filename, 'r', encoding='utf-8') as f:
        # Iterate the file object directly instead of materializing
        # readlines() first; only the trailing newline is stripped.
        return [line.rstrip('\n') for line in f]
[ "def", "load_lines", "(", "filename", ")", ":", "with", "open", "(", "filename", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "return", "[", "line", ".", "rstrip", "(", "'\\n'", ")", "for", "line", "in", "f", ".", "readlines", "(", ")", "]" ]
24.692308
19.615385
def abspath(myPath): import sys, os """ Get absolute path to resource, works for dev and for PyInstaller """ try: # PyInstaller creates a temp folder and stores path in _MEIPASS base_path = sys._MEIPASS return os.path.join(base_path, os.path.basename(myPath)) except Exception: base_path = os.path.abspath(os.path.dirname(__file__)) return os.path.join(base_path, myPath)
[ "def", "abspath", "(", "myPath", ")", ":", "import", "sys", ",", "os", "try", ":", "# PyInstaller creates a temp folder and stores path in _MEIPASS", "base_path", "=", "sys", ".", "_MEIPASS", "return", "os", ".", "path", ".", "join", "(", "base_path", ",", "os", ".", "path", ".", "basename", "(", "myPath", ")", ")", "except", "Exception", ":", "base_path", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", "return", "os", ".", "path", ".", "join", "(", "base_path", ",", "myPath", ")" ]
41.8
18.4
def parse_with_retrieved(self, retrieved): """ Receives in input a dictionary of retrieved nodes. Does all the logic here. """ from aiida.common.exceptions import InvalidOperation import os output_path = None error_path = None try: output_path, error_path = self._fetch_output_files(retrieved) except InvalidOperation: raise except IOError as e: self.logger.error(e.message) return False, () if output_path is None and error_path is None: self.logger.error("No output files found") return False, () return True, self._get_output_nodes(output_path, error_path)
[ "def", "parse_with_retrieved", "(", "self", ",", "retrieved", ")", ":", "from", "aiida", ".", "common", ".", "exceptions", "import", "InvalidOperation", "import", "os", "output_path", "=", "None", "error_path", "=", "None", "try", ":", "output_path", ",", "error_path", "=", "self", ".", "_fetch_output_files", "(", "retrieved", ")", "except", "InvalidOperation", ":", "raise", "except", "IOError", "as", "e", ":", "self", ".", "logger", ".", "error", "(", "e", ".", "message", ")", "return", "False", ",", "(", ")", "if", "output_path", "is", "None", "and", "error_path", "is", "None", ":", "self", ".", "logger", ".", "error", "(", "\"No output files found\"", ")", "return", "False", ",", "(", ")", "return", "True", ",", "self", ".", "_get_output_nodes", "(", "output_path", ",", "error_path", ")" ]
31.130435
17.565217
def visit_FunctionDef(self, node): """Visit a function node.""" node = self.get_function_node(node) if node is not None: node._async = False
[ "def", "visit_FunctionDef", "(", "self", ",", "node", ")", ":", "node", "=", "self", ".", "get_function_node", "(", "node", ")", "if", "node", "is", "not", "None", ":", "node", ".", "_async", "=", "False" ]
34.4
6
def generate_certificates(base_dir, *peer_names, pubKeyDir=None, secKeyDir=None, sigKeyDir=None, verkeyDir=None, clean=True): ''' Generate client and server CURVE certificate files''' pubKeyDir = pubKeyDir or 'public_keys' secKeyDir = secKeyDir or 'private_keys' verkeyDir = verkeyDir or 'verif_keys' sigKeyDir = sigKeyDir or 'sig_keys' # keys_dir = os.path.join(base_dir, 'certificates') e_keys_dir = os.path.join(base_dir, '_enc') s_keys_dir = os.path.join(base_dir, '_sig') public_keys_dir = os.path.join(base_dir, pubKeyDir) secret_keys_dir = os.path.join(base_dir, secKeyDir) ver_keys_dir = os.path.join(base_dir, verkeyDir) sig_keys_dir = os.path.join(base_dir, sigKeyDir) # Create directories for certificates, remove old content if necessary for d in [e_keys_dir, s_keys_dir, public_keys_dir, secret_keys_dir, ver_keys_dir, sig_keys_dir]: if clean and os.path.exists(d): shutil.rmtree(d) os.makedirs(d, exist_ok=True) # create new keys in certificates dir for peer_name in peer_names: createEncAndSigKeys(e_keys_dir, s_keys_dir, peer_name) # move public keys to appropriate directory for keys_dir, pkdir, skdir in [ (e_keys_dir, public_keys_dir, secret_keys_dir), (s_keys_dir, ver_keys_dir, sig_keys_dir) ]: moveKeyFilesToCorrectLocations(keys_dir, pkdir, skdir) shutil.rmtree(e_keys_dir) shutil.rmtree(s_keys_dir) print('Public keys in {}'.format(public_keys_dir)) print('Private keys in {}'.format(secret_keys_dir)) print('Verification keys in {}'.format(ver_keys_dir)) print('Signing keys in {}'.format(sig_keys_dir))
[ "def", "generate_certificates", "(", "base_dir", ",", "*", "peer_names", ",", "pubKeyDir", "=", "None", ",", "secKeyDir", "=", "None", ",", "sigKeyDir", "=", "None", ",", "verkeyDir", "=", "None", ",", "clean", "=", "True", ")", ":", "pubKeyDir", "=", "pubKeyDir", "or", "'public_keys'", "secKeyDir", "=", "secKeyDir", "or", "'private_keys'", "verkeyDir", "=", "verkeyDir", "or", "'verif_keys'", "sigKeyDir", "=", "sigKeyDir", "or", "'sig_keys'", "# keys_dir = os.path.join(base_dir, 'certificates')", "e_keys_dir", "=", "os", ".", "path", ".", "join", "(", "base_dir", ",", "'_enc'", ")", "s_keys_dir", "=", "os", ".", "path", ".", "join", "(", "base_dir", ",", "'_sig'", ")", "public_keys_dir", "=", "os", ".", "path", ".", "join", "(", "base_dir", ",", "pubKeyDir", ")", "secret_keys_dir", "=", "os", ".", "path", ".", "join", "(", "base_dir", ",", "secKeyDir", ")", "ver_keys_dir", "=", "os", ".", "path", ".", "join", "(", "base_dir", ",", "verkeyDir", ")", "sig_keys_dir", "=", "os", ".", "path", ".", "join", "(", "base_dir", ",", "sigKeyDir", ")", "# Create directories for certificates, remove old content if necessary", "for", "d", "in", "[", "e_keys_dir", ",", "s_keys_dir", ",", "public_keys_dir", ",", "secret_keys_dir", ",", "ver_keys_dir", ",", "sig_keys_dir", "]", ":", "if", "clean", "and", "os", ".", "path", ".", "exists", "(", "d", ")", ":", "shutil", ".", "rmtree", "(", "d", ")", "os", ".", "makedirs", "(", "d", ",", "exist_ok", "=", "True", ")", "# create new keys in certificates dir", "for", "peer_name", "in", "peer_names", ":", "createEncAndSigKeys", "(", "e_keys_dir", ",", "s_keys_dir", ",", "peer_name", ")", "# move public keys to appropriate directory", "for", "keys_dir", ",", "pkdir", ",", "skdir", "in", "[", "(", "e_keys_dir", ",", "public_keys_dir", ",", "secret_keys_dir", ")", ",", "(", "s_keys_dir", ",", "ver_keys_dir", ",", "sig_keys_dir", ")", "]", ":", "moveKeyFilesToCorrectLocations", "(", "keys_dir", ",", "pkdir", ",", "skdir", ")", "shutil", ".", 
"rmtree", "(", "e_keys_dir", ")", "shutil", ".", "rmtree", "(", "s_keys_dir", ")", "print", "(", "'Public keys in {}'", ".", "format", "(", "public_keys_dir", ")", ")", "print", "(", "'Private keys in {}'", ".", "format", "(", "secret_keys_dir", ")", ")", "print", "(", "'Verification keys in {}'", ".", "format", "(", "ver_keys_dir", ")", ")", "print", "(", "'Signing keys in {}'", ".", "format", "(", "sig_keys_dir", ")", ")" ]
39.976744
17
def role_get(name, user=None, host=None, port=None, maintenance_db=None, password=None, runas=None, return_password=False): ''' Return a dict with information about users of a Postgres server. Set return_password to True to get password hash in the result. CLI Example: .. code-block:: bash salt '*' postgres.role_get postgres ''' all_users = user_list(user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas, return_password=return_password) try: return all_users.get(name, None) except AttributeError: log.error('Could not retrieve Postgres role. Is Postgres running?') return None
[ "def", "role_get", "(", "name", ",", "user", "=", "None", ",", "host", "=", "None", ",", "port", "=", "None", ",", "maintenance_db", "=", "None", ",", "password", "=", "None", ",", "runas", "=", "None", ",", "return_password", "=", "False", ")", ":", "all_users", "=", "user_list", "(", "user", "=", "user", ",", "host", "=", "host", ",", "port", "=", "port", ",", "maintenance_db", "=", "maintenance_db", ",", "password", "=", "password", ",", "runas", "=", "runas", ",", "return_password", "=", "return_password", ")", "try", ":", "return", "all_users", ".", "get", "(", "name", ",", "None", ")", "except", "AttributeError", ":", "log", ".", "error", "(", "'Could not retrieve Postgres role. Is Postgres running?'", ")", "return", "None" ]
33.56
21.32
def is_storage(url, storage=None): """ Check if file is a local file or a storage file. File is considered local if: - URL is a local path. - URL starts by "file://" - a "storage" is provided. Args: url (str): file path or URL storage (str): Storage name. Returns: bool: return True if file is local. """ if storage: return True split_url = url.split('://', 1) if len(split_url) == 2 and split_url[0].lower() != 'file': return True return False
[ "def", "is_storage", "(", "url", ",", "storage", "=", "None", ")", ":", "if", "storage", ":", "return", "True", "split_url", "=", "url", ".", "split", "(", "'://'", ",", "1", ")", "if", "len", "(", "split_url", ")", "==", "2", "and", "split_url", "[", "0", "]", ".", "lower", "(", ")", "!=", "'file'", ":", "return", "True", "return", "False" ]
24.090909
16.272727
def define_from_values(cls, xdtu, ydtu, zdtu, xdtu_0, ydtu_0, zdtu_0): """Define class object from from provided values. Parameters ---------- xdtu : float XDTU fits keyword value. ydtu : float YDTU fits keyword value. zdtu : float ZDTU fits keyword value. xdtu_0 : float XDTU_0 fits keyword value. ydtu_0 : float YDTU_0 fits keyword value. zdtu_0 : float ZDTU_0 fits keyword value. """ self = DtuConfiguration() # define DTU variables self.xdtu = xdtu self.ydtu = ydtu self.zdtu = zdtu self.xdtu_0 = xdtu_0 self.ydtu_0 = ydtu_0 self.zdtu_0 = zdtu_0 return self
[ "def", "define_from_values", "(", "cls", ",", "xdtu", ",", "ydtu", ",", "zdtu", ",", "xdtu_0", ",", "ydtu_0", ",", "zdtu_0", ")", ":", "self", "=", "DtuConfiguration", "(", ")", "# define DTU variables", "self", ".", "xdtu", "=", "xdtu", "self", ".", "ydtu", "=", "ydtu", "self", ".", "zdtu", "=", "zdtu", "self", ".", "xdtu_0", "=", "xdtu_0", "self", ".", "ydtu_0", "=", "ydtu_0", "self", ".", "zdtu_0", "=", "zdtu_0", "return", "self" ]
26.206897
15.448276
def replay_scope(self, sess): """Enters a replay scope that unsets it at the end.""" current_replay = self.replay(sess) try: self.set_replay(sess, True) yield finally: self.set_replay(sess, current_replay)
[ "def", "replay_scope", "(", "self", ",", "sess", ")", ":", "current_replay", "=", "self", ".", "replay", "(", "sess", ")", "try", ":", "self", ".", "set_replay", "(", "sess", ",", "True", ")", "yield", "finally", ":", "self", ".", "set_replay", "(", "sess", ",", "current_replay", ")" ]
29
14
def _load_plugins(self): ''' Sets up all plugins, defaults and settings.py ''' plugins = self.settings['PLUGINS'] self.plugins_dict = {} for key in plugins: # skip loading the plugin if its value is None if plugins[key] is None: continue # valid plugin, import and setup self.logger.debug("Trying to load plugin {cls}".format(cls=key)) the_class = self._import_class(key) instance = the_class() instance._set_logger(self.logger) if not self.unit_test: instance.setup(self.settings) the_schema = None with open(self.settings['PLUGIN_DIR'] + instance.schema) as the_file: the_schema = json.load(the_file) mini = {} mini['instance'] = instance mini['schema'] = the_schema self.logger.debug("Successfully loaded plugin {cls}".format(cls=key)) self.plugins_dict[plugins[key]] = mini self.plugins_dict = OrderedDict(sorted(list(self.plugins_dict.items()), key=lambda t: t[0]))
[ "def", "_load_plugins", "(", "self", ")", ":", "plugins", "=", "self", ".", "settings", "[", "'PLUGINS'", "]", "self", ".", "plugins_dict", "=", "{", "}", "for", "key", "in", "plugins", ":", "# skip loading the plugin if its value is None", "if", "plugins", "[", "key", "]", "is", "None", ":", "continue", "# valid plugin, import and setup", "self", ".", "logger", ".", "debug", "(", "\"Trying to load plugin {cls}\"", ".", "format", "(", "cls", "=", "key", ")", ")", "the_class", "=", "self", ".", "_import_class", "(", "key", ")", "instance", "=", "the_class", "(", ")", "instance", ".", "_set_logger", "(", "self", ".", "logger", ")", "if", "not", "self", ".", "unit_test", ":", "instance", ".", "setup", "(", "self", ".", "settings", ")", "the_schema", "=", "None", "with", "open", "(", "self", ".", "settings", "[", "'PLUGIN_DIR'", "]", "+", "instance", ".", "schema", ")", "as", "the_file", ":", "the_schema", "=", "json", ".", "load", "(", "the_file", ")", "mini", "=", "{", "}", "mini", "[", "'instance'", "]", "=", "instance", "mini", "[", "'schema'", "]", "=", "the_schema", "self", ".", "logger", ".", "debug", "(", "\"Successfully loaded plugin {cls}\"", ".", "format", "(", "cls", "=", "key", ")", ")", "self", ".", "plugins_dict", "[", "plugins", "[", "key", "]", "]", "=", "mini", "self", ".", "plugins_dict", "=", "OrderedDict", "(", "sorted", "(", "list", "(", "self", ".", "plugins_dict", ".", "items", "(", ")", ")", ",", "key", "=", "lambda", "t", ":", "t", "[", "0", "]", ")", ")" ]
39.166667
17.9
def highres_imu_encode(self, time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag, abs_pressure, diff_pressure, pressure_alt, temperature, fields_updated): ''' The IMU readings in SI units in NED body frame time_usec : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t) xacc : X acceleration (m/s^2) (float) yacc : Y acceleration (m/s^2) (float) zacc : Z acceleration (m/s^2) (float) xgyro : Angular speed around X axis (rad / sec) (float) ygyro : Angular speed around Y axis (rad / sec) (float) zgyro : Angular speed around Z axis (rad / sec) (float) xmag : X Magnetic field (Gauss) (float) ymag : Y Magnetic field (Gauss) (float) zmag : Z Magnetic field (Gauss) (float) abs_pressure : Absolute pressure in millibar (float) diff_pressure : Differential pressure in millibar (float) pressure_alt : Altitude calculated from pressure (float) temperature : Temperature in degrees celsius (float) fields_updated : Bitmask for fields that have updated since last message, bit 0 = xacc, bit 12: temperature (uint16_t) ''' return MAVLink_highres_imu_message(time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag, abs_pressure, diff_pressure, pressure_alt, temperature, fields_updated)
[ "def", "highres_imu_encode", "(", "self", ",", "time_usec", ",", "xacc", ",", "yacc", ",", "zacc", ",", "xgyro", ",", "ygyro", ",", "zgyro", ",", "xmag", ",", "ymag", ",", "zmag", ",", "abs_pressure", ",", "diff_pressure", ",", "pressure_alt", ",", "temperature", ",", "fields_updated", ")", ":", "return", "MAVLink_highres_imu_message", "(", "time_usec", ",", "xacc", ",", "yacc", ",", "zacc", ",", "xgyro", ",", "ygyro", ",", "zgyro", ",", "xmag", ",", "ymag", ",", "zmag", ",", "abs_pressure", ",", "diff_pressure", ",", "pressure_alt", ",", "temperature", ",", "fields_updated", ")" ]
80.954545
52.045455
def remove_from_group(self, group, user): """ Remove a user from a group :type user: str :param user: User's email :type group: str :param group: Group name :rtype: dict :return: an empty dictionary """ data = {'group': group, 'user': user} return self.post('removeUserFromGroup', data)
[ "def", "remove_from_group", "(", "self", ",", "group", ",", "user", ")", ":", "data", "=", "{", "'group'", ":", "group", ",", "'user'", ":", "user", "}", "return", "self", ".", "post", "(", "'removeUserFromGroup'", ",", "data", ")" ]
24.2
14.466667
def log_level(io_handler, level=None, name=None): """ Prints/Changes log level """ # Get the logger logger = logging.getLogger(name) # Normalize the name if not name: name = "Root" if not level: # Level not given: print the logger level io_handler.write_line( "{0} log level: {1} (real: {2})", name, logging.getLevelName(logger.getEffectiveLevel()), logging.getLevelName(logger.level), ) else: # Set the logger level try: logger.setLevel(level.upper()) io_handler.write_line("New level for {0}: {1}", name, level) except ValueError: io_handler.write_line("Invalid log level: {0}", level)
[ "def", "log_level", "(", "io_handler", ",", "level", "=", "None", ",", "name", "=", "None", ")", ":", "# Get the logger", "logger", "=", "logging", ".", "getLogger", "(", "name", ")", "# Normalize the name", "if", "not", "name", ":", "name", "=", "\"Root\"", "if", "not", "level", ":", "# Level not given: print the logger level", "io_handler", ".", "write_line", "(", "\"{0} log level: {1} (real: {2})\"", ",", "name", ",", "logging", ".", "getLevelName", "(", "logger", ".", "getEffectiveLevel", "(", ")", ")", ",", "logging", ".", "getLevelName", "(", "logger", ".", "level", ")", ",", ")", "else", ":", "# Set the logger level", "try", ":", "logger", ".", "setLevel", "(", "level", ".", "upper", "(", ")", ")", "io_handler", ".", "write_line", "(", "\"New level for {0}: {1}\"", ",", "name", ",", "level", ")", "except", "ValueError", ":", "io_handler", ".", "write_line", "(", "\"Invalid log level: {0}\"", ",", "level", ")" ]
32
16.461538
def only_for(theme, redirect_to='/', raise_error=None): """ Decorator for restrict access to views according by list of themes. Params: * ``theme`` - string or list of themes where decorated view must be * ``redirect_to`` - url or name of url pattern for redirect if CURRENT_THEME not in themes * ``raise_error`` - error class for raising Example: .. code:: python # views.py from django_vest import only_for @only_for('black_theme') def my_view(request): ... """ def check_theme(*args, **kwargs): if isinstance(theme, six.string_types): themes = (theme,) else: themes = theme if settings.CURRENT_THEME is None: return True result = settings.CURRENT_THEME in themes if not result and raise_error is not None: raise raise_error return result return user_passes_test(check_theme, login_url=redirect_to)
[ "def", "only_for", "(", "theme", ",", "redirect_to", "=", "'/'", ",", "raise_error", "=", "None", ")", ":", "def", "check_theme", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "theme", ",", "six", ".", "string_types", ")", ":", "themes", "=", "(", "theme", ",", ")", "else", ":", "themes", "=", "theme", "if", "settings", ".", "CURRENT_THEME", "is", "None", ":", "return", "True", "result", "=", "settings", ".", "CURRENT_THEME", "in", "themes", "if", "not", "result", "and", "raise_error", "is", "not", "None", ":", "raise", "raise_error", "return", "result", "return", "user_passes_test", "(", "check_theme", ",", "login_url", "=", "redirect_to", ")" ]
25.076923
22.205128
def create_sdb(self, name, category_id, owner, description="", user_group_permissions=None, iam_principal_permissions=None): """Create a safe deposit box. You need to refresh your token before the iam role is granted permission to the new safe deposit box. Keyword arguments: name (string) -- name of the safe deposit box category_id (string) -- category id that determines where to store the sdb. (ex: shared, applications) owner (string) -- AD group that owns the safe deposit box description (string) -- Description of the safe deposit box user_group_permissions (list) -- list of dictionaries containing the key name and maybe role_id iam_principal_permissions (list) -- list of dictionaries containing the key name iam_principal_arn and role_id """ # Do some sanity checking if user_group_permissions is None: user_group_permissions = [] if iam_principal_permissions is None: iam_principal_permissions = [] if list != type(user_group_permissions): raise(TypeError('Expected list, but got ' + str(type(user_group_permissions)))) if list != type(iam_principal_permissions): raise(TypeError('Expected list, but got ' + str(type(iam_principal_permissions)))) temp_data = { "name": name, "description": description, "category_id": category_id, "owner": owner, } if len(user_group_permissions) > 0: temp_data["user_group_permissions"] = user_group_permissions if len(iam_principal_permissions) > 0: temp_data["iam_principal_permissions"] = iam_principal_permissions data = json.encoder.JSONEncoder().encode(temp_data) sdb_resp = post_with_retry(self.cerberus_url + '/v2/safe-deposit-box', data=str(data), headers=self.HEADERS) throw_if_bad_response(sdb_resp) return sdb_resp.json()
[ "def", "create_sdb", "(", "self", ",", "name", ",", "category_id", ",", "owner", ",", "description", "=", "\"\"", ",", "user_group_permissions", "=", "None", ",", "iam_principal_permissions", "=", "None", ")", ":", "# Do some sanity checking", "if", "user_group_permissions", "is", "None", ":", "user_group_permissions", "=", "[", "]", "if", "iam_principal_permissions", "is", "None", ":", "iam_principal_permissions", "=", "[", "]", "if", "list", "!=", "type", "(", "user_group_permissions", ")", ":", "raise", "(", "TypeError", "(", "'Expected list, but got '", "+", "str", "(", "type", "(", "user_group_permissions", ")", ")", ")", ")", "if", "list", "!=", "type", "(", "iam_principal_permissions", ")", ":", "raise", "(", "TypeError", "(", "'Expected list, but got '", "+", "str", "(", "type", "(", "iam_principal_permissions", ")", ")", ")", ")", "temp_data", "=", "{", "\"name\"", ":", "name", ",", "\"description\"", ":", "description", ",", "\"category_id\"", ":", "category_id", ",", "\"owner\"", ":", "owner", ",", "}", "if", "len", "(", "user_group_permissions", ")", ">", "0", ":", "temp_data", "[", "\"user_group_permissions\"", "]", "=", "user_group_permissions", "if", "len", "(", "iam_principal_permissions", ")", ">", "0", ":", "temp_data", "[", "\"iam_principal_permissions\"", "]", "=", "iam_principal_permissions", "data", "=", "json", ".", "encoder", ".", "JSONEncoder", "(", ")", ".", "encode", "(", "temp_data", ")", "sdb_resp", "=", "post_with_retry", "(", "self", ".", "cerberus_url", "+", "'/v2/safe-deposit-box'", ",", "data", "=", "str", "(", "data", ")", ",", "headers", "=", "self", ".", "HEADERS", ")", "throw_if_bad_response", "(", "sdb_resp", ")", "return", "sdb_resp", ".", "json", "(", ")" ]
51.384615
25.025641
def execute(self): """ This function Executes the program with set arguments. """ prog_cmd = self.get_cmd().strip() if prog_cmd == '': self.status = 'Failure' debug.log("Error: No program to execute for %s!"%self.name) debug.log(("Could not combine path and arguments into cmdline:" "\n%s %s)\n")%(self.path, ' '.join(self.args))) else: debug.log("\n\nExecute %s...\n%s" % (self.name, prog_cmd)) # Create shell script script = '%s.sh'%self.name if self.wdir != '': script = '%s/%s'%(self.wdir, script) else: script = '%s/%s'%(os.getcwd(), script) with open_(script, 'w') as f: f.write('#!/bin/bash\n') if self.wdir != '': f.write('cd {workdir}\n'.format(workdir=self.wdir)) f.write( ('touch {stdout} {stderr}\n' 'chmod a+r {stdout} {stderr}\n' '{cmd} 1> {stdout} 2> {stderr}\n' 'ec=$?\n').format( stdout=self.stdout, stderr=self.stderr, cmd=prog_cmd ) ) if not self.forcewait: f.write(('if [ "$ec" -ne "0" ]; then echo "Error" >> {stderr}; ' 'else echo "Done" >> {stderr}; fi\n').format( stderr=self.stderr)) f.write('exit $ec\n') os.chmod(script, 0o744) if self.queue is not None: # Setup execution of shell script through TORQUE other_args = '' if self.forcewait: other_args += "-K " # ADDING -K argument if wait() is forced # QSUB INFO :: run_time_limit(walltime, dd:hh:mm:ss), # memory(mem, up to 100GB *gigabyte), # processors(ppn, up to 16) # USE AS LITTLE AS NEEDED! 
cmd = ('/usr/bin/qsub ' '-l nodes=1:ppn={procs},walltime={hours}:00:00,mem={mem}g ' '-r y {workdir_arg} {other_args} {cmd}').format( procs=self.procs, hours=self.walltime, mem=self.mem, workdir_arg="-d %s"%(self.wdir) if self.wdir != '' else '', other_args=other_args, cmd=script) debug.log("\n\nTORQUE SETUP %s...\n%s\n" % (self.name, cmd)) else: cmd = script if self.server is not None: cmd = "ssh {server} {cmd}".format( server=self.server, cmd=quote(cmd) ) self.status = 'Executing' # EXECUTING PROGRAM self.update_timer(-time()) # TIME START if self.forcewait: self.p = Popen(cmd) ec = self.p.wait() if ec == 0: debug.log("Program finished successfully!") self.status = 'Done' else: debug.log("Program failed on execution!") self.status = 'Failure' self.p = None else: # WaitOn should be called to determine if the program has ended debug.log("CMD: %s"%cmd) self.p = Popen(cmd) # shell=True, executable="/bin/bash" self.update_timer(time()) # TIME END debug.log("timed: %s" % (self.get_time()))
[ "def", "execute", "(", "self", ")", ":", "prog_cmd", "=", "self", ".", "get_cmd", "(", ")", ".", "strip", "(", ")", "if", "prog_cmd", "==", "''", ":", "self", ".", "status", "=", "'Failure'", "debug", ".", "log", "(", "\"Error: No program to execute for %s!\"", "%", "self", ".", "name", ")", "debug", ".", "log", "(", "(", "\"Could not combine path and arguments into cmdline:\"", "\"\\n%s %s)\\n\"", ")", "%", "(", "self", ".", "path", ",", "' '", ".", "join", "(", "self", ".", "args", ")", ")", ")", "else", ":", "debug", ".", "log", "(", "\"\\n\\nExecute %s...\\n%s\"", "%", "(", "self", ".", "name", ",", "prog_cmd", ")", ")", "# Create shell script", "script", "=", "'%s.sh'", "%", "self", ".", "name", "if", "self", ".", "wdir", "!=", "''", ":", "script", "=", "'%s/%s'", "%", "(", "self", ".", "wdir", ",", "script", ")", "else", ":", "script", "=", "'%s/%s'", "%", "(", "os", ".", "getcwd", "(", ")", ",", "script", ")", "with", "open_", "(", "script", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "'#!/bin/bash\\n'", ")", "if", "self", ".", "wdir", "!=", "''", ":", "f", ".", "write", "(", "'cd {workdir}\\n'", ".", "format", "(", "workdir", "=", "self", ".", "wdir", ")", ")", "f", ".", "write", "(", "(", "'touch {stdout} {stderr}\\n'", "'chmod a+r {stdout} {stderr}\\n'", "'{cmd} 1> {stdout} 2> {stderr}\\n'", "'ec=$?\\n'", ")", ".", "format", "(", "stdout", "=", "self", ".", "stdout", ",", "stderr", "=", "self", ".", "stderr", ",", "cmd", "=", "prog_cmd", ")", ")", "if", "not", "self", ".", "forcewait", ":", "f", ".", "write", "(", "(", "'if [ \"$ec\" -ne \"0\" ]; then echo \"Error\" >> {stderr}; '", "'else echo \"Done\" >> {stderr}; fi\\n'", ")", ".", "format", "(", "stderr", "=", "self", ".", "stderr", ")", ")", "f", ".", "write", "(", "'exit $ec\\n'", ")", "os", ".", "chmod", "(", "script", ",", "0o744", ")", "if", "self", ".", "queue", "is", "not", "None", ":", "# Setup execution of shell script through TORQUE", "other_args", "=", "''", "if", "self", 
".", "forcewait", ":", "other_args", "+=", "\"-K \"", "# ADDING -K argument if wait() is forced", "# QSUB INFO :: run_time_limit(walltime, dd:hh:mm:ss),", "# memory(mem, up to 100GB *gigabyte),", "# processors(ppn, up to 16) # USE AS LITTLE AS NEEDED!", "cmd", "=", "(", "'/usr/bin/qsub '", "'-l nodes=1:ppn={procs},walltime={hours}:00:00,mem={mem}g '", "'-r y {workdir_arg} {other_args} {cmd}'", ")", ".", "format", "(", "procs", "=", "self", ".", "procs", ",", "hours", "=", "self", ".", "walltime", ",", "mem", "=", "self", ".", "mem", ",", "workdir_arg", "=", "\"-d %s\"", "%", "(", "self", ".", "wdir", ")", "if", "self", ".", "wdir", "!=", "''", "else", "''", ",", "other_args", "=", "other_args", ",", "cmd", "=", "script", ")", "debug", ".", "log", "(", "\"\\n\\nTORQUE SETUP %s...\\n%s\\n\"", "%", "(", "self", ".", "name", ",", "cmd", ")", ")", "else", ":", "cmd", "=", "script", "if", "self", ".", "server", "is", "not", "None", ":", "cmd", "=", "\"ssh {server} {cmd}\"", ".", "format", "(", "server", "=", "self", ".", "server", ",", "cmd", "=", "quote", "(", "cmd", ")", ")", "self", ".", "status", "=", "'Executing'", "# EXECUTING PROGRAM", "self", ".", "update_timer", "(", "-", "time", "(", ")", ")", "# TIME START", "if", "self", ".", "forcewait", ":", "self", ".", "p", "=", "Popen", "(", "cmd", ")", "ec", "=", "self", ".", "p", ".", "wait", "(", ")", "if", "ec", "==", "0", ":", "debug", ".", "log", "(", "\"Program finished successfully!\"", ")", "self", ".", "status", "=", "'Done'", "else", ":", "debug", ".", "log", "(", "\"Program failed on execution!\"", ")", "self", ".", "status", "=", "'Failure'", "self", ".", "p", "=", "None", "else", ":", "# WaitOn should be called to determine if the program has ended", "debug", ".", "log", "(", "\"CMD: %s\"", "%", "cmd", ")", "self", ".", "p", "=", "Popen", "(", "cmd", ")", "# shell=True, executable=\"/bin/bash\"", "self", ".", "update_timer", "(", "time", "(", ")", ")", "# TIME END", "debug", ".", "log", "(", "\"timed: 
%s\"", "%", "(", "self", ".", "get_time", "(", ")", ")", ")" ]
41.775
15.4
def bind(self, args, kwargs): """ Bind arguments and keyword arguments to the encapsulated function. Returns a dictionary of parameters (named according to function parameters) with the values that were bound to each name. """ spec = self._spec resolution = self.resolve(args, kwargs) params = dict(zip(spec.args, resolution.slots)) if spec.varargs: params[spec.varargs] = resolution.varargs if spec.varkw: params[spec.varkw] = resolution.varkw if spec.kwonlyargs: params.update(resolution.kwonlyargs) return params
[ "def", "bind", "(", "self", ",", "args", ",", "kwargs", ")", ":", "spec", "=", "self", ".", "_spec", "resolution", "=", "self", ".", "resolve", "(", "args", ",", "kwargs", ")", "params", "=", "dict", "(", "zip", "(", "spec", ".", "args", ",", "resolution", ".", "slots", ")", ")", "if", "spec", ".", "varargs", ":", "params", "[", "spec", ".", "varargs", "]", "=", "resolution", ".", "varargs", "if", "spec", ".", "varkw", ":", "params", "[", "spec", ".", "varkw", "]", "=", "resolution", ".", "varkw", "if", "spec", ".", "kwonlyargs", ":", "params", ".", "update", "(", "resolution", ".", "kwonlyargs", ")", "return", "params" ]
37.176471
16.117647
def _pin_mongos(self, server): """Pin this session to the given mongos Server.""" self._transaction.sharded = True self._transaction.pinned_address = server.description.address
[ "def", "_pin_mongos", "(", "self", ",", "server", ")", ":", "self", ".", "_transaction", ".", "sharded", "=", "True", "self", ".", "_transaction", ".", "pinned_address", "=", "server", ".", "description", ".", "address" ]
49.25
9.75
def read_ipv6(self, length): """Read Internet Protocol version 6 (IPv6). Structure of IPv6 header [RFC 2460]: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |Version| Traffic Class | Flow Label | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Payload Length | Next Header | Hop Limit | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | + + | | + Source Address + | | + + | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | + + | | + Destination Address + | | + + | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 ip.version Version (6) 0 4 ip.class Traffic Class 1 12 ip.label Flow Label 4 32 ip.payload Payload Length (header excludes) 6 48 ip.next Next Header 7 56 ip.limit Hop Limit 8 64 ip.src Source Address 24 192 ip.dst Destination Address """ if length is None: length = len(self) _htet = self._read_ip_hextet() _plen = self._read_unpack(2) _next = self._read_protos(1) _hlmt = self._read_unpack(1) _srca = self._read_ip_addr() _dsta = self._read_ip_addr() ipv6 = dict( version=_htet[0], tclass=_htet[1], label=_htet[2], payload=_plen, next=_next, limit=_hlmt, src=_srca, dst=_dsta, ) hdr_len = 40 raw_len = ipv6['payload'] ipv6['packet'] = self._read_packet(header=hdr_len, payload=raw_len) return self._decode_next_layer(ipv6, _next, raw_len)
[ "def", "read_ipv6", "(", "self", ",", "length", ")", ":", "if", "length", "is", "None", ":", "length", "=", "len", "(", "self", ")", "_htet", "=", "self", ".", "_read_ip_hextet", "(", ")", "_plen", "=", "self", ".", "_read_unpack", "(", "2", ")", "_next", "=", "self", ".", "_read_protos", "(", "1", ")", "_hlmt", "=", "self", ".", "_read_unpack", "(", "1", ")", "_srca", "=", "self", ".", "_read_ip_addr", "(", ")", "_dsta", "=", "self", ".", "_read_ip_addr", "(", ")", "ipv6", "=", "dict", "(", "version", "=", "_htet", "[", "0", "]", ",", "tclass", "=", "_htet", "[", "1", "]", ",", "label", "=", "_htet", "[", "2", "]", ",", "payload", "=", "_plen", ",", "next", "=", "_next", ",", "limit", "=", "_hlmt", ",", "src", "=", "_srca", ",", "dst", "=", "_dsta", ",", ")", "hdr_len", "=", "40", "raw_len", "=", "ipv6", "[", "'payload'", "]", "ipv6", "[", "'packet'", "]", "=", "self", ".", "_read_packet", "(", "header", "=", "hdr_len", ",", "payload", "=", "raw_len", ")", "return", "self", ".", "_decode_next_layer", "(", "ipv6", ",", "_next", ",", "raw_len", ")" ]
48.954545
26.651515
def left(self, num=None): """ NOT REQUIRED, BUT EXISTS AS OPPOSITE OF right() """ if num == None: return FlatList([_get_list(self)[0]]) if num <= 0: return Null return FlatList(_get_list(self)[:num])
[ "def", "left", "(", "self", ",", "num", "=", "None", ")", ":", "if", "num", "==", "None", ":", "return", "FlatList", "(", "[", "_get_list", "(", "self", ")", "[", "0", "]", "]", ")", "if", "num", "<=", "0", ":", "return", "Null", "return", "FlatList", "(", "_get_list", "(", "self", ")", "[", ":", "num", "]", ")" ]
26.3
13.9
def plot_fit(self,**kwargs): """ Plots the fit of the model Returns ---------- None (plots data and the fit) """ import matplotlib.pyplot as plt import seaborn as sns figsize = kwargs.get('figsize',(10,7)) if self.latent_variables.estimated is False: raise Exception("No latent variables estimated!") else: date_index = self.index[self.lags:self.data[0].shape[0]] mu, Y = self._model(self.latent_variables.get_z_values()) for series in range(0,Y.shape[0]): plt.figure(figsize=figsize) plt.plot(date_index,Y[series],label='Data ' + str(series)) plt.plot(date_index,mu[series],label='Filter' + str(series),c='black') plt.title(self.data_name[series]) plt.legend(loc=2) plt.show()
[ "def", "plot_fit", "(", "self", ",", "*", "*", "kwargs", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "import", "seaborn", "as", "sns", "figsize", "=", "kwargs", ".", "get", "(", "'figsize'", ",", "(", "10", ",", "7", ")", ")", "if", "self", ".", "latent_variables", ".", "estimated", "is", "False", ":", "raise", "Exception", "(", "\"No latent variables estimated!\"", ")", "else", ":", "date_index", "=", "self", ".", "index", "[", "self", ".", "lags", ":", "self", ".", "data", "[", "0", "]", ".", "shape", "[", "0", "]", "]", "mu", ",", "Y", "=", "self", ".", "_model", "(", "self", ".", "latent_variables", ".", "get_z_values", "(", ")", ")", "for", "series", "in", "range", "(", "0", ",", "Y", ".", "shape", "[", "0", "]", ")", ":", "plt", ".", "figure", "(", "figsize", "=", "figsize", ")", "plt", ".", "plot", "(", "date_index", ",", "Y", "[", "series", "]", ",", "label", "=", "'Data '", "+", "str", "(", "series", ")", ")", "plt", ".", "plot", "(", "date_index", ",", "mu", "[", "series", "]", ",", "label", "=", "'Filter'", "+", "str", "(", "series", ")", ",", "c", "=", "'black'", ")", "plt", ".", "title", "(", "self", ".", "data_name", "[", "series", "]", ")", "plt", ".", "legend", "(", "loc", "=", "2", ")", "plt", ".", "show", "(", ")" ]
36.75
18.291667
def PopEvent(self): """Pops an event from the heap. Returns: tuple: containing: int: event timestamp or None if the heap is empty bytes: serialized event or None if the heap is empty """ try: timestamp, serialized_event = heapq.heappop(self._heap) self.data_size -= len(serialized_event) return timestamp, serialized_event except IndexError: return None, None
[ "def", "PopEvent", "(", "self", ")", ":", "try", ":", "timestamp", ",", "serialized_event", "=", "heapq", ".", "heappop", "(", "self", ".", "_heap", ")", "self", ".", "data_size", "-=", "len", "(", "serialized_event", ")", "return", "timestamp", ",", "serialized_event", "except", "IndexError", ":", "return", "None", ",", "None" ]
24.294118
20.882353
def move_to(self, folder): """Moves the email to the folder specified by the folder parameter. Args: folder: A string containing the folder ID the message should be moved to, or a Folder instance """ if isinstance(folder, Folder): self.move_to(folder.id) else: self._move_to(folder)
[ "def", "move_to", "(", "self", ",", "folder", ")", ":", "if", "isinstance", "(", "folder", ",", "Folder", ")", ":", "self", ".", "move_to", "(", "folder", ".", "id", ")", "else", ":", "self", ".", "_move_to", "(", "folder", ")" ]
31.818182
20.727273
def removecolkeyword(self, columnname, keyword): """Remove a column keyword. It is similar to :func:`removekeyword`. """ if isinstance(keyword, str): self._removekeyword(columnname, keyword, -1) else: self._removekeyword(columnname, '', keyword)
[ "def", "removecolkeyword", "(", "self", ",", "columnname", ",", "keyword", ")", ":", "if", "isinstance", "(", "keyword", ",", "str", ")", ":", "self", ".", "_removekeyword", "(", "columnname", ",", "keyword", ",", "-", "1", ")", "else", ":", "self", ".", "_removekeyword", "(", "columnname", ",", "''", ",", "keyword", ")" ]
30.2
15.8
def stats_evaluation(stats): """Generate an evaluation for the given pylint ``stats``.""" statement = stats.get('statement') error = stats.get('error', 0) warning = stats.get('warning', 0) refactor = stats.get('refactor', 0) convention = stats.get('convention', 0) if not statement or statement <= 0: return None malus = float(5 * error + warning + refactor + convention) malus_ratio = malus / statement return 10.0 - (malus_ratio * 10)
[ "def", "stats_evaluation", "(", "stats", ")", ":", "statement", "=", "stats", ".", "get", "(", "'statement'", ")", "error", "=", "stats", ".", "get", "(", "'error'", ",", "0", ")", "warning", "=", "stats", ".", "get", "(", "'warning'", ",", "0", ")", "refactor", "=", "stats", ".", "get", "(", "'refactor'", ",", "0", ")", "convention", "=", "stats", ".", "get", "(", "'convention'", ",", "0", ")", "if", "not", "statement", "or", "statement", "<=", "0", ":", "return", "None", "malus", "=", "float", "(", "5", "*", "error", "+", "warning", "+", "refactor", "+", "convention", ")", "malus_ratio", "=", "malus", "/", "statement", "return", "10.0", "-", "(", "malus_ratio", "*", "10", ")" ]
33.785714
11.5
def translate(self): """Compile the variable lookup.""" ident = self.ident expr = ex_rvalue(VARIABLE_PREFIX + ident) return [expr], set([ident]), set()
[ "def", "translate", "(", "self", ")", ":", "ident", "=", "self", ".", "ident", "expr", "=", "ex_rvalue", "(", "VARIABLE_PREFIX", "+", "ident", ")", "return", "[", "expr", "]", ",", "set", "(", "[", "ident", "]", ")", ",", "set", "(", ")" ]
35.8
9
def parse_cov(cov_table, scaffold2genome): """ calculate genome coverage from scaffold coverage table """ size = {} # size[genome] = genome size mapped = {} # mapped[genome][sample] = mapped bases # parse coverage files for line in open(cov_table): line = line.strip().split('\t') if line[0].startswith('#'): samples = line[1:] samples = [i.rsplit('/', 1)[-1].split('.', 1)[0] for i in samples] continue scaffold, length = line[0].split(': ') length = float(length) covs = [float(i) for i in line[1:]] bases = [c * length for c in covs] if scaffold not in scaffold2genome: continue genome = scaffold2genome[scaffold] if genome not in size: size[genome] = 0 mapped[genome] = {sample:0 for sample in samples} # keep track of genome size size[genome] += length # keep track of number of mapped bases for sample, count in zip(samples, bases): mapped[genome][sample] += count # calculate coverage from base counts and genome size coverage = {'genome':[], 'genome size (bp)':[], 'sample':[], 'coverage':[]} for genome, length in size.items(): for sample in samples: cov = mapped[genome][sample] / length coverage['genome'].append(genome) coverage['genome size (bp)'].append(length) coverage['sample'].append(sample) coverage['coverage'].append(cov) return pd.DataFrame(coverage)
[ "def", "parse_cov", "(", "cov_table", ",", "scaffold2genome", ")", ":", "size", "=", "{", "}", "# size[genome] = genome size", "mapped", "=", "{", "}", "# mapped[genome][sample] = mapped bases", "# parse coverage files", "for", "line", "in", "open", "(", "cov_table", ")", ":", "line", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "'\\t'", ")", "if", "line", "[", "0", "]", ".", "startswith", "(", "'#'", ")", ":", "samples", "=", "line", "[", "1", ":", "]", "samples", "=", "[", "i", ".", "rsplit", "(", "'/'", ",", "1", ")", "[", "-", "1", "]", ".", "split", "(", "'.'", ",", "1", ")", "[", "0", "]", "for", "i", "in", "samples", "]", "continue", "scaffold", ",", "length", "=", "line", "[", "0", "]", ".", "split", "(", "': '", ")", "length", "=", "float", "(", "length", ")", "covs", "=", "[", "float", "(", "i", ")", "for", "i", "in", "line", "[", "1", ":", "]", "]", "bases", "=", "[", "c", "*", "length", "for", "c", "in", "covs", "]", "if", "scaffold", "not", "in", "scaffold2genome", ":", "continue", "genome", "=", "scaffold2genome", "[", "scaffold", "]", "if", "genome", "not", "in", "size", ":", "size", "[", "genome", "]", "=", "0", "mapped", "[", "genome", "]", "=", "{", "sample", ":", "0", "for", "sample", "in", "samples", "}", "# keep track of genome size", "size", "[", "genome", "]", "+=", "length", "# keep track of number of mapped bases", "for", "sample", ",", "count", "in", "zip", "(", "samples", ",", "bases", ")", ":", "mapped", "[", "genome", "]", "[", "sample", "]", "+=", "count", "# calculate coverage from base counts and genome size", "coverage", "=", "{", "'genome'", ":", "[", "]", ",", "'genome size (bp)'", ":", "[", "]", ",", "'sample'", ":", "[", "]", ",", "'coverage'", ":", "[", "]", "}", "for", "genome", ",", "length", "in", "size", ".", "items", "(", ")", ":", "for", "sample", "in", "samples", ":", "cov", "=", "mapped", "[", "genome", "]", "[", "sample", "]", "/", "length", "coverage", "[", "'genome'", "]", ".", "append", "(", "genome", 
")", "coverage", "[", "'genome size (bp)'", "]", ".", "append", "(", "length", ")", "coverage", "[", "'sample'", "]", ".", "append", "(", "sample", ")", "coverage", "[", "'coverage'", "]", ".", "append", "(", "cov", ")", "return", "pd", ".", "DataFrame", "(", "coverage", ")" ]
40.473684
9.736842
def gsea_edb_parser(results_path, index=0): """Parse results.edb file stored under **edb** file folder. :param results_path: the .results file located inside edb folder. :param index: gene_set index of gmt database, used for iterating items. :return: enrichment_term, hit_index,nes, pval, fdr. """ from bs4 import BeautifulSoup soup = BeautifulSoup(open(results_path), features='xml') tag = soup.findAll('DTG') term = dict(tag[index].attrs) # dict_keys(['RANKED_LIST', 'GENESET', 'FWER', 'ES_PROFILE', # 'HIT_INDICES', 'ES', 'NES', 'TEMPLATE', 'RND_ES', 'RANK_SCORE_AT_ES', # 'NP', 'RANK_AT_ES', 'FDR']) enrich_term = term.get('GENESET').split("#")[1] es_profile = term.get('ES_PROFILE').split(" ") # rank_es = term.get('RND_ES').split(" ") hit_ind =term.get('HIT_INDICES').split(" ") es_profile = [float(i) for i in es_profile ] hit_ind = [float(i) for i in hit_ind ] #r ank_es = [float(i) for i in rank_es ] nes = term.get('NES') pval = term.get('NP') fdr = term.get('FDR') # fwer = term.get('FWER') # index_range = len(tag)-1 logging.debug("Enriched Gene set is: "+ enrich_term) return enrich_term, hit_ind, nes, pval, fdr
[ "def", "gsea_edb_parser", "(", "results_path", ",", "index", "=", "0", ")", ":", "from", "bs4", "import", "BeautifulSoup", "soup", "=", "BeautifulSoup", "(", "open", "(", "results_path", ")", ",", "features", "=", "'xml'", ")", "tag", "=", "soup", ".", "findAll", "(", "'DTG'", ")", "term", "=", "dict", "(", "tag", "[", "index", "]", ".", "attrs", ")", "# dict_keys(['RANKED_LIST', 'GENESET', 'FWER', 'ES_PROFILE',", "# 'HIT_INDICES', 'ES', 'NES', 'TEMPLATE', 'RND_ES', 'RANK_SCORE_AT_ES',", "# 'NP', 'RANK_AT_ES', 'FDR'])", "enrich_term", "=", "term", ".", "get", "(", "'GENESET'", ")", ".", "split", "(", "\"#\"", ")", "[", "1", "]", "es_profile", "=", "term", ".", "get", "(", "'ES_PROFILE'", ")", ".", "split", "(", "\" \"", ")", "# rank_es = term.get('RND_ES').split(\" \")", "hit_ind", "=", "term", ".", "get", "(", "'HIT_INDICES'", ")", ".", "split", "(", "\" \"", ")", "es_profile", "=", "[", "float", "(", "i", ")", "for", "i", "in", "es_profile", "]", "hit_ind", "=", "[", "float", "(", "i", ")", "for", "i", "in", "hit_ind", "]", "#r ank_es = [float(i) for i in rank_es ]", "nes", "=", "term", ".", "get", "(", "'NES'", ")", "pval", "=", "term", ".", "get", "(", "'NP'", ")", "fdr", "=", "term", ".", "get", "(", "'FDR'", ")", "# fwer = term.get('FWER')", "# index_range = len(tag)-1", "logging", ".", "debug", "(", "\"Enriched Gene set is: \"", "+", "enrich_term", ")", "return", "enrich_term", ",", "hit_ind", ",", "nes", ",", "pval", ",", "fdr" ]
40.133333
14.933333
def to_json(self): """Return a JSON-serializable representation.""" return { 'network': self.network, 'state': self.state, 'nodes': self.node_indices, 'cut': self.cut, }
[ "def", "to_json", "(", "self", ")", ":", "return", "{", "'network'", ":", "self", ".", "network", ",", "'state'", ":", "self", ".", "state", ",", "'nodes'", ":", "self", ".", "node_indices", ",", "'cut'", ":", "self", ".", "cut", ",", "}" ]
29.25
12.75
def write_geno(data, sidx): """ write the geno output formerly used by admixture, still supported by adegenet, perhaps. Also, sNMF still likes .geno. """ ## grab snp and bis data from tmparr start = time.time() tmparrs = os.path.join(data.dirs.outfiles, "tmp-{}.h5".format(data.name)) with h5py.File(tmparrs, 'r') as io5: snparr = io5["snparr"] bisarr = io5["bisarr"] ## trim to size b/c it was made longer than actual bend = np.where(np.all(bisarr[:] == "", axis=0))[0] if np.any(bend): bend = bend.min() else: bend = bisarr.shape[1] send = np.where(np.all(snparr[:] == "", axis=0))[0] if np.any(send): send = send.min() else: send = snparr.shape[1] ## get most common base at each SNP as a pseudo-reference ## and record 0,1,2 or missing=9 for counts of the ref allele snpref = reftrick(snparr[:, :send].view(np.int8), GETCONS).view("S1") bisref = reftrick(bisarr[:, :bend].view(np.int8), GETCONS).view("S1") ## geno matrix to fill (9 is empty) snpgeno = np.zeros((snparr.shape[0], send), dtype=np.uint8) snpgeno.fill(9) bisgeno = np.zeros((bisarr.shape[0], bend), dtype=np.uint8) bisgeno.fill(9) ##-------------------------------------------------------------------- ## fill in complete hits (match to first column ref base) mask2 = np.array(snparr[:, :send] == snpref[:, 0]) snpgeno[mask2] = 2 ## fill in single hits (heteros) match to hetero of first+second column ambref = np.apply_along_axis(lambda x: TRANSFULL[tuple(x)], 1, snpref[:, :2]) mask1 = np.array(snparr[:, :send] == ambref) snpgeno[mask1] = 1 ## fill in zero hits, meaning a perfect match to the second column base ## anything else is left at 9 (missing), b/c it's either missing or it ## is not bi-allelic. 
mask0 = np.array(snparr[:, :send] == snpref[:, 1]) snpgeno[mask0] = 0 ##-------------------------------------------------------------------- ## fill in complete hits mask2 = np.array(bisarr[:, :bend] == bisref[:, 0]) bisgeno[mask2] = 2 ## fill in single hits (heteros) ambref = np.apply_along_axis(lambda x: TRANSFULL[tuple(x)], 1, bisref[:, :2]) mask1 = np.array(bisarr[:, :bend] == ambref) bisgeno[mask1] = 1 ## fill in zero hits (match to second base) mask0 = np.array(bisarr[:, :bend] == bisref[:, 1]) bisgeno[mask0] = 0 ##--------------------------------------------------------------------- ## print to files np.savetxt(data.outfiles.geno, snpgeno.T, delimiter="", fmt="%d") np.savetxt(data.outfiles.ugeno, bisgeno.T, delimiter="", fmt="%d") LOGGER.debug("finished writing geno in: %s", time.time() - start)
[ "def", "write_geno", "(", "data", ",", "sidx", ")", ":", "## grab snp and bis data from tmparr", "start", "=", "time", ".", "time", "(", ")", "tmparrs", "=", "os", ".", "path", ".", "join", "(", "data", ".", "dirs", ".", "outfiles", ",", "\"tmp-{}.h5\"", ".", "format", "(", "data", ".", "name", ")", ")", "with", "h5py", ".", "File", "(", "tmparrs", ",", "'r'", ")", "as", "io5", ":", "snparr", "=", "io5", "[", "\"snparr\"", "]", "bisarr", "=", "io5", "[", "\"bisarr\"", "]", "## trim to size b/c it was made longer than actual", "bend", "=", "np", ".", "where", "(", "np", ".", "all", "(", "bisarr", "[", ":", "]", "==", "\"\"", ",", "axis", "=", "0", ")", ")", "[", "0", "]", "if", "np", ".", "any", "(", "bend", ")", ":", "bend", "=", "bend", ".", "min", "(", ")", "else", ":", "bend", "=", "bisarr", ".", "shape", "[", "1", "]", "send", "=", "np", ".", "where", "(", "np", ".", "all", "(", "snparr", "[", ":", "]", "==", "\"\"", ",", "axis", "=", "0", ")", ")", "[", "0", "]", "if", "np", ".", "any", "(", "send", ")", ":", "send", "=", "send", ".", "min", "(", ")", "else", ":", "send", "=", "snparr", ".", "shape", "[", "1", "]", "## get most common base at each SNP as a pseudo-reference", "## and record 0,1,2 or missing=9 for counts of the ref allele", "snpref", "=", "reftrick", "(", "snparr", "[", ":", ",", ":", "send", "]", ".", "view", "(", "np", ".", "int8", ")", ",", "GETCONS", ")", ".", "view", "(", "\"S1\"", ")", "bisref", "=", "reftrick", "(", "bisarr", "[", ":", ",", ":", "bend", "]", ".", "view", "(", "np", ".", "int8", ")", ",", "GETCONS", ")", ".", "view", "(", "\"S1\"", ")", "## geno matrix to fill (9 is empty)", "snpgeno", "=", "np", ".", "zeros", "(", "(", "snparr", ".", "shape", "[", "0", "]", ",", "send", ")", ",", "dtype", "=", "np", ".", "uint8", ")", "snpgeno", ".", "fill", "(", "9", ")", "bisgeno", "=", "np", ".", "zeros", "(", "(", "bisarr", ".", "shape", "[", "0", "]", ",", "bend", ")", ",", "dtype", "=", "np", ".", "uint8", ")", "bisgeno", ".", 
"fill", "(", "9", ")", "##--------------------------------------------------------------------", "## fill in complete hits (match to first column ref base)", "mask2", "=", "np", ".", "array", "(", "snparr", "[", ":", ",", ":", "send", "]", "==", "snpref", "[", ":", ",", "0", "]", ")", "snpgeno", "[", "mask2", "]", "=", "2", "## fill in single hits (heteros) match to hetero of first+second column", "ambref", "=", "np", ".", "apply_along_axis", "(", "lambda", "x", ":", "TRANSFULL", "[", "tuple", "(", "x", ")", "]", ",", "1", ",", "snpref", "[", ":", ",", ":", "2", "]", ")", "mask1", "=", "np", ".", "array", "(", "snparr", "[", ":", ",", ":", "send", "]", "==", "ambref", ")", "snpgeno", "[", "mask1", "]", "=", "1", "## fill in zero hits, meaning a perfect match to the second column base", "## anything else is left at 9 (missing), b/c it's either missing or it", "## is not bi-allelic. ", "mask0", "=", "np", ".", "array", "(", "snparr", "[", ":", ",", ":", "send", "]", "==", "snpref", "[", ":", ",", "1", "]", ")", "snpgeno", "[", "mask0", "]", "=", "0", "##--------------------------------------------------------------------", "## fill in complete hits", "mask2", "=", "np", ".", "array", "(", "bisarr", "[", ":", ",", ":", "bend", "]", "==", "bisref", "[", ":", ",", "0", "]", ")", "bisgeno", "[", "mask2", "]", "=", "2", "## fill in single hits (heteros)", "ambref", "=", "np", ".", "apply_along_axis", "(", "lambda", "x", ":", "TRANSFULL", "[", "tuple", "(", "x", ")", "]", ",", "1", ",", "bisref", "[", ":", ",", ":", "2", "]", ")", "mask1", "=", "np", ".", "array", "(", "bisarr", "[", ":", ",", ":", "bend", "]", "==", "ambref", ")", "bisgeno", "[", "mask1", "]", "=", "1", "## fill in zero hits (match to second base)", "mask0", "=", "np", ".", "array", "(", "bisarr", "[", ":", ",", ":", "bend", "]", "==", "bisref", "[", ":", ",", "1", "]", ")", "bisgeno", "[", "mask0", "]", "=", "0", "##---------------------------------------------------------------------", "## print to 
files", "np", ".", "savetxt", "(", "data", ".", "outfiles", ".", "geno", ",", "snpgeno", ".", "T", ",", "delimiter", "=", "\"\"", ",", "fmt", "=", "\"%d\"", ")", "np", ".", "savetxt", "(", "data", ".", "outfiles", ".", "ugeno", ",", "bisgeno", ".", "T", ",", "delimiter", "=", "\"\"", ",", "fmt", "=", "\"%d\"", ")", "LOGGER", ".", "debug", "(", "\"finished writing geno in: %s\"", ",", "time", ".", "time", "(", ")", "-", "start", ")" ]
39.849315
23.465753
def parse_backend(backend): """Converts the "backend" into the database connection parameters. It returns a (scheme, host, params) tuple.""" r = urlparse.urlsplit(backend) scheme, host = r.scheme, r.netloc path, query = r.path, r.query if path and not query: query, path = path, '' if query: if query.find('?'): path = query else: query = query[1:] if query: params = dict(urlparse.parse_qsl(query)) else: params = {} return scheme, host, params
[ "def", "parse_backend", "(", "backend", ")", ":", "r", "=", "urlparse", ".", "urlsplit", "(", "backend", ")", "scheme", ",", "host", "=", "r", ".", "scheme", ",", "r", ".", "netloc", "path", ",", "query", "=", "r", ".", "path", ",", "r", ".", "query", "if", "path", "and", "not", "query", ":", "query", ",", "path", "=", "path", ",", "''", "if", "query", ":", "if", "query", ".", "find", "(", "'?'", ")", ":", "path", "=", "query", "else", ":", "query", "=", "query", "[", "1", ":", "]", "if", "query", ":", "params", "=", "dict", "(", "urlparse", ".", "parse_qsl", "(", "query", ")", ")", "else", ":", "params", "=", "{", "}", "return", "scheme", ",", "host", ",", "params" ]
29.789474
13.105263
def threshold_monitor_hidden_threshold_monitor_Memory_high_limit(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") threshold_monitor_hidden = ET.SubElement(config, "threshold-monitor-hidden", xmlns="urn:brocade.com:mgmt:brocade-threshold-monitor") threshold_monitor = ET.SubElement(threshold_monitor_hidden, "threshold-monitor") Memory = ET.SubElement(threshold_monitor, "Memory") high_limit = ET.SubElement(Memory, "high-limit") high_limit.text = kwargs.pop('high_limit') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "threshold_monitor_hidden_threshold_monitor_Memory_high_limit", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "threshold_monitor_hidden", "=", "ET", ".", "SubElement", "(", "config", ",", "\"threshold-monitor-hidden\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-threshold-monitor\"", ")", "threshold_monitor", "=", "ET", ".", "SubElement", "(", "threshold_monitor_hidden", ",", "\"threshold-monitor\"", ")", "Memory", "=", "ET", ".", "SubElement", "(", "threshold_monitor", ",", "\"Memory\"", ")", "high_limit", "=", "ET", ".", "SubElement", "(", "Memory", ",", "\"high-limit\"", ")", "high_limit", ".", "text", "=", "kwargs", ".", "pop", "(", "'high_limit'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
53.333333
25.25
def enterEvent(self, event): """ Reimplemented to cancel the hide timer. """ super(CallTipWidget, self).enterEvent(event) self._hide_timer.stop()
[ "def", "enterEvent", "(", "self", ",", "event", ")", ":", "super", "(", "CallTipWidget", ",", "self", ")", ".", "enterEvent", "(", "event", ")", "self", ".", "_hide_timer", ".", "stop", "(", ")" ]
34.6
6.6
def operations(self): """Instance depends on the API version: * 2018-03-31: :class:`Operations<azure.mgmt.containerservice.v2018_03_31.operations.Operations>` * 2018-08-01-preview: :class:`Operations<azure.mgmt.containerservice.v2018_08_01_preview.operations.Operations>` * 2019-02-01: :class:`Operations<azure.mgmt.containerservice.v2019_02_01.operations.Operations>` """ api_version = self._get_api_version('operations') if api_version == '2018-03-31': from .v2018_03_31.operations import Operations as OperationClass elif api_version == '2018-08-01-preview': from .v2018_08_01_preview.operations import Operations as OperationClass elif api_version == '2019-02-01': from .v2019_02_01.operations import Operations as OperationClass else: raise NotImplementedError("APIVersion {} is not available".format(api_version)) return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
[ "def", "operations", "(", "self", ")", ":", "api_version", "=", "self", ".", "_get_api_version", "(", "'operations'", ")", "if", "api_version", "==", "'2018-03-31'", ":", "from", ".", "v2018_03_31", ".", "operations", "import", "Operations", "as", "OperationClass", "elif", "api_version", "==", "'2018-08-01-preview'", ":", "from", ".", "v2018_08_01_preview", ".", "operations", "import", "Operations", "as", "OperationClass", "elif", "api_version", "==", "'2019-02-01'", ":", "from", ".", "v2019_02_01", ".", "operations", "import", "Operations", "as", "OperationClass", "else", ":", "raise", "NotImplementedError", "(", "\"APIVersion {} is not available\"", ".", "format", "(", "api_version", ")", ")", "return", "OperationClass", "(", "self", ".", "_client", ",", "self", ".", "config", ",", "Serializer", "(", "self", ".", "_models_dict", "(", "api_version", ")", ")", ",", "Deserializer", "(", "self", ".", "_models_dict", "(", "api_version", ")", ")", ")" ]
64
35.529412
def query_total_cat_recent(cat_id_arr, label=None, num=8, kind='1'): ''' :param cat_id_arr: list of categories. ['0101', '0102'] :param label: the additional label ''' if label: return MPost.__query_with_label( cat_id_arr, label=label, num=num, kind=kind ) return MPost.query_total_cat_recent_no_label(cat_id_arr, num=num, kind=kind)
[ "def", "query_total_cat_recent", "(", "cat_id_arr", ",", "label", "=", "None", ",", "num", "=", "8", ",", "kind", "=", "'1'", ")", ":", "if", "label", ":", "return", "MPost", ".", "__query_with_label", "(", "cat_id_arr", ",", "label", "=", "label", ",", "num", "=", "num", ",", "kind", "=", "kind", ")", "return", "MPost", ".", "query_total_cat_recent_no_label", "(", "cat_id_arr", ",", "num", "=", "num", ",", "kind", "=", "kind", ")" ]
35.307692
20.538462
def gene_ids(self): """ Return IDs of all genes which overlap this variant. Calling this method is significantly cheaper than calling `Variant.genes()`, which has to issue many more queries to construct each Gene object. """ return self.ensembl.gene_ids_at_locus( self.contig, self.start, self.end)
[ "def", "gene_ids", "(", "self", ")", ":", "return", "self", ".", "ensembl", ".", "gene_ids_at_locus", "(", "self", ".", "contig", ",", "self", ".", "start", ",", "self", ".", "end", ")" ]
43.875
16.375
def lub(self, other): """ Return the least upper bound for given intervals. :param other: AbstractInterval instance """ return self.__class__( [ max(self.lower, other.lower), max(self.upper, other.upper), ], lower_inc=self.lower_inc if self < other else other.lower_inc, upper_inc=self.upper_inc if self > other else other.upper_inc, )
[ "def", "lub", "(", "self", ",", "other", ")", ":", "return", "self", ".", "__class__", "(", "[", "max", "(", "self", ".", "lower", ",", "other", ".", "lower", ")", ",", "max", "(", "self", ".", "upper", ",", "other", ".", "upper", ")", ",", "]", ",", "lower_inc", "=", "self", ".", "lower_inc", "if", "self", "<", "other", "else", "other", ".", "lower_inc", ",", "upper_inc", "=", "self", ".", "upper_inc", "if", "self", ">", "other", "else", "other", ".", "upper_inc", ",", ")" ]
32.214286
18.214286
def _get_auto_rank(self, rank): """Tries to figure out what rank we should use for analyses""" if rank == "auto": # if we're an accessor for a ClassificationsDataFrame, use its _rank property if self.__class__.__name__ == "OneCodexAccessor": return self._rank if self._field == "abundance": return "species" else: return "genus" else: return rank
[ "def", "_get_auto_rank", "(", "self", ",", "rank", ")", ":", "if", "rank", "==", "\"auto\"", ":", "# if we're an accessor for a ClassificationsDataFrame, use its _rank property", "if", "self", ".", "__class__", ".", "__name__", "==", "\"OneCodexAccessor\"", ":", "return", "self", ".", "_rank", "if", "self", ".", "_field", "==", "\"abundance\"", ":", "return", "\"species\"", "else", ":", "return", "\"genus\"", "else", ":", "return", "rank" ]
33.357143
19.071429
def get_table_options(self, connection, table_name, schema, **kw): """ Return a dictionary of options specified when the table of the given name was created. Overrides interface :meth:`~sqlalchemy.engine.Inspector.get_table_options`. """ def keyfunc(column): num = int(column.sortkey) # If sortkey is interleaved, column numbers alternate # negative values, so take abs. return abs(num) table = self._get_redshift_relation(connection, table_name, schema, **kw) columns = self._get_redshift_columns(connection, table_name, schema, **kw) sortkey_cols = sorted([col for col in columns if col.sortkey], key=keyfunc) interleaved = any([int(col.sortkey) < 0 for col in sortkey_cols]) sortkey = [col.name for col in sortkey_cols] interleaved_sortkey = None if interleaved: interleaved_sortkey = sortkey sortkey = None distkeys = [col.name for col in columns if col.distkey] distkey = distkeys[0] if distkeys else None return { 'redshift_diststyle': table.diststyle, 'redshift_distkey': distkey, 'redshift_sortkey': sortkey, 'redshift_interleaved_sortkey': interleaved_sortkey, }
[ "def", "get_table_options", "(", "self", ",", "connection", ",", "table_name", ",", "schema", ",", "*", "*", "kw", ")", ":", "def", "keyfunc", "(", "column", ")", ":", "num", "=", "int", "(", "column", ".", "sortkey", ")", "# If sortkey is interleaved, column numbers alternate", "# negative values, so take abs.", "return", "abs", "(", "num", ")", "table", "=", "self", ".", "_get_redshift_relation", "(", "connection", ",", "table_name", ",", "schema", ",", "*", "*", "kw", ")", "columns", "=", "self", ".", "_get_redshift_columns", "(", "connection", ",", "table_name", ",", "schema", ",", "*", "*", "kw", ")", "sortkey_cols", "=", "sorted", "(", "[", "col", "for", "col", "in", "columns", "if", "col", ".", "sortkey", "]", ",", "key", "=", "keyfunc", ")", "interleaved", "=", "any", "(", "[", "int", "(", "col", ".", "sortkey", ")", "<", "0", "for", "col", "in", "sortkey_cols", "]", ")", "sortkey", "=", "[", "col", ".", "name", "for", "col", "in", "sortkey_cols", "]", "interleaved_sortkey", "=", "None", "if", "interleaved", ":", "interleaved_sortkey", "=", "sortkey", "sortkey", "=", "None", "distkeys", "=", "[", "col", ".", "name", "for", "col", "in", "columns", "if", "col", ".", "distkey", "]", "distkey", "=", "distkeys", "[", "0", "]", "if", "distkeys", "else", "None", "return", "{", "'redshift_diststyle'", ":", "table", ".", "diststyle", ",", "'redshift_distkey'", ":", "distkey", ",", "'redshift_sortkey'", ":", "sortkey", ",", "'redshift_interleaved_sortkey'", ":", "interleaved_sortkey", ",", "}" ]
43.121212
15.909091
def toner_status(self, filter_supported: bool = True) -> Dict[str, Any]: """Return the state of all toners cartridges.""" toner_status = {} for color in self.COLOR_NAMES: try: toner_stat = self.data.get( '{}_{}'.format(SyncThru.TONER, color), {}) if filter_supported and toner_stat.get('opt', 0) == 0: continue else: toner_status[color] = toner_stat except (KeyError, AttributeError): toner_status[color] = {} return toner_status
[ "def", "toner_status", "(", "self", ",", "filter_supported", ":", "bool", "=", "True", ")", "->", "Dict", "[", "str", ",", "Any", "]", ":", "toner_status", "=", "{", "}", "for", "color", "in", "self", ".", "COLOR_NAMES", ":", "try", ":", "toner_stat", "=", "self", ".", "data", ".", "get", "(", "'{}_{}'", ".", "format", "(", "SyncThru", ".", "TONER", ",", "color", ")", ",", "{", "}", ")", "if", "filter_supported", "and", "toner_stat", ".", "get", "(", "'opt'", ",", "0", ")", "==", "0", ":", "continue", "else", ":", "toner_status", "[", "color", "]", "=", "toner_stat", "except", "(", "KeyError", ",", "AttributeError", ")", ":", "toner_status", "[", "color", "]", "=", "{", "}", "return", "toner_status" ]
43.5
13.714286
def generate_license(template, context): """ Generate a license by extracting variables from the template and replacing them with the corresponding values in the given context. """ out = StringIO() content = template.getvalue() for key in extract_vars(template): if key not in context: raise ValueError("%s is missing from the template context" % key) content = content.replace("{{ %s }}" % key, context[key]) template.close() # free template memory (when is garbage collected?) out.write(content) return out
[ "def", "generate_license", "(", "template", ",", "context", ")", ":", "out", "=", "StringIO", "(", ")", "content", "=", "template", ".", "getvalue", "(", ")", "for", "key", "in", "extract_vars", "(", "template", ")", ":", "if", "key", "not", "in", "context", ":", "raise", "ValueError", "(", "\"%s is missing from the template context\"", "%", "key", ")", "content", "=", "content", ".", "replace", "(", "\"{{ %s }}\"", "%", "key", ",", "context", "[", "key", "]", ")", "template", ".", "close", "(", ")", "# free template memory (when is garbage collected?)", "out", ".", "write", "(", "content", ")", "return", "out" ]
43.461538
16.307692
def create_component(self, name, description=None): """ Create a sub component in the business component. :param name: The new component's name. :param description: The new component's description. :returns: The created component. """ new_comp = Component(name, self.gl, description=description) new_comp.set_parent_path(self.path) self.components.append(new_comp) return new_comp
[ "def", "create_component", "(", "self", ",", "name", ",", "description", "=", "None", ")", ":", "new_comp", "=", "Component", "(", "name", ",", "self", ".", "gl", ",", "description", "=", "description", ")", "new_comp", ".", "set_parent_path", "(", "self", ".", "path", ")", "self", ".", "components", ".", "append", "(", "new_comp", ")", "return", "new_comp" ]
32.142857
15.857143
def get_file_annotate(self, path): """ Returns a generator of four element tuples with lineno, sha, changeset lazy loader and line TODO: This function now uses os underlying 'git' command which is generally not good. Should be replaced with algorithm iterating commits. """ cmd = 'blame -l --root -r %s -- "%s"' % (self.id, path) # -l ==> outputs long shas (and we need all 40 characters) # --root ==> doesn't put '^' character for bounderies # -r sha ==> blames for the given revision so, se = self.repository.run_git_command(cmd) for i, blame_line in enumerate(so.split('\n')[:-1]): ln_no = i + 1 sha, line = re.split(r' ', blame_line, 1) yield (ln_no, sha, lambda: self.repository.get_changeset(sha), line)
[ "def", "get_file_annotate", "(", "self", ",", "path", ")", ":", "cmd", "=", "'blame -l --root -r %s -- \"%s\"'", "%", "(", "self", ".", "id", ",", "path", ")", "# -l ==> outputs long shas (and we need all 40 characters)", "# --root ==> doesn't put '^' character for bounderies", "# -r sha ==> blames for the given revision", "so", ",", "se", "=", "self", ".", "repository", ".", "run_git_command", "(", "cmd", ")", "for", "i", ",", "blame_line", "in", "enumerate", "(", "so", ".", "split", "(", "'\\n'", ")", "[", ":", "-", "1", "]", ")", ":", "ln_no", "=", "i", "+", "1", "sha", ",", "line", "=", "re", ".", "split", "(", "r' '", ",", "blame_line", ",", "1", ")", "yield", "(", "ln_no", ",", "sha", ",", "lambda", ":", "self", ".", "repository", ".", "get_changeset", "(", "sha", ")", ",", "line", ")" ]
44.263158
20.473684
def inverse(self): """ Inverts image (all nonzeros become zeros and vice verse) Returns ------- :obj:`BinaryImage` inverse of this binary image """ data = np.zeros(self.shape).astype(np.uint8) ind = np.where(self.data == 0) data[ind[0], ind[1], ...] = BINARY_IM_MAX_VAL return BinaryImage(data, self._frame)
[ "def", "inverse", "(", "self", ")", ":", "data", "=", "np", ".", "zeros", "(", "self", ".", "shape", ")", ".", "astype", "(", "np", ".", "uint8", ")", "ind", "=", "np", ".", "where", "(", "self", ".", "data", "==", "0", ")", "data", "[", "ind", "[", "0", "]", ",", "ind", "[", "1", "]", ",", "...", "]", "=", "BINARY_IM_MAX_VAL", "return", "BinaryImage", "(", "data", ",", "self", ".", "_frame", ")" ]
34.636364
10.727273
def update_frame(self, key, ranges=None): """ Update the internal state of the Plot to represent the given key tuple (where integers represent frames). Returns this state. """ ranges = self.compute_ranges(self.layout, key, ranges) for r, c in self.coords: subplot = self.subplots.get((r, c), None) if subplot is not None: subplot.update_frame(key, ranges) title = self._get_title_div(key) if title: self.handles['title'] = title
[ "def", "update_frame", "(", "self", ",", "key", ",", "ranges", "=", "None", ")", ":", "ranges", "=", "self", ".", "compute_ranges", "(", "self", ".", "layout", ",", "key", ",", "ranges", ")", "for", "r", ",", "c", "in", "self", ".", "coords", ":", "subplot", "=", "self", ".", "subplots", ".", "get", "(", "(", "r", ",", "c", ")", ",", "None", ")", "if", "subplot", "is", "not", "None", ":", "subplot", ".", "update_frame", "(", "key", ",", "ranges", ")", "title", "=", "self", ".", "_get_title_div", "(", "key", ")", "if", "title", ":", "self", ".", "handles", "[", "'title'", "]", "=", "title" ]
38.5
11.5
def iter_options(grouped_choices, cutoff=None, cutoff_text=None): """ Helper function for options and option groups in templates. """ class StartOptionGroup(object): start_option_group = True end_option_group = False def __init__(self, label): self.label = label class EndOptionGroup(object): start_option_group = False end_option_group = True class Option(object): start_option_group = False end_option_group = False def __init__(self, value, display_text, disabled=False): self.value = value self.display_text = display_text self.disabled = disabled count = 0 for key, value in grouped_choices.items(): if cutoff and count >= cutoff: break if isinstance(value, dict): yield StartOptionGroup(label=key) for sub_key, sub_value in value.items(): if cutoff and count >= cutoff: break yield Option(value=sub_key, display_text=sub_value) count += 1 yield EndOptionGroup() else: yield Option(value=key, display_text=value) count += 1 if cutoff and count >= cutoff and cutoff_text: cutoff_text = cutoff_text.format(count=cutoff) yield Option(value='n/a', display_text=cutoff_text, disabled=True)
[ "def", "iter_options", "(", "grouped_choices", ",", "cutoff", "=", "None", ",", "cutoff_text", "=", "None", ")", ":", "class", "StartOptionGroup", "(", "object", ")", ":", "start_option_group", "=", "True", "end_option_group", "=", "False", "def", "__init__", "(", "self", ",", "label", ")", ":", "self", ".", "label", "=", "label", "class", "EndOptionGroup", "(", "object", ")", ":", "start_option_group", "=", "False", "end_option_group", "=", "True", "class", "Option", "(", "object", ")", ":", "start_option_group", "=", "False", "end_option_group", "=", "False", "def", "__init__", "(", "self", ",", "value", ",", "display_text", ",", "disabled", "=", "False", ")", ":", "self", ".", "value", "=", "value", "self", ".", "display_text", "=", "display_text", "self", ".", "disabled", "=", "disabled", "count", "=", "0", "for", "key", ",", "value", "in", "grouped_choices", ".", "items", "(", ")", ":", "if", "cutoff", "and", "count", ">=", "cutoff", ":", "break", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "yield", "StartOptionGroup", "(", "label", "=", "key", ")", "for", "sub_key", ",", "sub_value", "in", "value", ".", "items", "(", ")", ":", "if", "cutoff", "and", "count", ">=", "cutoff", ":", "break", "yield", "Option", "(", "value", "=", "sub_key", ",", "display_text", "=", "sub_value", ")", "count", "+=", "1", "yield", "EndOptionGroup", "(", ")", "else", ":", "yield", "Option", "(", "value", "=", "key", ",", "display_text", "=", "value", ")", "count", "+=", "1", "if", "cutoff", "and", "count", ">=", "cutoff", "and", "cutoff_text", ":", "cutoff_text", "=", "cutoff_text", ".", "format", "(", "count", "=", "cutoff", ")", "yield", "Option", "(", "value", "=", "'n/a'", ",", "display_text", "=", "cutoff_text", ",", "disabled", "=", "True", ")" ]
30.688889
16.955556
def xcorr(x1,x2,Nlags): """ r12, k = xcorr(x1,x2,Nlags), r12 and k are ndarray's Compute the energy normalized cross correlation between the sequences x1 and x2. If x1 = x2 the cross correlation is the autocorrelation. The number of lags sets how many lags to return centered about zero """ K = 2*(int(np.floor(len(x1)/2))) X1 = fft.fft(x1[:K]) X2 = fft.fft(x2[:K]) E1 = sum(abs(x1[:K])**2) E2 = sum(abs(x2[:K])**2) r12 = np.fft.ifft(X1*np.conj(X2))/np.sqrt(E1*E2) k = np.arange(K) - int(np.floor(K/2)) r12 = np.fft.fftshift(r12) idx = np.nonzero(np.ravel(abs(k) <= Nlags)) return r12[idx], k[idx]
[ "def", "xcorr", "(", "x1", ",", "x2", ",", "Nlags", ")", ":", "K", "=", "2", "*", "(", "int", "(", "np", ".", "floor", "(", "len", "(", "x1", ")", "/", "2", ")", ")", ")", "X1", "=", "fft", ".", "fft", "(", "x1", "[", ":", "K", "]", ")", "X2", "=", "fft", ".", "fft", "(", "x2", "[", ":", "K", "]", ")", "E1", "=", "sum", "(", "abs", "(", "x1", "[", ":", "K", "]", ")", "**", "2", ")", "E2", "=", "sum", "(", "abs", "(", "x2", "[", ":", "K", "]", ")", "**", "2", ")", "r12", "=", "np", ".", "fft", ".", "ifft", "(", "X1", "*", "np", ".", "conj", "(", "X2", ")", ")", "/", "np", ".", "sqrt", "(", "E1", "*", "E2", ")", "k", "=", "np", ".", "arange", "(", "K", ")", "-", "int", "(", "np", ".", "floor", "(", "K", "/", "2", ")", ")", "r12", "=", "np", ".", "fft", ".", "fftshift", "(", "r12", ")", "idx", "=", "np", ".", "nonzero", "(", "np", ".", "ravel", "(", "abs", "(", "k", ")", "<=", "Nlags", ")", ")", "return", "r12", "[", "idx", "]", ",", "k", "[", "idx", "]" ]
37.941176
13.588235
def parse_cache(self, full_df): """ Format the cached data model into a dictionary of DataFrames and a criteria map DataFrame. Parameters ---------- full_df : DataFrame result of self.get_dm_offline() Returns ---------- data_model : dictionary of DataFrames crit_map : DataFrame """ data_model = {} levels = ['specimens', 'samples', 'sites', 'locations', 'ages', 'measurements', 'criteria', 'contribution', 'images'] criteria_map = pd.DataFrame(full_df['criteria_map']) for level in levels: df = pd.DataFrame(full_df['tables'][level]['columns']) data_model[level] = df.transpose() # replace np.nan with None data_model[level] = data_model[level].where((pd.notnull(data_model[level])), None) return data_model, criteria_map
[ "def", "parse_cache", "(", "self", ",", "full_df", ")", ":", "data_model", "=", "{", "}", "levels", "=", "[", "'specimens'", ",", "'samples'", ",", "'sites'", ",", "'locations'", ",", "'ages'", ",", "'measurements'", ",", "'criteria'", ",", "'contribution'", ",", "'images'", "]", "criteria_map", "=", "pd", ".", "DataFrame", "(", "full_df", "[", "'criteria_map'", "]", ")", "for", "level", "in", "levels", ":", "df", "=", "pd", ".", "DataFrame", "(", "full_df", "[", "'tables'", "]", "[", "level", "]", "[", "'columns'", "]", ")", "data_model", "[", "level", "]", "=", "df", ".", "transpose", "(", ")", "# replace np.nan with None", "data_model", "[", "level", "]", "=", "data_model", "[", "level", "]", ".", "where", "(", "(", "pd", ".", "notnull", "(", "data_model", "[", "level", "]", ")", ")", ",", "None", ")", "return", "data_model", ",", "criteria_map" ]
35.192308
17.192308
def list_dataset_uris(cls, base_uri, config_path): """Return list containing URIs with base URI.""" storage_account_name = generous_parse_uri(base_uri).netloc blobservice = get_blob_service(storage_account_name, config_path) containers = blobservice.list_containers(include_metadata=True) uri_list = [] for c in containers: admin_metadata = c.metadata uri = cls.generate_uri( admin_metadata['name'], admin_metadata['uuid'], base_uri ) uri_list.append(uri) return uri_list
[ "def", "list_dataset_uris", "(", "cls", ",", "base_uri", ",", "config_path", ")", ":", "storage_account_name", "=", "generous_parse_uri", "(", "base_uri", ")", ".", "netloc", "blobservice", "=", "get_blob_service", "(", "storage_account_name", ",", "config_path", ")", "containers", "=", "blobservice", ".", "list_containers", "(", "include_metadata", "=", "True", ")", "uri_list", "=", "[", "]", "for", "c", "in", "containers", ":", "admin_metadata", "=", "c", ".", "metadata", "uri", "=", "cls", ".", "generate_uri", "(", "admin_metadata", "[", "'name'", "]", ",", "admin_metadata", "[", "'uuid'", "]", ",", "base_uri", ")", "uri_list", ".", "append", "(", "uri", ")", "return", "uri_list" ]
33.833333
18.166667
def github_belspec_files(spec_dir, force: bool = False): """Get belspec files from Github repo Args: spec_dir: directory to store the BEL Specification and derived files force: force update of BEL Specifications from Github - skipped if local files less than 1 day old """ if not force: dtnow = datetime.datetime.utcnow() delta = datetime.timedelta(1) yesterday = dtnow - delta for fn in glob.glob(f"{spec_dir}/bel*yaml"): if datetime.datetime.fromtimestamp(os.path.getmtime(fn)) > yesterday: log.info("Skipping BEL Specification update - specs less than 1 day old") return repo_url = "https://api.github.com/repos/belbio/bel_specifications/contents/specifications" params = {} github_access_token = os.getenv("GITHUB_ACCESS_TOKEN", "") if github_access_token: params = {"access_token": github_access_token} r = requests.get(repo_url, params=params) if r.status_code == 200: results = r.json() for f in results: url = f["download_url"] fn = os.path.basename(url) if "yaml" not in fn and "yml" in fn: fn = fn.replace("yml", "yaml") r = requests.get(url, params=params, allow_redirects=True) if r.status_code == 200: open(f"{spec_dir}/{fn}", "wb").write(r.content) else: sys.exit( f"Could not get BEL Spec file {url} from Github -- Status: {r.status_code} Msg: {r.content}" ) else: sys.exit( f"Could not get BEL Spec directory listing from Github -- Status: {r.status_code} Msg: {r.content}" )
[ "def", "github_belspec_files", "(", "spec_dir", ",", "force", ":", "bool", "=", "False", ")", ":", "if", "not", "force", ":", "dtnow", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "delta", "=", "datetime", ".", "timedelta", "(", "1", ")", "yesterday", "=", "dtnow", "-", "delta", "for", "fn", "in", "glob", ".", "glob", "(", "f\"{spec_dir}/bel*yaml\"", ")", ":", "if", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "os", ".", "path", ".", "getmtime", "(", "fn", ")", ")", ">", "yesterday", ":", "log", ".", "info", "(", "\"Skipping BEL Specification update - specs less than 1 day old\"", ")", "return", "repo_url", "=", "\"https://api.github.com/repos/belbio/bel_specifications/contents/specifications\"", "params", "=", "{", "}", "github_access_token", "=", "os", ".", "getenv", "(", "\"GITHUB_ACCESS_TOKEN\"", ",", "\"\"", ")", "if", "github_access_token", ":", "params", "=", "{", "\"access_token\"", ":", "github_access_token", "}", "r", "=", "requests", ".", "get", "(", "repo_url", ",", "params", "=", "params", ")", "if", "r", ".", "status_code", "==", "200", ":", "results", "=", "r", ".", "json", "(", ")", "for", "f", "in", "results", ":", "url", "=", "f", "[", "\"download_url\"", "]", "fn", "=", "os", ".", "path", ".", "basename", "(", "url", ")", "if", "\"yaml\"", "not", "in", "fn", "and", "\"yml\"", "in", "fn", ":", "fn", "=", "fn", ".", "replace", "(", "\"yml\"", ",", "\"yaml\"", ")", "r", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "params", ",", "allow_redirects", "=", "True", ")", "if", "r", ".", "status_code", "==", "200", ":", "open", "(", "f\"{spec_dir}/{fn}\"", ",", "\"wb\"", ")", ".", "write", "(", "r", ".", "content", ")", "else", ":", "sys", ".", "exit", "(", "f\"Could not get BEL Spec file {url} from Github -- Status: {r.status_code} Msg: {r.content}\"", ")", "else", ":", "sys", ".", "exit", "(", "f\"Could not get BEL Spec directory listing from Github -- Status: {r.status_code} Msg: {r.content}\"", ")" ]
36.956522
25.391304
def _parse_sod_segment(self, fptr): """Parse the SOD (start-of-data) segment. Parameters ---------- fptr : file Open file object. Returns ------- SODSegment The current SOD segment. """ offset = fptr.tell() - 2 length = 0 return SODsegment(length, offset)
[ "def", "_parse_sod_segment", "(", "self", ",", "fptr", ")", ":", "offset", "=", "fptr", ".", "tell", "(", ")", "-", "2", "length", "=", "0", "return", "SODsegment", "(", "length", ",", "offset", ")" ]
20.823529
18.117647
def _verifyDiscoveredServices(self, claimed_id, services, to_match_endpoints): """See @L{_discoverAndVerify}""" # Search the services resulting from discovery to find one # that matches the information from the assertion failure_messages = [] for endpoint in services: for to_match_endpoint in to_match_endpoints: try: self._verifyDiscoverySingle(endpoint, to_match_endpoint) except ProtocolError as why: failure_messages.append(str(why)) else: # It matches, so discover verification has # succeeded. Return this endpoint. return endpoint else: logging.error('Discovery verification failure for %s' % (claimed_id, )) for failure_message in failure_messages: logging.error(' * Endpoint mismatch: ' + failure_message) raise DiscoveryFailure( 'No matching endpoint found after discovering %s' % (claimed_id, ), None)
[ "def", "_verifyDiscoveredServices", "(", "self", ",", "claimed_id", ",", "services", ",", "to_match_endpoints", ")", ":", "# Search the services resulting from discovery to find one", "# that matches the information from the assertion", "failure_messages", "=", "[", "]", "for", "endpoint", "in", "services", ":", "for", "to_match_endpoint", "in", "to_match_endpoints", ":", "try", ":", "self", ".", "_verifyDiscoverySingle", "(", "endpoint", ",", "to_match_endpoint", ")", "except", "ProtocolError", "as", "why", ":", "failure_messages", ".", "append", "(", "str", "(", "why", ")", ")", "else", ":", "# It matches, so discover verification has", "# succeeded. Return this endpoint.", "return", "endpoint", "else", ":", "logging", ".", "error", "(", "'Discovery verification failure for %s'", "%", "(", "claimed_id", ",", ")", ")", "for", "failure_message", "in", "failure_messages", ":", "logging", ".", "error", "(", "' * Endpoint mismatch: '", "+", "failure_message", ")", "raise", "DiscoveryFailure", "(", "'No matching endpoint found after discovering %s'", "%", "(", "claimed_id", ",", ")", ",", "None", ")" ]
43.923077
17.538462
def send(self, packet, recalculate_checksum=True): """ Injects a packet into the network stack. Recalculates the checksum before sending unless recalculate_checksum=False is passed. The injected packet may be one received from recv(), or a modified version, or a completely new packet. Injected packets can be captured and diverted again by other WinDivert handles with lower priorities. The remapped function is WinDivertSend:: BOOL WinDivertSend( __in HANDLE handle, __in PVOID pPacket, __in UINT packetLen, __in PWINDIVERT_ADDRESS pAddr, __out_opt UINT *sendLen ); For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_send :return: The return value is the number of bytes actually sent. """ if recalculate_checksum: packet.recalculate_checksums() send_len = c_uint(0) if PY2: # .from_buffer(memoryview) does not work on PY2 buff = bytearray(packet.raw) else: buff = packet.raw buff = (c_char * len(packet.raw)).from_buffer(buff) windivert_dll.WinDivertSend(self._handle, buff, len(packet.raw), byref(packet.wd_addr), byref(send_len)) return send_len
[ "def", "send", "(", "self", ",", "packet", ",", "recalculate_checksum", "=", "True", ")", ":", "if", "recalculate_checksum", ":", "packet", ".", "recalculate_checksums", "(", ")", "send_len", "=", "c_uint", "(", "0", ")", "if", "PY2", ":", "# .from_buffer(memoryview) does not work on PY2", "buff", "=", "bytearray", "(", "packet", ".", "raw", ")", "else", ":", "buff", "=", "packet", ".", "raw", "buff", "=", "(", "c_char", "*", "len", "(", "packet", ".", "raw", ")", ")", ".", "from_buffer", "(", "buff", ")", "windivert_dll", ".", "WinDivertSend", "(", "self", ".", "_handle", ",", "buff", ",", "len", "(", "packet", ".", "raw", ")", ",", "byref", "(", "packet", ".", "wd_addr", ")", ",", "byref", "(", "send_len", ")", ")", "return", "send_len" ]
39.057143
23
def month_crumb(date): """ Crumb for a month. """ year = date.strftime('%Y') month = date.strftime('%m') month_text = DateFormat(date).format('F').capitalize() return Crumb(month_text, reverse('zinnia:entry_archive_month', args=[year, month]))
[ "def", "month_crumb", "(", "date", ")", ":", "year", "=", "date", ".", "strftime", "(", "'%Y'", ")", "month", "=", "date", ".", "strftime", "(", "'%m'", ")", "month_text", "=", "DateFormat", "(", "date", ")", ".", "format", "(", "'F'", ")", ".", "capitalize", "(", ")", "return", "Crumb", "(", "month_text", ",", "reverse", "(", "'zinnia:entry_archive_month'", ",", "args", "=", "[", "year", ",", "month", "]", ")", ")" ]
33.333333
12.888889
def widths_in_range_mm( self, minwidth=EMIR_MINIMUM_SLITLET_WIDTH_MM, maxwidth=EMIR_MAXIMUM_SLITLET_WIDTH_MM ): """Return list of slitlets which width is within given range Parameters ---------- minwidth : float Minimum slit width (mm). maxwidth : float Maximum slit width (mm). Returns ------- list_ok : list List of booleans indicating whether the corresponding slitlet width is within range """ list_ok = [] for i in range(EMIR_NBARS): slitlet_ok = minwidth <= self._csu_bar_slit_width[i] <= maxwidth if slitlet_ok: list_ok.append(i + 1) return list_ok
[ "def", "widths_in_range_mm", "(", "self", ",", "minwidth", "=", "EMIR_MINIMUM_SLITLET_WIDTH_MM", ",", "maxwidth", "=", "EMIR_MAXIMUM_SLITLET_WIDTH_MM", ")", ":", "list_ok", "=", "[", "]", "for", "i", "in", "range", "(", "EMIR_NBARS", ")", ":", "slitlet_ok", "=", "minwidth", "<=", "self", ".", "_csu_bar_slit_width", "[", "i", "]", "<=", "maxwidth", "if", "slitlet_ok", ":", "list_ok", ".", "append", "(", "i", "+", "1", ")", "return", "list_ok" ]
26.068966
19.62069
def get_canonical_transaction(self, transaction_hash: Hash32) -> BaseTransaction: """ Returns the requested transaction as specified by the transaction hash from the canonical chain. Raises TransactionNotFound if no transaction with the specified hash is found in the main chain. """ (block_num, index) = self.chaindb.get_transaction_index(transaction_hash) VM_class = self.get_vm_class_for_block_number(block_num) transaction = self.chaindb.get_transaction_by_index( block_num, index, VM_class.get_transaction_class(), ) if transaction.hash == transaction_hash: return transaction else: raise TransactionNotFound("Found transaction {} instead of {} in block {} at {}".format( encode_hex(transaction.hash), encode_hex(transaction_hash), block_num, index, ))
[ "def", "get_canonical_transaction", "(", "self", ",", "transaction_hash", ":", "Hash32", ")", "->", "BaseTransaction", ":", "(", "block_num", ",", "index", ")", "=", "self", ".", "chaindb", ".", "get_transaction_index", "(", "transaction_hash", ")", "VM_class", "=", "self", ".", "get_vm_class_for_block_number", "(", "block_num", ")", "transaction", "=", "self", ".", "chaindb", ".", "get_transaction_by_index", "(", "block_num", ",", "index", ",", "VM_class", ".", "get_transaction_class", "(", ")", ",", ")", "if", "transaction", ".", "hash", "==", "transaction_hash", ":", "return", "transaction", "else", ":", "raise", "TransactionNotFound", "(", "\"Found transaction {} instead of {} in block {} at {}\"", ".", "format", "(", "encode_hex", "(", "transaction", ".", "hash", ")", ",", "encode_hex", "(", "transaction_hash", ")", ",", "block_num", ",", "index", ",", ")", ")" ]
37.192308
22.576923
def _releaseId(self, msgId): """ Release message ID to the pool. @param msgId: message ID, no longer on the wire @type msgId: C{str} """ self._uuids.append(msgId) if len(self._uuids) > 2 * self.UUID_POOL_GEN_SIZE: self._uuids[-self.UUID_POOL_GEN_SIZE:] = []
[ "def", "_releaseId", "(", "self", ",", "msgId", ")", ":", "self", ".", "_uuids", ".", "append", "(", "msgId", ")", "if", "len", "(", "self", ".", "_uuids", ")", ">", "2", "*", "self", ".", "UUID_POOL_GEN_SIZE", ":", "self", ".", "_uuids", "[", "-", "self", ".", "UUID_POOL_GEN_SIZE", ":", "]", "=", "[", "]" ]
31.7
12.1
def task_done(self): """Mark a task as done.""" with self._lock: unfinished = self._unfinished_tasks - 1 if unfinished < 0: raise RuntimeError('task_done() called too many times') elif unfinished == 0: self._alldone.notify() self._unfinished_tasks = unfinished
[ "def", "task_done", "(", "self", ")", ":", "with", "self", ".", "_lock", ":", "unfinished", "=", "self", ".", "_unfinished_tasks", "-", "1", "if", "unfinished", "<", "0", ":", "raise", "RuntimeError", "(", "'task_done() called too many times'", ")", "elif", "unfinished", "==", "0", ":", "self", ".", "_alldone", ".", "notify", "(", ")", "self", ".", "_unfinished_tasks", "=", "unfinished" ]
38.666667
11.555556
def tupleize( rows, alphabetize_columns=getattr( settings, 'ALPHABETIZE_COLUMNS', False)): """Also alphabetizes columns and returns a tuple of tuples""" # define a blank list as our return object l = [] for r in rows: row = [] row = list(r.values()) l.append(row) # alphabetize if alphabetize_columns: col = sorted(zip(*l)) result = zip(*col) return result else: return l
[ "def", "tupleize", "(", "rows", ",", "alphabetize_columns", "=", "getattr", "(", "settings", ",", "'ALPHABETIZE_COLUMNS'", ",", "False", ")", ")", ":", "# define a blank list as our return object\r", "l", "=", "[", "]", "for", "r", "in", "rows", ":", "row", "=", "[", "]", "row", "=", "list", "(", "r", ".", "values", "(", ")", ")", "l", ".", "append", "(", "row", ")", "# alphabetize\r", "if", "alphabetize_columns", ":", "col", "=", "sorted", "(", "zip", "(", "*", "l", ")", ")", "result", "=", "zip", "(", "*", "col", ")", "return", "result", "else", ":", "return", "l" ]
22.227273
19.590909
def stats_tube(self, tube: str) -> Stats: """Returns tube statistics. :param tube: The tube to return statistics for. """ return self._stats_cmd(b'stats-tube %b' % tube.encode('ascii'))
[ "def", "stats_tube", "(", "self", ",", "tube", ":", "str", ")", "->", "Stats", ":", "return", "self", ".", "_stats_cmd", "(", "b'stats-tube %b'", "%", "tube", ".", "encode", "(", "'ascii'", ")", ")" ]
35.5
14.5
def docSetRootElement(self, doc): """Set the root element of the document (doc->children is a list containing possibly comments, PIs, etc ...). """ if doc is None: doc__o = None else: doc__o = doc._o ret = libxml2mod.xmlDocSetRootElement(doc__o, self._o) if ret is None:return None __tmp = xmlNode(_obj=ret) return __tmp
[ "def", "docSetRootElement", "(", "self", ",", "doc", ")", ":", "if", "doc", "is", "None", ":", "doc__o", "=", "None", "else", ":", "doc__o", "=", "doc", ".", "_o", "ret", "=", "libxml2mod", ".", "xmlDocSetRootElement", "(", "doc__o", ",", "self", ".", "_o", ")", "if", "ret", "is", "None", ":", "return", "None", "__tmp", "=", "xmlNode", "(", "_obj", "=", "ret", ")", "return", "__tmp" ]
42.111111
8.444444
def is_draft_version(self): """ Return if this version is the draft version of a layer """ pub_ver = getattr(self, 'published_version', None) latest_ver = getattr(self, 'latest_version', None) this_ver = getattr(self, 'this_version', None) return this_ver and latest_ver and (this_ver == latest_ver) and (latest_ver != pub_ver)
[ "def", "is_draft_version", "(", "self", ")", ":", "pub_ver", "=", "getattr", "(", "self", ",", "'published_version'", ",", "None", ")", "latest_ver", "=", "getattr", "(", "self", ",", "'latest_version'", ",", "None", ")", "this_ver", "=", "getattr", "(", "self", ",", "'this_version'", ",", "None", ")", "return", "this_ver", "and", "latest_ver", "and", "(", "this_ver", "==", "latest_ver", ")", "and", "(", "latest_ver", "!=", "pub_ver", ")" ]
60.333333
19.666667
def to_bool(x): """"Converts to a boolean in a semantically meaningful way.""" if isinstance(x, bool): return x elif isinstance(x, str): return False if x.lower() in _FALSES else True else: return bool(x)
[ "def", "to_bool", "(", "x", ")", ":", "if", "isinstance", "(", "x", ",", "bool", ")", ":", "return", "x", "elif", "isinstance", "(", "x", ",", "str", ")", ":", "return", "False", "if", "x", ".", "lower", "(", ")", "in", "_FALSES", "else", "True", "else", ":", "return", "bool", "(", "x", ")" ]
29.625
17.125
def execute_fragment_under_context(self, ctx, start_label, end_label): ''' just like run but returns if moved outside of the specified fragment # 4 different exectution results # 0=normal, 1=return, 2=jump_outside, 3=errors # execute_fragment_under_context returns: # (return_value, typ, return_value/jump_loc/py_error) # IMPARTANT: It is guaranteed that the length of the ctx.stack is unchanged. ''' old_curr_ctx = self.current_ctx self.ctx_depth += 1 old_stack_len = len(ctx.stack) old_ret_len = len(self.return_locs) old_ctx_len = len(self.contexts) try: self.current_ctx = ctx return self._execute_fragment_under_context( ctx, start_label, end_label) except JsException as err: if self.debug_mode: self._on_fragment_exit("js errors") # undo the things that were put on the stack (if any) to ensure a proper error recovery del ctx.stack[old_stack_len:] del self.return_locs[old_ret_len:] del self.contexts[old_ctx_len :] return undefined, 3, err finally: self.ctx_depth -= 1 self.current_ctx = old_curr_ctx assert old_stack_len == len(ctx.stack)
[ "def", "execute_fragment_under_context", "(", "self", ",", "ctx", ",", "start_label", ",", "end_label", ")", ":", "old_curr_ctx", "=", "self", ".", "current_ctx", "self", ".", "ctx_depth", "+=", "1", "old_stack_len", "=", "len", "(", "ctx", ".", "stack", ")", "old_ret_len", "=", "len", "(", "self", ".", "return_locs", ")", "old_ctx_len", "=", "len", "(", "self", ".", "contexts", ")", "try", ":", "self", ".", "current_ctx", "=", "ctx", "return", "self", ".", "_execute_fragment_under_context", "(", "ctx", ",", "start_label", ",", "end_label", ")", "except", "JsException", "as", "err", ":", "if", "self", ".", "debug_mode", ":", "self", ".", "_on_fragment_exit", "(", "\"js errors\"", ")", "# undo the things that were put on the stack (if any) to ensure a proper error recovery", "del", "ctx", ".", "stack", "[", "old_stack_len", ":", "]", "del", "self", ".", "return_locs", "[", "old_ret_len", ":", "]", "del", "self", ".", "contexts", "[", "old_ctx_len", ":", "]", "return", "undefined", ",", "3", ",", "err", "finally", ":", "self", ".", "ctx_depth", "-=", "1", "self", ".", "current_ctx", "=", "old_curr_ctx", "assert", "old_stack_len", "==", "len", "(", "ctx", ".", "stack", ")" ]
45.655172
14.689655
def delete_chat_photo(chat_id, **kwargs): """ Use this method to delete a chat photo. Photos can't be changed for private chats. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest` :return: Returns True on success. :rtype: bool """ # required args params = dict(chat_id=chat_id) return TelegramBotRPCRequest('deleteChatPhoto', params=params, on_result=lambda result: result, **kwargs)
[ "def", "delete_chat_photo", "(", "chat_id", ",", "*", "*", "kwargs", ")", ":", "# required args", "params", "=", "dict", "(", "chat_id", "=", "chat_id", ")", "return", "TelegramBotRPCRequest", "(", "'deleteChatPhoto'", ",", "params", "=", "params", ",", "on_result", "=", "lambda", "result", ":", "result", ",", "*", "*", "kwargs", ")" ]
47.642857
31.5
def create_key(file_): """ Create a key and save it into ``file_``. Note that ``file`` must be opened in binary mode. """ pkey = crypto.PKey() pkey.generate_key(crypto.TYPE_RSA, 2048) file_.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey)) file_.flush()
[ "def", "create_key", "(", "file_", ")", ":", "pkey", "=", "crypto", ".", "PKey", "(", ")", "pkey", ".", "generate_key", "(", "crypto", ".", "TYPE_RSA", ",", "2048", ")", "file_", ".", "write", "(", "crypto", ".", "dump_privatekey", "(", "crypto", ".", "FILETYPE_PEM", ",", "pkey", ")", ")", "file_", ".", "flush", "(", ")" ]
25.818182
16.727273
def create_embedded_class(self, method): """ Build the estimator class. Returns ------- :return : string The built class as string. """ temp_class = self.temp('embedded.class') return temp_class.format(class_name=self.class_name, method_name=self.method_name, method=method, n_features=self.n_features)
[ "def", "create_embedded_class", "(", "self", ",", "method", ")", ":", "temp_class", "=", "self", ".", "temp", "(", "'embedded.class'", ")", "return", "temp_class", ".", "format", "(", "class_name", "=", "self", ".", "class_name", ",", "method_name", "=", "self", ".", "method_name", ",", "method", "=", "method", ",", "n_features", "=", "self", ".", "n_features", ")" ]
33.307692
15.307692
def get_doc_lengths(self): ''' Returns a list of document lengths in words Returns ------- np.array ''' idx_to_delete_list = self._build_term_index_list(True, self._get_non_unigrams()) unigram_X, _ = self._get_X_after_delete_terms(idx_to_delete_list) return unigram_X.sum(axis=1).A1
[ "def", "get_doc_lengths", "(", "self", ")", ":", "idx_to_delete_list", "=", "self", ".", "_build_term_index_list", "(", "True", ",", "self", ".", "_get_non_unigrams", "(", ")", ")", "unigram_X", ",", "_", "=", "self", ".", "_get_X_after_delete_terms", "(", "idx_to_delete_list", ")", "return", "unigram_X", ".", "sum", "(", "axis", "=", "1", ")", ".", "A1" ]
31.363636
25.363636
def rm_job(name): ''' Remove the specified job from the server. CLI Example: .. code-block:: bash salt chronos-minion-id chronos.rm_job my-job ''' response = salt.utils.http.query( "{0}/scheduler/job/{1}".format(_base_url(), name), method='DELETE', ) return True
[ "def", "rm_job", "(", "name", ")", ":", "response", "=", "salt", ".", "utils", ".", "http", ".", "query", "(", "\"{0}/scheduler/job/{1}\"", ".", "format", "(", "_base_url", "(", ")", ",", "name", ")", ",", "method", "=", "'DELETE'", ",", ")", "return", "True" ]
20.466667
24.2
def _aggregations_config(self): """Load aggregation configurations.""" result = {} for ep in iter_entry_points( group=self.entry_point_group_aggs): for cfg in ep.load()(): if cfg['aggregation_name'] not in self.enabled_aggregations: continue elif cfg['aggregation_name'] in result: raise DuplicateAggregationError( 'Duplicate aggregation {0} in entry point ' '{1}'.format(cfg['event_type'], ep.name)) # Update the default configuration with env/overlay config. cfg.update( self.enabled_aggregations[cfg['aggregation_name']] or {} ) result[cfg['aggregation_name']] = cfg return result
[ "def", "_aggregations_config", "(", "self", ")", ":", "result", "=", "{", "}", "for", "ep", "in", "iter_entry_points", "(", "group", "=", "self", ".", "entry_point_group_aggs", ")", ":", "for", "cfg", "in", "ep", ".", "load", "(", ")", "(", ")", ":", "if", "cfg", "[", "'aggregation_name'", "]", "not", "in", "self", ".", "enabled_aggregations", ":", "continue", "elif", "cfg", "[", "'aggregation_name'", "]", "in", "result", ":", "raise", "DuplicateAggregationError", "(", "'Duplicate aggregation {0} in entry point '", "'{1}'", ".", "format", "(", "cfg", "[", "'event_type'", "]", ",", "ep", ".", "name", ")", ")", "# Update the default configuration with env/overlay config.", "cfg", ".", "update", "(", "self", ".", "enabled_aggregations", "[", "cfg", "[", "'aggregation_name'", "]", "]", "or", "{", "}", ")", "result", "[", "cfg", "[", "'aggregation_name'", "]", "]", "=", "cfg", "return", "result" ]
46.111111
17.555556
def __chopStringDict(self, data): '''Returns a dictionary of the provided raw service/host check string.''' r = {} d = data.split('\t') for item in d: item_parts = item.split('::') if len(item_parts) == 2: (name, value) = item_parts else: name = item_parts[0] value = item_parts[1] name = self.__filter(name) r[name] = value if "hostperfdata" in r: r["type"] = "hostcheck" r["perfdata"] = r["hostperfdata"] r["checkcommand"] = re.search("(.*?)!\(?.*", r["hostcheckcommand"]).group(1) r["name"] = "hostcheck" else: r["type"] = "servicecheck" r["perfdata"] = r["serviceperfdata"] r["checkcommand"] = re.search("((.*)(?=\!)|(.*))", r["servicecheckcommand"]).group(1) r["name"] = self.__filter(r["servicedesc"]) r["hostname"] = self.replacePeriod(self.__filter(r["hostname"])) return r
[ "def", "__chopStringDict", "(", "self", ",", "data", ")", ":", "r", "=", "{", "}", "d", "=", "data", ".", "split", "(", "'\\t'", ")", "for", "item", "in", "d", ":", "item_parts", "=", "item", ".", "split", "(", "'::'", ")", "if", "len", "(", "item_parts", ")", "==", "2", ":", "(", "name", ",", "value", ")", "=", "item_parts", "else", ":", "name", "=", "item_parts", "[", "0", "]", "value", "=", "item_parts", "[", "1", "]", "name", "=", "self", ".", "__filter", "(", "name", ")", "r", "[", "name", "]", "=", "value", "if", "\"hostperfdata\"", "in", "r", ":", "r", "[", "\"type\"", "]", "=", "\"hostcheck\"", "r", "[", "\"perfdata\"", "]", "=", "r", "[", "\"hostperfdata\"", "]", "r", "[", "\"checkcommand\"", "]", "=", "re", ".", "search", "(", "\"(.*?)!\\(?.*\"", ",", "r", "[", "\"hostcheckcommand\"", "]", ")", ".", "group", "(", "1", ")", "r", "[", "\"name\"", "]", "=", "\"hostcheck\"", "else", ":", "r", "[", "\"type\"", "]", "=", "\"servicecheck\"", "r", "[", "\"perfdata\"", "]", "=", "r", "[", "\"serviceperfdata\"", "]", "r", "[", "\"checkcommand\"", "]", "=", "re", ".", "search", "(", "\"((.*)(?=\\!)|(.*))\"", ",", "r", "[", "\"servicecheckcommand\"", "]", ")", ".", "group", "(", "1", ")", "r", "[", "\"name\"", "]", "=", "self", ".", "__filter", "(", "r", "[", "\"servicedesc\"", "]", ")", "r", "[", "\"hostname\"", "]", "=", "self", ".", "replacePeriod", "(", "self", ".", "__filter", "(", "r", "[", "\"hostname\"", "]", ")", ")", "return", "r" ]
33.064516
20.419355
def run(self): ''' Execute the runner sequence ''' # Print documentation only if self.opts.get('doc', False): self.print_docs() else: return self._run_runner()
[ "def", "run", "(", "self", ")", ":", "# Print documentation only", "if", "self", ".", "opts", ".", "get", "(", "'doc'", ",", "False", ")", ":", "self", ".", "print_docs", "(", ")", "else", ":", "return", "self", ".", "_run_runner", "(", ")" ]
24.777778
15.222222
def tile_coord(self, xtile, ytile, zoom): """ This returns the NW-corner of the square. Use the function with xtile+1 and/or ytile+1 to get the other corners. With xtile+0.5 & ytile+0.5 it will return the center of the tile. http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames#Tile_numbers_to_lon..2Flat._2 """ assert self.tile_srid == 3857, 'Custom tile projection not supported yet' n = 2.0 ** zoom lon_deg = xtile / n * 360.0 - 180.0 lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / n))) lat_deg = math.degrees(lat_rad) return (lon_deg, lat_deg)
[ "def", "tile_coord", "(", "self", ",", "xtile", ",", "ytile", ",", "zoom", ")", ":", "assert", "self", ".", "tile_srid", "==", "3857", ",", "'Custom tile projection not supported yet'", "n", "=", "2.0", "**", "zoom", "lon_deg", "=", "xtile", "/", "n", "*", "360.0", "-", "180.0", "lat_rad", "=", "math", ".", "atan", "(", "math", ".", "sinh", "(", "math", ".", "pi", "*", "(", "1", "-", "2", "*", "ytile", "/", "n", ")", ")", ")", "lat_deg", "=", "math", ".", "degrees", "(", "lat_rad", ")", "return", "(", "lon_deg", ",", "lat_deg", ")" ]
49.538462
17.846154
def ilike_helper(default): """Helper function that performs an `ilike` query if a string value is passed, otherwise the normal default operation.""" @functools.wraps(default) def wrapped(x, y): # String values should use ILIKE queries. if isinstance(y, six.string_types) and not isinstance(x.type, sa.Enum): return x.ilike("%" + y + "%") else: return default(x, y) return wrapped
[ "def", "ilike_helper", "(", "default", ")", ":", "@", "functools", ".", "wraps", "(", "default", ")", "def", "wrapped", "(", "x", ",", "y", ")", ":", "# String values should use ILIKE queries.", "if", "isinstance", "(", "y", ",", "six", ".", "string_types", ")", "and", "not", "isinstance", "(", "x", ".", "type", ",", "sa", ".", "Enum", ")", ":", "return", "x", ".", "ilike", "(", "\"%\"", "+", "y", "+", "\"%\"", ")", "else", ":", "return", "default", "(", "x", ",", "y", ")", "return", "wrapped" ]
39.727273
13.545455
def _strategy(codes, context): """ Convert SRE regex parse tree to strategy that generates strings matching that regex represented by that parse tree. `codes` is either a list of SRE regex elements representations or a particular element representation. Each element is a tuple of element code (as string) and parameters. E.g. regex 'ab[0-9]+' compiles to following elements: [ ('literal', 97), ('literal', 98), ('max_repeat', (1, 4294967295, [ ('in', [ ('range', (48, 57)) ]) ])) ] The function recursively traverses regex element tree and converts each element to strategy that generates strings that match that element. Context stores 1. List of groups (for backreferences) 2. Active regex flags (e.g. IGNORECASE, DOTALL, UNICODE, they affect behavior of various inner strategies) """ if not isinstance(codes, tuple): # List of codes strategies = [] i = 0 while i < len(codes): if codes[i][0] == sre.LITERAL and not (context.flags & re.IGNORECASE): # Merge subsequent "literals" into one `just()` strategy # that generates corresponding text if no IGNORECASE j = i + 1 while j < len(codes) and codes[j][0] == sre.LITERAL: j += 1 strategies.append(hs.just( u''.join([six.unichr(charcode) for (_, charcode) in codes[i:j]]) )) i = j else: strategies.append(_strategy(codes[i], context)) i += 1 return hs.tuples(*strategies).map(u''.join) else: # Single code code, value = codes if code == sre.LITERAL: # Regex 'a' (single char) c = six.unichr(value) if context.flags & re.IGNORECASE: return hs.sampled_from([c.lower(), c.upper()]) else: return hs.just(c) elif code == sre.NOT_LITERAL: # Regex '[^a]' (negation of a single char) c = six.unichr(value) blacklist = set([c.lower(), c.upper()]) \ if context.flags & re.IGNORECASE else [c] return hs.characters(blacklist_characters=blacklist) elif code == sre.IN: # Regex '[abc0-9]' (set of characters) charsets = value builder = CharactersBuilder(negate=charsets[0][0] == sre.NEGATE, flags=context.flags) for charset_code, charset_value in charsets: if 
charset_code == sre.NEGATE: # Regex '[^...]' (negation) pass elif charset_code == sre.LITERAL: # Regex '[a]' (single char) builder.add_chars(six.unichr(charset_value)) elif charset_code == sre.RANGE: # Regex '[a-z]' (char range) low, high = charset_value for x in six.moves.range(low, high+1): builder.add_chars(six.unichr(x)) elif charset_code == sre.CATEGORY: # Regex '[\w]' (char category) builder.add_category(charset_value) else: raise he.InvalidArgument( 'Unknown charset code: %s' % charset_code ) return builder.strategy elif code == sre.ANY: # Regex '.' (any char) if context.flags & re.DOTALL: return hs.characters() else: return hs.characters(blacklist_characters="\n") elif code == sre.AT: # Regexes like '^...', '...$', '\bfoo', '\Bfoo' if value == sre.AT_END: return hs.one_of(hs.just(u''), hs.just(u'\n')) return hs.just('') elif code == sre.SUBPATTERN: # Various groups: '(...)', '(:...)' or '(?P<name>...)' old_flags = context.flags if HAS_SUBPATTERN_FLAGS: context.flags = (context.flags | value[1]) & ~value[2] strat = _strategy(value[-1], context) context.flags = old_flags if value[0]: context.groups[value[0]] = strat strat = hs.shared(strat, key=value[0]) return strat elif code == sre.GROUPREF: # Regex '\\1' or '(?P=name)' (group reference) return hs.shared(context.groups[value], key=value) elif code == sre.ASSERT: # Regex '(?=...)' or '(?<=...)' (positive lookahead/lookbehind) return _strategy(value[1], context) elif code == sre.ASSERT_NOT: # Regex '(?!...)' or '(?<!...)' (negative lookahead/lookbehind) return hs.just('') elif code == sre.BRANCH: # Regex 'a|b|c' (branch) return hs.one_of([_strategy(branch, context) for branch in value[1]]) elif code in [sre.MIN_REPEAT, sre.MAX_REPEAT]: # Regexes 'a?', 'a*', 'a+' and their non-greedy variants (repeaters) at_least, at_most, regex = value if at_most == 4294967295: at_most = None return hs.lists(_strategy(regex, context), min_size=at_least, max_size=at_most).map(''.join) elif code == sre.GROUPREF_EXISTS: # Regex 
'(?(id/name)yes-pattern|no-pattern)' (if group exists selection) return hs.one_of( _strategy(value[1], context), _strategy(value[2], context) if value[2] else hs.just(u''), ) else: raise he.InvalidArgument('Unknown code point: %s' % repr(code))
[ "def", "_strategy", "(", "codes", ",", "context", ")", ":", "if", "not", "isinstance", "(", "codes", ",", "tuple", ")", ":", "# List of codes", "strategies", "=", "[", "]", "i", "=", "0", "while", "i", "<", "len", "(", "codes", ")", ":", "if", "codes", "[", "i", "]", "[", "0", "]", "==", "sre", ".", "LITERAL", "and", "not", "(", "context", ".", "flags", "&", "re", ".", "IGNORECASE", ")", ":", "# Merge subsequent \"literals\" into one `just()` strategy", "# that generates corresponding text if no IGNORECASE", "j", "=", "i", "+", "1", "while", "j", "<", "len", "(", "codes", ")", "and", "codes", "[", "j", "]", "[", "0", "]", "==", "sre", ".", "LITERAL", ":", "j", "+=", "1", "strategies", ".", "append", "(", "hs", ".", "just", "(", "u''", ".", "join", "(", "[", "six", ".", "unichr", "(", "charcode", ")", "for", "(", "_", ",", "charcode", ")", "in", "codes", "[", "i", ":", "j", "]", "]", ")", ")", ")", "i", "=", "j", "else", ":", "strategies", ".", "append", "(", "_strategy", "(", "codes", "[", "i", "]", ",", "context", ")", ")", "i", "+=", "1", "return", "hs", ".", "tuples", "(", "*", "strategies", ")", ".", "map", "(", "u''", ".", "join", ")", "else", ":", "# Single code", "code", ",", "value", "=", "codes", "if", "code", "==", "sre", ".", "LITERAL", ":", "# Regex 'a' (single char)", "c", "=", "six", ".", "unichr", "(", "value", ")", "if", "context", ".", "flags", "&", "re", ".", "IGNORECASE", ":", "return", "hs", ".", "sampled_from", "(", "[", "c", ".", "lower", "(", ")", ",", "c", ".", "upper", "(", ")", "]", ")", "else", ":", "return", "hs", ".", "just", "(", "c", ")", "elif", "code", "==", "sre", ".", "NOT_LITERAL", ":", "# Regex '[^a]' (negation of a single char)", "c", "=", "six", ".", "unichr", "(", "value", ")", "blacklist", "=", "set", "(", "[", "c", ".", "lower", "(", ")", ",", "c", ".", "upper", "(", ")", "]", ")", "if", "context", ".", "flags", "&", "re", ".", "IGNORECASE", "else", "[", "c", "]", "return", "hs", ".", "characters", "(", 
"blacklist_characters", "=", "blacklist", ")", "elif", "code", "==", "sre", ".", "IN", ":", "# Regex '[abc0-9]' (set of characters)", "charsets", "=", "value", "builder", "=", "CharactersBuilder", "(", "negate", "=", "charsets", "[", "0", "]", "[", "0", "]", "==", "sre", ".", "NEGATE", ",", "flags", "=", "context", ".", "flags", ")", "for", "charset_code", ",", "charset_value", "in", "charsets", ":", "if", "charset_code", "==", "sre", ".", "NEGATE", ":", "# Regex '[^...]' (negation)", "pass", "elif", "charset_code", "==", "sre", ".", "LITERAL", ":", "# Regex '[a]' (single char)", "builder", ".", "add_chars", "(", "six", ".", "unichr", "(", "charset_value", ")", ")", "elif", "charset_code", "==", "sre", ".", "RANGE", ":", "# Regex '[a-z]' (char range)", "low", ",", "high", "=", "charset_value", "for", "x", "in", "six", ".", "moves", ".", "range", "(", "low", ",", "high", "+", "1", ")", ":", "builder", ".", "add_chars", "(", "six", ".", "unichr", "(", "x", ")", ")", "elif", "charset_code", "==", "sre", ".", "CATEGORY", ":", "# Regex '[\\w]' (char category)", "builder", ".", "add_category", "(", "charset_value", ")", "else", ":", "raise", "he", ".", "InvalidArgument", "(", "'Unknown charset code: %s'", "%", "charset_code", ")", "return", "builder", ".", "strategy", "elif", "code", "==", "sre", ".", "ANY", ":", "# Regex '.' 
(any char)", "if", "context", ".", "flags", "&", "re", ".", "DOTALL", ":", "return", "hs", ".", "characters", "(", ")", "else", ":", "return", "hs", ".", "characters", "(", "blacklist_characters", "=", "\"\\n\"", ")", "elif", "code", "==", "sre", ".", "AT", ":", "# Regexes like '^...', '...$', '\\bfoo', '\\Bfoo'", "if", "value", "==", "sre", ".", "AT_END", ":", "return", "hs", ".", "one_of", "(", "hs", ".", "just", "(", "u''", ")", ",", "hs", ".", "just", "(", "u'\\n'", ")", ")", "return", "hs", ".", "just", "(", "''", ")", "elif", "code", "==", "sre", ".", "SUBPATTERN", ":", "# Various groups: '(...)', '(:...)' or '(?P<name>...)'", "old_flags", "=", "context", ".", "flags", "if", "HAS_SUBPATTERN_FLAGS", ":", "context", ".", "flags", "=", "(", "context", ".", "flags", "|", "value", "[", "1", "]", ")", "&", "~", "value", "[", "2", "]", "strat", "=", "_strategy", "(", "value", "[", "-", "1", "]", ",", "context", ")", "context", ".", "flags", "=", "old_flags", "if", "value", "[", "0", "]", ":", "context", ".", "groups", "[", "value", "[", "0", "]", "]", "=", "strat", "strat", "=", "hs", ".", "shared", "(", "strat", ",", "key", "=", "value", "[", "0", "]", ")", "return", "strat", "elif", "code", "==", "sre", ".", "GROUPREF", ":", "# Regex '\\\\1' or '(?P=name)' (group reference)", "return", "hs", ".", "shared", "(", "context", ".", "groups", "[", "value", "]", ",", "key", "=", "value", ")", "elif", "code", "==", "sre", ".", "ASSERT", ":", "# Regex '(?=...)' or '(?<=...)' (positive lookahead/lookbehind)", "return", "_strategy", "(", "value", "[", "1", "]", ",", "context", ")", "elif", "code", "==", "sre", ".", "ASSERT_NOT", ":", "# Regex '(?!...)' or '(?<!...)' (negative lookahead/lookbehind)", "return", "hs", ".", "just", "(", "''", ")", "elif", "code", "==", "sre", ".", "BRANCH", ":", "# Regex 'a|b|c' (branch)", "return", "hs", ".", "one_of", "(", "[", "_strategy", "(", "branch", ",", "context", ")", "for", "branch", "in", "value", "[", "1", "]", "]", ")", "elif", 
"code", "in", "[", "sre", ".", "MIN_REPEAT", ",", "sre", ".", "MAX_REPEAT", "]", ":", "# Regexes 'a?', 'a*', 'a+' and their non-greedy variants (repeaters)", "at_least", ",", "at_most", ",", "regex", "=", "value", "if", "at_most", "==", "4294967295", ":", "at_most", "=", "None", "return", "hs", ".", "lists", "(", "_strategy", "(", "regex", ",", "context", ")", ",", "min_size", "=", "at_least", ",", "max_size", "=", "at_most", ")", ".", "map", "(", "''", ".", "join", ")", "elif", "code", "==", "sre", ".", "GROUPREF_EXISTS", ":", "# Regex '(?(id/name)yes-pattern|no-pattern)' (if group exists selection)", "return", "hs", ".", "one_of", "(", "_strategy", "(", "value", "[", "1", "]", ",", "context", ")", ",", "_strategy", "(", "value", "[", "2", "]", ",", "context", ")", "if", "value", "[", "2", "]", "else", "hs", ".", "just", "(", "u''", ")", ",", ")", "else", ":", "raise", "he", ".", "InvalidArgument", "(", "'Unknown code point: %s'", "%", "repr", "(", "code", ")", ")" ]
36.225
20.2875
def fastaAlignmentRead(fasta, mapFn=(lambda x : x), l=None): """ reads in columns of multiple alignment and returns them iteratively """ if l is None: l = _getMultiFastaOffsets(fasta) else: l = l[:] seqNo = len(l) for i in xrange(0, seqNo): j = open(fasta, 'r') j.seek(l[i]) l[i] = j column = [sys.maxint]*seqNo if seqNo != 0: while True: for j in xrange(0, seqNo): i = l[j].read(1) while i == '\n': i = l[j].read(1) column[j] = i if column[0] == '>' or column[0] == '': for j in xrange(1, seqNo): assert column[j] == '>' or column[j] == '' break for j in xrange(1, seqNo): assert column[j] != '>' and column[j] != '' column[j] = mapFn(column[j]) yield column[:] for i in l: i.close()
[ "def", "fastaAlignmentRead", "(", "fasta", ",", "mapFn", "=", "(", "lambda", "x", ":", "x", ")", ",", "l", "=", "None", ")", ":", "if", "l", "is", "None", ":", "l", "=", "_getMultiFastaOffsets", "(", "fasta", ")", "else", ":", "l", "=", "l", "[", ":", "]", "seqNo", "=", "len", "(", "l", ")", "for", "i", "in", "xrange", "(", "0", ",", "seqNo", ")", ":", "j", "=", "open", "(", "fasta", ",", "'r'", ")", "j", ".", "seek", "(", "l", "[", "i", "]", ")", "l", "[", "i", "]", "=", "j", "column", "=", "[", "sys", ".", "maxint", "]", "*", "seqNo", "if", "seqNo", "!=", "0", ":", "while", "True", ":", "for", "j", "in", "xrange", "(", "0", ",", "seqNo", ")", ":", "i", "=", "l", "[", "j", "]", ".", "read", "(", "1", ")", "while", "i", "==", "'\\n'", ":", "i", "=", "l", "[", "j", "]", ".", "read", "(", "1", ")", "column", "[", "j", "]", "=", "i", "if", "column", "[", "0", "]", "==", "'>'", "or", "column", "[", "0", "]", "==", "''", ":", "for", "j", "in", "xrange", "(", "1", ",", "seqNo", ")", ":", "assert", "column", "[", "j", "]", "==", "'>'", "or", "column", "[", "j", "]", "==", "''", "break", "for", "j", "in", "xrange", "(", "1", ",", "seqNo", ")", ":", "assert", "column", "[", "j", "]", "!=", "'>'", "and", "column", "[", "j", "]", "!=", "''", "column", "[", "j", "]", "=", "mapFn", "(", "column", "[", "j", "]", ")", "yield", "column", "[", ":", "]", "for", "i", "in", "l", ":", "i", ".", "close", "(", ")" ]
30.709677
14.322581
def _update_data_dict(self, data_dict, back_or_front): """ Adds spct if relevant, adds service """ data_dict['back_or_front'] = back_or_front # The percentage of used sessions based on 'scur' and 'slim' if 'slim' in data_dict and 'scur' in data_dict: try: data_dict['spct'] = (data_dict['scur'] / data_dict['slim']) * 100 except (TypeError, ZeroDivisionError): pass
[ "def", "_update_data_dict", "(", "self", ",", "data_dict", ",", "back_or_front", ")", ":", "data_dict", "[", "'back_or_front'", "]", "=", "back_or_front", "# The percentage of used sessions based on 'scur' and 'slim'", "if", "'slim'", "in", "data_dict", "and", "'scur'", "in", "data_dict", ":", "try", ":", "data_dict", "[", "'spct'", "]", "=", "(", "data_dict", "[", "'scur'", "]", "/", "data_dict", "[", "'slim'", "]", ")", "*", "100", "except", "(", "TypeError", ",", "ZeroDivisionError", ")", ":", "pass" ]
41.727273
15