text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def UsersUpdate(self, user_id, parameters):
    """
    Update the current user.

    @param user_id (int) - id of the user to be updated
    @param parameters (dictionary) - user object to update the user with

    @return (bool) - Boolean indicating whether UserUpdate was successful.
    """
    succeeded = self.__SenseApiCall__('/users/{0}.json'.format(user_id), 'PUT', parameters)
    if not succeeded:
        # record the failure reason so the caller can inspect it
        self.__error__ = "api call unsuccessful"
        return False
    return True
[ "def", "UsersUpdate", "(", "self", ",", "user_id", ",", "parameters", ")", ":", "if", "self", ".", "__SenseApiCall__", "(", "'/users/{0}.json'", ".", "format", "(", "user_id", ")", ",", "'PUT'", ",", "parameters", ")", ":", "return", "True", "else", ":", ...
40.214286
20.642857
def exists(self, client=None):
    """Test whether this notification exists.

    See:
    https://cloud.google.com/storage/docs/json_api/v1/notifications/get

    If :attr:`user_project` is set on the bucket, bills the API request
    to that project.

    :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType``
    :param client: Optional. The client to use. If not passed, falls back
                   to the ``client`` stored on the current bucket.

    :rtype: bool
    :returns: True, if the notification exists, else False.
    :raises ValueError: if the notification has no ID.
    """
    if self.notification_id is None:
        # FIX: error message previously read "intialized" (typo)
        raise ValueError("Notification not initialized by server")

    client = self._require_client(client)

    query_params = {}
    if self.bucket.user_project is not None:
        # bill the request to the bucket's designated project
        query_params["userProject"] = self.bucket.user_project

    try:
        client._connection.api_request(
            method="GET", path=self.path, query_params=query_params
        )
    except NotFound:
        return False
    else:
        return True
[ "def", "exists", "(", "self", ",", "client", "=", "None", ")", ":", "if", "self", ".", "notification_id", "is", "None", ":", "raise", "ValueError", "(", "\"Notification not intialized by server\"", ")", "client", "=", "self", ".", "_require_client", "(", "clie...
33.828571
23.314286
def generate_epochs_info(epoch_list):
    """Use epoch_list to generate the flat epoch_info list described below.

    Parameters
    ----------
    epoch_list: list of 3D (binary) array in shape [condition, nEpochs, nTRs]
        Contains specification of epochs and conditions, assuming
        1. all subjects have the same number of epochs;
        2. len(epoch_list) equals the number of subjects;
        3. an epoch is always a continuous time course.

    Returns
    -------
    epoch_info: list of tuple (label, sid, start, end)
        label is the condition label of the epoch;
        sid is the subject id, corresponding to the index of raw_data;
        start is the start TR of an epoch (inclusive);
        end is the end TR of an epoch (exclusive).
        Epochs of the same sid are adjacent in epoch_info.
    """
    t_begin = time.time()
    epoch_info = []
    for sid, mask in enumerate(epoch_list):
        for cond in range(mask.shape[0]):
            cond_mask = mask[cond, :, :]
            for eid in range(mask.shape[1]):
                length = np.sum(cond_mask[eid, :])
                # a zero-sum row means no epoch for this condition/slot
                if length > 0:
                    first_tr = np.nonzero(cond_mask[eid, :])[0][0]
                    epoch_info.append((cond, sid, first_tr, first_tr + length))
    t_end = time.time()
    logger.debug(
        'epoch separation done, takes %.2f s' % (t_end - t_begin)
    )
    return epoch_info
[ "def", "generate_epochs_info", "(", "epoch_list", ")", ":", "time1", "=", "time", ".", "time", "(", ")", "epoch_info", "=", "[", "]", "for", "sid", ",", "epoch", "in", "enumerate", "(", "epoch_list", ")", ":", "for", "cond", "in", "range", "(", "epoch"...
39.486486
18.351351
def select(self, value=None, field=None, **kwargs):
    """
    If the ``field`` argument is present, ``select`` finds a select box on the
    page and selects a particular option from it. Otherwise it finds an option
    inside the current scope and selects it.

    If the select box is a multiple select, ``select`` can be called multiple
    times to select more than one option. The select box can be found via its
    name, id, or label text. The option can be found by its text. ::

        page.select("March", field="Month")

    Args:
        value (str, optional): Which option to select.
        field (str, optional): The id, name, or label of the select box.
        **kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
    """
    if not field:
        self.find("option", value, **kwargs).select_option()
    else:
        # locate the select box first, then the option inside it
        select_box = self.find("select", field, **kwargs)
        select_box.find("option", value, **kwargs).select_option()
[ "def", "select", "(", "self", ",", "value", "=", "None", ",", "field", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "field", ":", "self", ".", "find", "(", "\"select\"", ",", "field", ",", "*", "*", "kwargs", ")", ".", "find", "(", "\"...
48.65
31.25
def _call_member(obj, name, failfast=True, *args, **kwargs): """ Calls the specified method, property or attribute of the given object Parameters ---------- obj : object The object that will be used name : str Name of method, property or attribute failfast : bool If True, will raise an exception when trying a method that doesn't exist. If False, will simply return None in that case args : list, optional, default=[] Arguments to be passed to the method (if any) kwargs: dict """ try: attr = getattr(obj, name) except AttributeError as e: if failfast: raise e else: return None try: if inspect.ismethod(attr): # call function return attr(*args, **kwargs) elif isinstance(attr, property): # call property return obj.attr else: # now it's an Attribute, so we can just return its value return attr except Exception as e: if failfast: raise e else: return None
[ "def", "_call_member", "(", "obj", ",", "name", ",", "failfast", "=", "True", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "attr", "=", "getattr", "(", "obj", ",", "name", ")", "except", "AttributeError", "as", "e", ":", "if", ...
29.777778
19.888889
def produce(self, obj, val, ctx=None):
    """ factory function to create primitives

    :param pyswagger.spec.v2_0.objects.Schema obj: spec to construct primitives
    :param val: value to construct primitives
    :return: the created primitive
    """
    # fall back to the schema default when no value was supplied;
    # if there is still no value, there is nothing to build
    val = obj.default if val == None else val
    if val == None:
        return None

    obj = deref(obj)
    # ctx carries construction state across recursive produce() calls
    ctx = {} if ctx == None else ctx
    if 'name' not in ctx and hasattr(obj, 'name'):
        ctx['name'] = obj.name
    if 'guard' not in ctx:
        ctx['guard'] = CycleGuard()
    if 'addp_schema' not in ctx:
        # Schema Object of additionalProperties
        ctx['addp_schema'] = None
    if 'addp' not in ctx:
        # additionalProperties
        ctx['addp'] = False
    if '2nd_pass' not in ctx:
        # 2nd pass processing function
        ctx['2nd_pass'] = None
    if 'factory' not in ctx:
        # primitive factory
        ctx['factory'] = self
    if 'read' not in ctx:
        # default is in 'read' context
        ctx['read'] = True

    # cycle guard: raises on self-referencing schemas
    ctx['guard'].update(obj)

    ret = None
    if obj.type:
        # resolve a creator function (plus optional 2nd-pass hook) by type/format
        creater, _2nd = self.get(_type=obj.type, _format=obj.format)
        if not creater:
            raise ValueError('Can\'t resolve type from:(' + str(obj.type) + ', ' + str(obj.format) + ')')
        ret = creater(obj, val, ctx)
        if _2nd:
            val = _2nd(obj, ret, val, ctx)
            ctx['2nd_pass'] = _2nd
    elif len(obj.properties) or obj.additionalProperties:
        # untyped but with properties: treat it as an object Model
        ret = Model()
        val = ret.apply_with(obj, val, ctx)

    if isinstance(ret, (Date, Datetime, Byte, File)):
        # it's meanless to handle allOf for these types.
        return ret

    def _apply(o, r, v, c):
        # apply one allOf sub-schema to the already-built primitive r
        if hasattr(ret, 'apply_with'):
            v = r.apply_with(o, v, c)
        else:
            _2nd = c['2nd_pass']
            if _2nd == None:
                _, _2nd = self.get(_type=o.type, _format=o.format)
            if _2nd:
                _2nd(o, r, v, c)
            # update it back to context
            c['2nd_pass'] = _2nd
        return v

    # handle allOf for Schema Object
    allOf = getattr(obj, 'allOf', None)
    if allOf:
        not_applied = []
        for a in allOf:
            a = deref(a)
            if not ret:
                # try to find right type for this primitive.
                ret = self.produce(a, val, ctx)
                # NOTE(review): is_member is assigned but never used afterwards
                is_member = hasattr(ret, 'apply_with')
            else:
                val = _apply(a, ret, val, ctx)

            if not ret:
                # if we still can't determine the type,
                # keep this Schema object for later use.
                not_applied.append(a)
        if ret:
            # now that a type is known, apply the deferred sub-schemas
            for a in not_applied:
                val = _apply(a, ret, val, ctx)

    if ret != None and hasattr(ret, 'cleanup'):
        val = ret.cleanup(val, ctx)

    return ret
[ "def", "produce", "(", "self", ",", "obj", ",", "val", ",", "ctx", "=", "None", ")", ":", "val", "=", "obj", ".", "default", "if", "val", "==", "None", "else", "val", "if", "val", "==", "None", ":", "return", "None", "obj", "=", "deref", "(", "...
33.483871
15.526882
def writeline(self, addition):
    """writeline()

    Behaves like a file.writeline() call, except the text accumulates in
    the object's cached memory (``self.held``) instead of file IO. The
    input is stripped before a trailing newline is appended.
    """
    self.held = "".join([self.held, addition.strip(), "\n"])
[ "def", "writeline", "(", "self", ",", "addition", ")", ":", "addition", "=", "addition", ".", "strip", "(", ")", "self", ".", "held", "=", "self", ".", "held", "+", "addition", "+", "\"\\n\"" ]
32.375
12.5
def zipline_magic(line, cell=None):
    """The zipline IPython cell magic."""
    load_extensions(
        default=True,
        extensions=[],
        strict=True,
        environ=os.environ,
    )
    # overrides go first so user-supplied arguments take precedence
    argv = ['--algotext', cell, '--output', os.devnull]
    if cell is None:
        # line-magic mode: use a non-None algo text and the IPython user_ns
        argv += ['--algotext', '', '--local-namespace']
    argv += line.split()
    prog_name = '%s%%zipline' % ((cell or '') and '%')
    try:
        # standalone_mode=False: don't use system exit and propagate errors
        # to the caller
        return run.main(argv, prog_name, standalone_mode=False)
    except SystemExit as e:
        # https://github.com/mitsuhiko/click/pull/533
        # even in standalone_mode=False `--help` really wants to kill us ;_;
        if e.code:
            raise ValueError('main returned non-zero status code: %d' % e.code)
[ "def", "zipline_magic", "(", "line", ",", "cell", "=", "None", ")", ":", "load_extensions", "(", "default", "=", "True", ",", "extensions", "=", "[", "]", ",", "strict", "=", "True", ",", "environ", "=", "os", ".", "environ", ",", ")", "try", ":", ...
38.129032
19.580645
def runtime_deps(self):  # install_requires
    """Returns list of runtime dependencies of the package specified in setup.py.

    Dependencies are in RPM SPECFILE format - see dependency_to_rpm() for
    details, but names are already transformed according to current distro.

    Returns:
        list of runtime dependencies of the package
    """
    # FIX: work on a copy — the original appended 'setuptools' directly to
    # the list stored in self.metadata, mutating the metadata as a side
    # effect (and growing it on repeated calls once the guard no longer held).
    install_requires = list(self.metadata['install_requires'])
    if self.metadata[
            'entry_points'] and 'setuptools' not in install_requires:
        install_requires.append('setuptools')  # entrypoints
    return sorted(self.name_convert_deps_list(deps_from_pyp_format(
        install_requires, runtime=True)))
[ "def", "runtime_deps", "(", "self", ")", ":", "# install_requires", "install_requires", "=", "self", ".", "metadata", "[", "'install_requires'", "]", "if", "self", ".", "metadata", "[", "'entry_points'", "]", "and", "'setuptools'", "not", "in", "install_requires",...
39.888889
21.666667
def _exception_message(excp):
    """Return the message from an exception as either a str or unicode object.
    Supports both Python 2 and Python 3.

    >>> msg = "Exception message"
    >>> excp = Exception(msg)
    >>> msg == _exception_message(excp)
    True

    >>> msg = u"unicöde"
    >>> excp = Exception(msg)
    >>> msg == _exception_message(excp)
    True
    """
    if isinstance(excp, Py4JJavaError):
        # 'Py4JJavaError' doesn't contain the stack trace available on the Java side in 'message'
        # attribute in Python 2. We should call 'str' function on this exception in general but
        # 'Py4JJavaError' has an issue about addressing non-ascii strings. So, here we work
        # around by the direct call, '__str__()'. Please see SPARK-23517.
        return excp.__str__()
    # EAFP: prefer the Python 2 style 'message' attribute when present
    try:
        return excp.message
    except AttributeError:
        return str(excp)
[ "def", "_exception_message", "(", "excp", ")", ":", "if", "isinstance", "(", "excp", ",", "Py4JJavaError", ")", ":", "# 'Py4JJavaError' doesn't contain the stack trace available on the Java side in 'message'", "# attribute in Python 2. We should call 'str' function on this exception in...
37.695652
20.217391
def creator(entry, config):
    """Preparing and creating script.

    Renders the configured script into a temporary .py file, then renders a
    language-specific shell wrapper template pointing at that file.

    :param entry: dict-like entry; optional 'type' key selects the wrapper
        template language (defaults to 'python').
    :param config: configuration object whose ``script`` is replaced by the
        rendered wrapper.
    :return: a Script built from the updated config.
    """
    script = render(config.script, model=config.model, env=config.env, item=config.item)
    # delete=False: the rendered script must outlive this function so the
    # generated shell wrapper can execute it later.
    with tempfile.NamedTemporaryFile(prefix="script-", suffix=".py",
                                     mode='w+t', delete=False) as temp:
        # FIX: the original used writelines() on a plain string, which writes
        # it character by character; write() stores it in one call.
        temp.write(script)

    language = 'python' if 'type' not in entry else entry['type']
    template_file = os.path.join(os.path.dirname(__file__),
                                 'templates/%s-script.sh.j2' % language)
    with open(template_file) as handle:
        template = handle.read()

    config.script = render(template, script=temp.name)
    return Script(config)
[ "def", "creator", "(", "entry", ",", "config", ")", ":", "script", "=", "render", "(", "config", ".", "script", ",", "model", "=", "config", ".", "model", ",", "env", "=", "config", ".", "env", ",", "item", "=", "config", ".", "item", ")", "temp", ...
41
27.875
def calledWithMatch(cls, spy, *args, **kwargs):  #pylint: disable=invalid-name
    """
    Checking the inspector is called with partial SinonMatcher(args/kwargs)
    Args: SinonSpy, args/kwargs
    """
    cls.__is_spy(spy)
    matched = spy.calledWithMatch(*args, **kwargs)
    if not matched:
        raise cls.failException(cls.message)
[ "def", "calledWithMatch", "(", "cls", ",", "spy", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "#pylint: disable=invalid-name", "cls", ".", "__is_spy", "(", "spy", ")", "if", "not", "(", "spy", ".", "calledWithMatch", "(", "*", "args", ",", "*"...
42.5
14.75
def to_match(self):
    """Return a unicode object with the MATCH representation of this expression."""
    self.validate()

    mark_name, field_name = self.fold_scope_location.get_location_name()
    validate_safe_string(mark_name)

    template = u'$%(mark_name)s.%(field_name)s'
    template_data = {'mark_name': mark_name}

    if field_name == COUNT_META_FIELD_NAME:
        template_data['field_name'] = 'size()'
    else:
        template_data['field_name'] = field_name
        inner_type = strip_non_null_from_type(self.field_type.of_type)
        # Known OrientDB bug may cause trouble here, and incorrect data may
        # be returned:
        # https://github.com/orientechnologies/orientdb/issues/7289
        if GraphQLDate.is_same_type(inner_type):
            template += '.format("' + STANDARD_DATE_FORMAT + '")'
        elif GraphQLDateTime.is_same_type(inner_type):
            template += '.format("' + STANDARD_DATETIME_FORMAT + '")'

    return template % template_data
[ "def", "to_match", "(", "self", ")", ":", "self", ".", "validate", "(", ")", "mark_name", ",", "field_name", "=", "self", ".", "fold_scope_location", ".", "get_location_name", "(", ")", "validate_safe_string", "(", "mark_name", ")", "template", "=", "u'$%(mark...
44.035714
25.071429
def hierarch_cluster(M):
    """Cluster matrix using hierarchical clustering.

    Parameters
    ----------
    M : np.ndarray
        Matrix, for example, distance matrix.

    Returns
    -------
    Mclus : np.ndarray
        Clustered matrix.
    indices : np.ndarray
        Indices used to cluster the matrix.
    """
    import scipy as sp
    import scipy.cluster
    link = sp.cluster.hierarchy.linkage(M)
    indices = sp.cluster.hierarchy.leaves_list(link)
    # reorder both columns and rows so clustered entries sit adjacently
    Mclus = np.array(M[:, indices])
    Mclus = Mclus[indices, :]
    # FIX: removed the unreachable `if False:` debug-plotting block, which
    # referenced an undocumented `pl` module and could never run.
    return Mclus, indices
[ "def", "hierarch_cluster", "(", "M", ")", ":", "import", "scipy", "as", "sp", "import", "scipy", ".", "cluster", "link", "=", "sp", ".", "cluster", ".", "hierarchy", ".", "linkage", "(", "M", ")", "indices", "=", "sp", ".", "cluster", ".", "hierarchy",...
23.92
17
def load_environment(global_conf, app_conf):
    """Configure the application environment.

    Merges global and app configuration into the module-level ``conf``,
    configures logging/errorware, builds the tax-benefit system (with
    extensions, reforms and caches) and stores results on ``model`` and
    several module-level globals.
    """
    # Merge decoded paste/WSGI configuration into the shared conf dict,
    # then validate/convert it with the conv pipeline.
    conf.update(strings.deep_decode(global_conf))
    conf.update(strings.deep_decode(app_conf))
    conf.update(conv.check(conv.struct(
        {
            'app_conf': conv.set_value(app_conf),
            'app_dir': conv.set_value(app_dir),
            'country_package': conv.pipe(
                conv.make_input_to_slug(separator = u'_'),
                conv.not_none,
                ),
            'debug': conv.pipe(conv.guess_bool, conv.default(False)),
            'global_conf': conv.set_value(global_conf),
            'i18n_dir': conv.default(os.path.join(app_dir, 'i18n')),
            'load_alert': conv.pipe(conv.guess_bool, conv.default(False)),
            'log_level': conv.pipe(
                conv.default('WARNING'),
                conv.function(lambda log_level: getattr(logging, log_level.upper())),
                ),
            'package_name': conv.default('openfisca-web-api'),
            'realm': conv.default(u'OpenFisca Web API'),
            'reforms': conv.ini_str_to_list,  # Another validation is done below.
            'extensions': conv.ini_str_to_list,
            },
        default = 'drop',
        ))(conf))

    # Configure logging.
    logging.basicConfig(level = conf['log_level'], stream = sys.stderr)
    errorware = conf.setdefault('errorware', {})
    errorware['debug'] = conf['debug']
    if not errorware['debug']:
        # production mode: wire up error e-mail reporting
        errorware['error_email'] = conf['email_to']
        errorware['error_log'] = conf.get('error_log', None)
        errorware['error_message'] = conf.get('error_message', 'An internal server error occurred')
        errorware['error_subject_prefix'] = conf.get('error_subject_prefix', 'OpenFisca Web API Error: ')
        errorware['from_address'] = conf['from_address']
        errorware['smtp_server'] = conf.get('smtp_server', 'localhost')
        errorware['show_exceptions_in_wsgi_errors'] = conf.get('show_exceptions_in_wsgi_errors', True)

    # Initialize tax-benefit system.
    country_package = importlib.import_module(conf['country_package'])
    tax_benefit_system = country_package.CountryTaxBenefitSystem()
    extensions = conf['extensions']
    if extensions is not None:
        for extension in extensions:
            tax_benefit_system.load_extension(extension)

    class Scenario(tax_benefit_system.Scenario):
        # In debug mode keep strong references; otherwise let entries expire.
        instance_and_error_couple_cache = {} if conf['debug'] else weakref.WeakValueDictionary()  # class attribute

        @classmethod
        def make_json_to_cached_or_new_instance(cls, ctx, repair, tax_benefit_system):
            def json_to_cached_or_new_instance(value, state = None):
                key = (unicode(ctx.lang), unicode(value), repair, tax_benefit_system)
                instance_and_error_couple = cls.instance_and_error_couple_cache.get(key)
                if instance_and_error_couple is None:
                    instance_and_error_couple = cls.make_json_to_instance(repair, tax_benefit_system)(
                        value, state = state or conv.default_state)
                    # Note: Call to ValueAndError() is needed below, otherwise it raises TypeError: cannot create
                    # weak reference to 'tuple' object.
                    cls.instance_and_error_couple_cache[key] = ValueAndError(instance_and_error_couple)
                return instance_and_error_couple

            return json_to_cached_or_new_instance

    tax_benefit_system.Scenario = Scenario
    model.tax_benefit_system = tax_benefit_system

    log.debug(u'Pre-fill tax and benefit system cache.')
    tax_benefit_system.prefill_cache()

    log.debug(u'Initialize reforms.')
    reforms = conv.check(
        conv.uniform_sequence(
            conv.module_and_function_str_to_function,
            )
        )(conf['reforms'])
    model.reforms = {}
    model.reformed_tbs = {}
    if reforms is not None:
        for reform in reforms:
            # each reform is a callable producing a reformed tax-benefit system
            reformed_tbs = reform(tax_benefit_system)
            key = reformed_tbs.key
            full_key = reformed_tbs.full_key
            model.reforms[key] = reform
            model.reformed_tbs[full_key] = reformed_tbs

    log.debug(u'Cache default decomposition.')
    if tax_benefit_system.decomposition_file_path is not None:
        # Ignore the returned value, because we just want to pre-compute the cache.
        model.get_cached_or_new_decomposition_json(tax_benefit_system)

    log.debug(u'Initialize lib2to3-based input variables extractor.')
    global country_package_dir_path
    # - Do not use pkg_resources.get_distribution(conf["country_package"]).location
    #   because it returns a wrong path in virtualenvs (<venv>/lib versus <venv>/local/lib)
    # - Use os.path.abspath because when the web API is runned in development with "paster serve",
    #   __path__[0] == 'openfisca_france' for example. Then, get_relative_file_path won't be able
    #   to find the relative path of an already relative path.
    country_package_dir_path = os.path.abspath(country_package.__path__[0])
    global api_package_version
    api_package_version = pkg_resources.get_distribution('openfisca_web_api').version
    global country_package_version
    country_package_version = pkg_resources.get_distribution(conf["country_package"]).version

    log.debug(u'Cache legislation parmeters')
    legislation = tax_benefit_system.parameters
    parameters = []
    walk_legislation(
        legislation,
        descriptions = [],
        parameters = parameters,
        path_fragments = [],
        )
    model.parameters_cache = parameters

    if not conf['debug']:
        # Do this after tax_benefit_system.get_legislation().
        log.debug(u'Compute and cache compact legislation for each first day of month since at least 2 legal years.')
        today = periods.instant(datetime.date.today())
        first_day_of_year = today.offset('first-of', 'year')
        instant = first_day_of_year.offset(-2, 'year')
        two_years_later = first_day_of_year.offset(2, 'year')
        while instant < two_years_later:
            tax_benefit_system.get_parameters_at_instant(instant)
            instant = instant.offset(1, 'month')

    # Initialize multiprocessing and load_alert
    if conf['load_alert']:
        global cpu_count
        cpu_count = multiprocessing.cpu_count()

    if conf.get('tracker_url') and conf.get('tracker_idsite'):
        wsgihelpers.init_tracker(conf['tracker_url'], conf['tracker_idsite'])
[ "def", "load_environment", "(", "global_conf", ",", "app_conf", ")", ":", "conf", ".", "update", "(", "strings", ".", "deep_decode", "(", "global_conf", ")", ")", "conf", ".", "update", "(", "strings", ".", "deep_decode", "(", "app_conf", ")", ")", "conf",...
44.894366
23.852113
def from_dictionary(cls, dictionary):
    """Parse a dictionary representing all command line parameters."""
    if isinstance(dictionary, dict):
        return cls(dictionary)
    raise TypeError('dictionary has to be a dict type, got: {}'.format(type(dictionary)))
[ "def", "from_dictionary", "(", "cls", ",", "dictionary", ")", ":", "if", "not", "isinstance", "(", "dictionary", ",", "dict", ")", ":", "raise", "TypeError", "(", "'dictionary has to be a dict type, got: {}'", ".", "format", "(", "type", "(", "dictionary", ")", ...
47
19
def raise_if(self, exception, message, *args, **kwargs):
    """
    If current exception has smaller priority than minimum, subclass of
    this class only warns user, otherwise normal exception will be raised.
    """
    severe_enough = issubclass(exception, self.minimum_defect)
    if severe_enough:
        raise exception(*args, **kwargs)
    # below the threshold: only emit a warning instead of raising
    warn(message, SyntaxWarning, *args, **kwargs)
[ "def", "raise_if", "(", "self", ",", "exception", ",", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "issubclass", "(", "exception", ",", "self", ".", "minimum_defect", ")", ":", "raise", "exception", "(", "*", "args", ",", "*...
47.75
15
def _multiplyThroughputs(self):
    '''
    Overrides base class in order to deal with opaque components.

    Skips leading components whose throughput is None and delegates the
    multiplication to the base class starting at the first component that
    actually carries a throughput.
    '''
    index = 0
    for component in self.components:
        # FIX: use `is not None` instead of `!= None` — throughput objects
        # may define __eq__ (e.g. array-valued results), which makes `!=`
        # return a non-boolean or misbehave.
        if component.throughput is not None:
            break
        index += 1
    return BaseObservationMode._multiplyThroughputs(self, index)
[ "def", "_multiplyThroughputs", "(", "self", ")", ":", "index", "=", "0", "for", "component", "in", "self", ".", "components", ":", "if", "component", ".", "throughput", "!=", "None", ":", "break", "index", "+=", "1", "return", "BaseObservationMode", ".", "...
32.8
20.4
def Handle_Search(self, msg):
    """
    Handle a search.

    :param msg: the received search
    :type msg: dict
    :returns: The message to reply with
    :rtype: str
    """
    term = msg['object']['searchTerm']
    matches = self.db.searchForItem(term)
    reply = {
        "status": "OK",
        "type": "search",
        "object": {
            "received search": term,
            "results": matches,
        },
    }
    return json.dumps(reply)
[ "def", "Handle_Search", "(", "self", ",", "msg", ")", ":", "search_term", "=", "msg", "[", "'object'", "]", "[", "'searchTerm'", "]", "results", "=", "self", ".", "db", ".", "searchForItem", "(", "search_term", ")", "reply", "=", "{", "\"status\"", ":", ...
26.857143
16.47619
def ldSet(self, what, key, value):
    """List/dictionary-aware set."""
    if not isListKey(key):
        what[key] = value
        return value
    # Make sure we keep the indexes consistent, insert missing_values
    # as necessary. We remember the lists so the missing values can be
    # removed after inserting all values from all selectors.
    self.lists[id(what)] = what
    ix = listKeyIndex(key)
    # pad with placeholders until the target index exists
    while len(what) <= ix:
        what.append(missing_value)
    what[ix] = value
    return value
[ "def", "ldSet", "(", "self", ",", "what", ",", "key", ",", "value", ")", ":", "if", "isListKey", "(", "key", ")", ":", "# Make sure we keep the indexes consistent, insert missing_values", "# as necessary. We do remember the lists, so that we can remove", "# missing values aft...
35.285714
17.142857
def rpy2():
    '''Lazily import the rpy2 module and cache it on LazyImport.'''
    if LazyImport.rpy2_module is None:
        try:
            rpy2 = __import__('rpy2.robjects')
        except ImportError:
            raise ImportError('The rpy2 module is required')
        LazyImport.rpy2_module = rpy2
        try:
            rpy2.forecast = rpy2.robjects.packages.importr('forecast')
        except Exception:
            # FIX: narrowed from a bare `except:` so that KeyboardInterrupt
            # and SystemExit are no longer swallowed; any failure while
            # loading the R package still surfaces as ImportError.
            raise ImportError('R and the "forecast" package are required')
        rpy2.ts = rpy2.robjects.r['ts']
        __import__('rpy2.robjects.numpy2ri')
        # enable automatic numpy <-> R conversion
        rpy2.robjects.numpy2ri.activate()
    return LazyImport.rpy2_module
[ "def", "rpy2", "(", ")", ":", "if", "LazyImport", ".", "rpy2_module", "is", "None", ":", "try", ":", "rpy2", "=", "__import__", "(", "'rpy2.robjects'", ")", "except", "ImportError", ":", "raise", "ImportError", "(", "'The rpy2 module is required'", ")", "LazyI...
41.125
14.875
def find_file(path, filename, max_depth=5):
    """Returns full filepath if the file is in path or a subdirectory."""
    for root, dirs, files in os.walk(path):
        if filename in files:
            return os.path.join(root, filename)
        # Don't search past max_depth: count separators below `path`
        depth = root[len(path) + 1:].count(os.sep)
        if depth > max_depth:
            dirs[:] = []  # clearing dirs stops os.walk from descending
    return None
[ "def", "find_file", "(", "path", ",", "filename", ",", "max_depth", "=", "5", ")", ":", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "path", ")", ":", "if", "filename", "in", "files", ":", "return", "os", ".", "path", "...
33.545455
11.272727
def add_nic(self, uuid, type, id=None, hwaddr=None):
    """
    Add a nic to a machine

    :param uuid: uuid of the kvm container (same as the used in create)
    :param type: nic_type # default, bridge, vlan, or vxlan (note, vlan and
        vxlan only supported by ovs)
    :param id: id # depends on the type: bridge name (bridge type), zerotier
        network id (zerotier type), the vlan tag or the vxlan id
    :param hwaddr: the hardware address of the nic
    :return:
    """
    payload = {
        'uuid': uuid,
        'type': type,
        'id': id,
        'hwaddr': hwaddr,
    }
    # validate the payload before dispatching the command
    self._man_nic_action_chk.check(payload)
    return self._client.json('kvm.add_nic', payload)
[ "def", "add_nic", "(", "self", ",", "uuid", ",", "type", ",", "id", "=", "None", ",", "hwaddr", "=", "None", ")", ":", "args", "=", "{", "'uuid'", ":", "uuid", ",", "'type'", ":", "type", ",", "'id'", ":", "id", ",", "'hwaddr'", ":", "hwaddr", ...
39.666667
23.888889
def p_pragma_assign(self, p):
    'pragma : LPAREN TIMES ID EQUALS expression TIMES RPAREN'
    # NOTE: the docstring above is the PLY grammar rule for this production —
    # it is functional (parsed by yacc), not documentation; do not reword it.
    # Builds a Pragma AST node whose entry maps the identifier (p[3]) to the
    # assigned expression (p[5]); matches e.g. `(* name = expr *)`.
    p[0] = Pragma(PragmaEntry(p[3], p[5], lineno=p.lineno(1)), lineno=p.lineno(1))
    # propagate the first token's line number to the result symbol
    p.set_lineno(0, p.lineno(1))
[ "def", "p_pragma_assign", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "Pragma", "(", "PragmaEntry", "(", "p", "[", "3", "]", ",", "p", "[", "5", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", ",", "lineno", "...
47.4
13.4
def send_empty(self, message):
    """
    Eventually remove from the observer list in case of a RST message.

    :type message: Message
    :param message: the message
    :return: the message unmodified
    """
    host, port = message.destination
    token_key = hash("".join([str(host), str(port), str(message.token)]))
    relations = self._relations
    # an RST from an observer cancels its registered observe relation
    if token_key in relations and message.type == defines.Types["RST"]:
        del relations[token_key]
    return message
[ "def", "send_empty", "(", "self", ",", "message", ")", ":", "host", ",", "port", "=", "message", ".", "destination", "key_token", "=", "hash", "(", "str", "(", "host", ")", "+", "str", "(", "port", ")", "+", "str", "(", "message", ".", "token", ")"...
37.153846
14.538462
def inserir(self, id_script_type, script, model, description):
    """Inserts a new Script and returns its identifier.

    :param id_script_type: Identifier of the Script Type. Integer value and
        greater than zero.
    :param script: Script name. String with a minimum 3 and maximum of 40
        characters
    :param description: Script description. String with a minimum 3 and
        maximum of 100 characters

    :return: Dictionary with the following structure:

    ::

        {'script': {'id': < id_script >}}

    :raise InvalidParameterError: The identifier of Script Type, script or
        description is null and invalid.
    :raise TipoRoteiroNaoExisteError: Script Type not registered.
    :raise NomeRoteiroDuplicadoError: Script already registered with informed.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    script_map = {
        'id_script_type': id_script_type,
        'script': script,
        'model': model,
        'description': description,
    }
    code, xml = self.submit({'script': script_map}, 'POST', 'script/')
    return self.response(code, xml)
[ "def", "inserir", "(", "self", ",", "id_script_type", ",", "script", ",", "model", ",", "description", ")", ":", "script_map", "=", "dict", "(", ")", "script_map", "[", "'id_script_type'", "]", "=", "id_script_type", "script_map", "[", "'script'", "]", "=", ...
44.571429
28.642857
def get_structure_with_nodes(self):
    """
    Get the modified structure with the voronoi nodes inserted. The
    species is set as a DummySpecie X.
    """
    decorated = Structure.from_sites(self.structure)
    for node in self.vnodes:
        decorated.append("X", node.frac_coords)
    return decorated
[ "def", "get_structure_with_nodes", "(", "self", ")", ":", "new_s", "=", "Structure", ".", "from_sites", "(", "self", ".", "structure", ")", "for", "v", "in", "self", ".", "vnodes", ":", "new_s", ".", "append", "(", "\"X\"", ",", "v", ".", "frac_coords", ...
35
9.444444
def replace(self, text=None):
    """
    Replaces the selected occurrence.

    :param text: The replacement text. If it is None, the lineEditReplace's
        text is used instead.
    :return True if the text could be replace properly, False if there is
        no more occurrences to replace.
    """
    # a bool may arrive here when triggered directly from a Qt signal
    if text is None or isinstance(text, bool):
        text = self.lineEditReplace.text()
    current_occurences = self._current_occurrence()
    occurrences = self.get_occurences()
    if current_occurences == -1:
        # nothing selected yet: move to the next occurrence first
        self.select_next()
        current_occurences = self._current_occurrence()
    try:
        # prevent search request due to editor textChanged
        try:
            self.editor.textChanged.disconnect(self.request_search)
        except (RuntimeError, TypeError):
            # already disconnected
            pass
        occ = occurrences[current_occurences]
        cursor = self.editor.textCursor()
        # select the occurrence span [occ[0], occ[1]) in the editor
        cursor.setPosition(occ[0])
        cursor.setPosition(occ[1], cursor.KeepAnchor)
        len_to_replace = len(cursor.selectedText())
        len_replacement = len(text)
        # offset by which all later occurrence positions shift
        offset = len_replacement - len_to_replace
        cursor.insertText(text)
        self.editor.setTextCursor(cursor)
        self._remove_occurrence(current_occurences, offset)
        current_occurences -= 1
        self._set_current_occurrence(current_occurences)
        self.select_next()
        # refresh match counter and UI state
        self.cpt_occurences = len(self.get_occurences())
        self._update_label_matches()
        self._update_buttons()
        return True
    except IndexError:
        # no occurrence left at that index: nothing more to replace
        return False
    finally:
        # always restore the live-search connection
        self.editor.textChanged.connect(self.request_search)
[ "def", "replace", "(", "self", ",", "text", "=", "None", ")", ":", "if", "text", "is", "None", "or", "isinstance", "(", "text", ",", "bool", ")", ":", "text", "=", "self", ".", "lineEditReplace", ".", "text", "(", ")", "current_occurences", "=", "sel...
40.244444
13.933333
def iter_keys(self, number=-1, etag=None):
    """Iterate over the public keys of this user.

    .. versionadded:: 0.5

    :param int number: (optional), number of keys to return. Default: -1
        returns all available keys
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of :class:`Key <Key>`\ s
    """
    keys_url = self._build_url('keys', base_url=self._api)
    return self._iter(int(number), keys_url, Key, etag=etag)
[ "def", "iter_keys", "(", "self", ",", "number", "=", "-", "1", ",", "etag", "=", "None", ")", ":", "url", "=", "self", ".", "_build_url", "(", "'keys'", ",", "base_url", "=", "self", ".", "_api", ")", "return", "self", ".", "_iter", "(", "int", "...
39.538462
18.153846
def procces_filters(all_needs, current_needlist):
    """
    Filters all needs with given configuration

    :param current_needlist: needlist object, which stores all filters
    :param all_needs: List of all needs inside document
    :return: list of needs, which passed the filters
    """
    sort_key = current_needlist["sort_by"]
    if sort_key is not None:
        if sort_key == "id":
            all_needs = sorted(all_needs, key=lambda node: node["id"])
        elif sort_key == "status":
            all_needs = sorted(all_needs, key=status_sorter)

    # Add all need_parts of given needs to the search list
    searchable_needs = prepare_need_list(all_needs)

    wanted_statuses = current_needlist["status"]
    wanted_tags = current_needlist["tags"]
    wanted_types = current_needlist["types"]

    needs_by_options = []
    for need_info in searchable_needs:
        # Status filter passes when no status was requested, or the need's
        # status is one of the requested ones.
        passes_status = (
            wanted_statuses is None
            or len(wanted_statuses) == 0
            or (need_info["status"] is not None
                and need_info["status"] in wanted_statuses)
        )
        # Tag filter passes when no tags were requested or at least one
        # tag is shared.
        passes_tags = (
            len(wanted_tags) == 0
            or len(set(need_info["tags"]) & set(wanted_tags)) > 0
        )
        # Type filter accepts both the short type and the full type name.
        passes_type = (
            len(wanted_types) == 0
            or need_info["type"] in wanted_types
            or need_info["type_name"] in wanted_types
        )
        if passes_status and passes_tags and passes_type:
            needs_by_options.append(need_info)

    needs_by_string = filter_needs(searchable_needs, current_needlist["filter"])

    # Keep only needs that satisfy both the option filters and the
    # filter-string expression.
    return check_need_list(needs_by_options, needs_by_string)
[ "def", "procces_filters", "(", "all_needs", ",", "current_needlist", ")", ":", "if", "current_needlist", "[", "\"sort_by\"", "]", "is", "not", "None", ":", "if", "current_needlist", "[", "\"sort_by\"", "]", "==", "\"id\"", ":", "all_needs", "=", "sorted", "(",...
39.54
24.14
def add_whitelist_entry(self, address, netmask, note=None):
    """
    Adds a new entry to this user's IP whitelist, if enabled.

    :param address: Network address of the whitelist entry.
    :param netmask: Netmask of the whitelist entry.
    :param note: Optional free-form note stored alongside the entry.
    :returns: The newly created WhitelistEntry.
    :raises UnexpectedResponseError: If the API response has no ``id`` field.
    """
    result = self._client.post("{}/whitelist".format(Profile.api_endpoint), data={
        "address": address,
        "netmask": netmask,
        "note": note,
    })

    # Idiomatic membership test (was: ``if not 'id' in result``).
    if 'id' not in result:
        raise UnexpectedResponseError("Unexpected response creating whitelist entry!")

    return WhitelistEntry(result['id'], self._client, json=result)
[ "def", "add_whitelist_entry", "(", "self", ",", "address", ",", "netmask", ",", "note", "=", "None", ")", ":", "result", "=", "self", ".", "_client", ".", "post", "(", "\"{}/whitelist\"", ".", "format", "(", "Profile", ".", "api_endpoint", ")", ",", "dat...
37.133333
20.6
def list_buckets(self, instance):
    """
    List the buckets for an instance.

    :param str instance: A Yamcs instance name.
    :rtype: ~collections.Iterable[.Bucket]
    """
    # Server does not do pagination on listings of this resource.
    # Return an iterator anyway for similarity with other API methods
    response = self._client.get_proto(path='/buckets/' + instance)
    message = rest_pb2.ListBucketsResponse()
    message.ParseFromString(response.content)
    wrapped = [Bucket(proto, instance, self)
               for proto in getattr(message, 'bucket')]
    return iter(wrapped)
[ "def", "list_buckets", "(", "self", ",", "instance", ")", ":", "# Server does not do pagination on listings of this resource.", "# Return an iterator anyway for similarity with other API methods", "response", "=", "self", ".", "_client", ".", "get_proto", "(", "path", "=", "'...
42.2
14.866667
def ssa(n,h,K,f,T):
    """ssa -- multi-stage (serial) safety stock allocation model
    Parameters:
        - n: number of stages
        - h[i]: inventory cost on stage i
        - K: number of linear segments
        - f: (non-linear) cost function
        - T[i]: production lead time on stage i
    Returns the model with the piecewise linear relation on added variables x, f, and z.
    """
    model = Model("safety stock allocation")

    # calculate endpoints for linear segments
    # a[i][k] = breakpoint k of stage i, b[i][k] = cost f(i,k) at that breakpoint
    a,b = {},{}
    for i in range(1,n+1):
        a[i] = [k for k in range(K)]
        b[i] = [f(i,k) for k in range(K)]

    # x: net replenishment time for stage i
    # y: corresponding cost
    # s: piecewise linear segment of variable x
    x,y,s = {},{},{}
    L = {} # service time of stage i
    for i in range(1,n+1):
        x[i],y[i],s[i] = convex_comb_sos(model,a[i],b[i])
        if i == 1:
            # first (customer-facing) stage has its service time fixed to zero (ub=0)
            L[i] = model.addVar(ub=0, vtype="C", name="L[%s]"%i)
        else:
            L[i] = model.addVar(vtype="C", name="L[%s]"%i)
    # the virtual supply stage n+1 also has zero service time
    L[n+1] = model.addVar(ub=0, vtype="C", name="L[%s]"%(n+1))

    for i in range(1,n+1):
        # net replenishment time for each stage i
        model.addCons(x[i] + L[i] == T[i] + L[i+1])

    # minimize total holding cost over all stages
    model.setObjective(quicksum(h[i]*y[i] for i in range(1,n+1)), "minimize")

    model.data = x,s,L
    return model
[ "def", "ssa", "(", "n", ",", "h", ",", "K", ",", "f", ",", "T", ")", ":", "model", "=", "Model", "(", "\"safety stock allocation\"", ")", "# calculate endpoints for linear segments", "a", ",", "b", "=", "{", "}", ",", "{", "}", "for", "i", "in", "ran...
32.65
17.825
def get_covers(work, args):
    """ Get missing covers.

    :param work: mapping of audio file/dir path -> (artist, album) to search for
    :param args: parsed command-line options (filename, format, size, ...)
    """
    with contextlib.ExitStack() as cm:
        # When embedding album art, covers are first downloaded to a temp dir.
        if args.filename == EMBEDDED_ALBUM_ART_SYMBOL:
            tmp_prefix = "%s_" % (os.path.splitext(os.path.basename(inspect.getfile(inspect.currentframe())))[0])
            tmp_dir = cm.enter_context(tempfile.TemporaryDirectory(prefix=tmp_prefix))

        # setup progress report
        stats = collections.OrderedDict(((k, 0) for k in("ok", "errors", "no result found")))
        progress = cm.enter_context(tqdm.tqdm(total=len(work),
                                              miniters=1,
                                              desc="Searching covers",
                                              unit="cover",
                                              postfix=stats))
        cm.enter_context(tqdm_logging.redirect_logging(progress))

        def update_progress(future):
            # Completion callback: classify the search result, update the
            # counters and advance the progress bar.
            path, cover_filepath, artist, album = futures[future]
            try:
                status = future.result()
            except Exception as exception:
                stats["errors"] += 1
                logging.getLogger("sacad_r").error("Error occured while searching cover for "
                                                   "'%s' by '%s' from '%s': %s %s" % (album, artist, path,
                                                                                      exception.__class__.__qualname__,
                                                                                      exception))
            else:
                if status:
                    if args.filename == EMBEDDED_ALBUM_ART_SYMBOL:
                        try:
                            embed_album_art(cover_filepath, path)
                        except Exception as exception:
                            stats["errors"] += 1
                            logging.getLogger("sacad_r").error("Error occured while embedding cover for "
                                                               "'%s' by '%s' from '%s': %s %s" % (album, artist, path,
                                                                                                  exception.__class__.__qualname__,
                                                                                                  exception))
                        else:
                            stats["ok"] += 1
                        finally:
                            # Temp cover file is no longer needed once embedded (or failed).
                            os.remove(cover_filepath)
                    else:
                        stats["ok"] += 1
                else:
                    stats["no result found"] += 1
                    logging.getLogger("sacad_r").warning("Unable to find cover for '%s' by '%s' from '%s'" % (album, artist, path))
            progress.set_postfix(stats, refresh=False)
            progress.update(1)

        # post work
        async_loop = asyncio.get_event_loop()
        i = 0
        # default event loop on Windows has a 512 fd limit, see https://docs.python.org/3/library/asyncio-eventloops.html#windows
        # also on Linux default max open fd limit is 1024 (ulimit -n)
        # so work in smaller chunks to avoid hitting fd limit
        # this also updates the progress faster (instead of working on all searches, work on finishing the chunk before
        # getting to the next one)
        work_chunk_length = 16
        for work_chunk in ichunk(work.items(), work_chunk_length):
            futures = {}
            # `i` keeps numbering continuous across chunks so temp filenames stay unique.
            for i, (path, (artist, album)) in enumerate(work_chunk, i):
                if args.filename == EMBEDDED_ALBUM_ART_SYMBOL:
                    cover_filepath = os.path.join(tmp_dir, "%00u.%s" % (i, args.format.name.lower()))
                else:
                    cover_filepath = os.path.join(path, args.filename)
                coroutine = sacad.search_and_download(album, artist, args.format, args.size, cover_filepath,
                                                      size_tolerance_prct=args.size_tolerance_prct,
                                                      amazon_tlds=args.amazon_tlds,
                                                      no_lq_sources=args.no_lq_sources,
                                                      async_loop=async_loop)
                future = asyncio.ensure_future(coroutine, loop=async_loop)
                futures[future] = (path, cover_filepath, artist, album)

            for future in futures:
                future.add_done_callback(update_progress)

            # wait for end of work
            root_future = asyncio.gather(*futures.keys(), loop=async_loop)
            async_loop.run_until_complete(root_future)
[ "def", "get_covers", "(", "work", ",", "args", ")", ":", "with", "contextlib", ".", "ExitStack", "(", ")", "as", "cm", ":", "if", "args", ".", "filename", "==", "EMBEDDED_ALBUM_ART_SYMBOL", ":", "tmp_prefix", "=", "\"%s_\"", "%", "(", "os", ".", "path", ...
51.284091
27.340909
def int(self, *args):
    """
    Return the integer stored in the specified node.

    Any type of integer will be decoded: byte, short, long, long long.
    Returns None when the node has no data or the width is unsupported.
    """
    data = self.bytes(*args)
    if data is None:
        return None
    # Little-endian struct format code for each supported width (in bytes).
    formats = {1: "<B", 2: "<H", 4: "<L", 8: "<Q"}
    fmt = formats.get(len(data))
    if fmt is not None:
        return struct.unpack(fmt, data)[0]
    print("can't get int from %s" % hexdump(data))
[ "def", "int", "(", "self", ",", "*", "args", ")", ":", "data", "=", "self", ".", "bytes", "(", "*", "args", ")", "if", "data", "is", "not", "None", ":", "if", "len", "(", "data", ")", "==", "1", ":", "return", "struct", ".", "unpack", "(", "\...
35.055556
14.833333
def _fill_pixels(one, other):
    # type: (_Raster, _Raster) -> _Raster
    """Merges two single band rasters with the same band by filling the pixels
    according to depth.

    Pixels already present (unmasked) in ``one`` win; pixels that are masked
    in ``one`` but unmasked in ``other`` are copied over from ``other``.
    """
    assert len(one.band_names) == len(other.band_names) == 1, "Rasters are not single band"

    # We raise an error in the intersection is empty.
    # Other options include returning an "empty" raster or just None.
    # The problem with the former is that GeoRaster2 expects a 2D or 3D
    # numpy array, so there is no obvious way to signal that this raster
    # has no bands. Also, returning a (1, 1, 0) numpy array is useless
    # for future concatenation, so the expected shape should be used
    # instead. The problem with the latter is that it breaks concatenation
    # anyway and requires special attention. Suggestions welcome.
    if one.band_names != other.band_names:
        raise ValueError("rasters have no bands in common, use another merge strategy")

    new_image = one.image.copy()
    other_image = other.image

    # The values that I want to mask are the ones that:
    # * Were already masked in the other array, _or_
    # * Were already unmasked in the one array, so I don't overwrite them
    other_values_mask = (np.ma.getmaskarray(other_image)[0]
                         | (~np.ma.getmaskarray(one.image)[0]))

    # Reshape the mask to fit the future array
    # (prepend the band axis so it broadcasts against the (1, H, W) image)
    other_values_mask = other_values_mask[None, ...]

    # Overwrite the values that I don't want to mask
    new_image[~other_values_mask] = other_image[~other_values_mask]

    # In other words, the values that I wanted to write are the ones that:
    # * Were already masked in the one array, _and_
    # * Were not masked in the other array
    # The reason for using the inverted form is to retain the semantics
    # of "masked=True" that apply for masked arrays. The same logic
    # could be written, using the De Morgan's laws, as
    # other_values_mask = (one.image.mask[0] & (~other_image.mask[0])
    # other_values_mask = other_values_mask[None, ...]
    # new_image[other_values_mask] = other_image[other_values_mask]
    # but here the word "mask" does not mean the same as in masked arrays.

    return _Raster(image=new_image, band_names=one.band_names)
[ "def", "_fill_pixels", "(", "one", ",", "other", ")", ":", "# type: (_Raster, _Raster) -> _Raster", "assert", "len", "(", "one", ".", "band_names", ")", "==", "len", "(", "other", ".", "band_names", ")", "==", "1", ",", "\"Rasters are not single band\"", "# We r...
49.5
24.840909
def scan(self): """Scan this node's dependents for implicit dependencies.""" # Don't bother scanning non-derived files, because we don't # care what their dependencies are. # Don't scan again, if we already have scanned. if self.implicit is not None: return self.implicit = [] self.implicit_set = set() self._children_reset() if not self.has_builder(): return build_env = self.get_build_env() executor = self.get_executor() # Here's where we implement --implicit-cache. if implicit_cache and not implicit_deps_changed: implicit = self.get_stored_implicit() if implicit is not None: # We now add the implicit dependencies returned from the # stored .sconsign entry to have already been converted # to Nodes for us. (We used to run them through a # source_factory function here.) # Update all of the targets with them. This # essentially short-circuits an N*M scan of the # sources for each individual target, which is a hell # of a lot more efficient. for tgt in executor.get_all_targets(): tgt.add_to_implicit(implicit) if implicit_deps_unchanged or self.is_up_to_date(): return # one of this node's sources has changed, # so we must recalculate the implicit deps for all targets for tgt in executor.get_all_targets(): tgt.implicit = [] tgt.implicit_set = set() # Have the executor scan the sources. executor.scan_sources(self.builder.source_scanner) # If there's a target scanner, have the executor scan the target # node itself and associated targets that might be built. scanner = self.get_target_scanner() if scanner: executor.scan_targets(scanner)
[ "def", "scan", "(", "self", ")", ":", "# Don't bother scanning non-derived files, because we don't", "# care what their dependencies are.", "# Don't scan again, if we already have scanned.", "if", "self", ".", "implicit", "is", "not", "None", ":", "return", "self", ".", "impl...
41.9375
17.729167
def put(self, request, id=None, **kwargs):
    """Handle PUT requests: update a single object, reject PUT on collections."""
    if not id:
        # No putting on a collection.
        return HttpResponseForbidden()
    obj = get_object_or_404(self.queryset(request), id=id)
    if self.has_update_permission(request, obj):
        return self.update_object(request, obj)
    return HttpResponseForbidden(_('You do not have permission to perform this action.'))
[ "def", "put", "(", "self", ",", "request", ",", "id", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "id", ":", "obj", "=", "get_object_or_404", "(", "self", ".", "queryset", "(", "request", ")", ",", "id", "=", "id", ")", "if", "not", "...
38.615385
16.461538
def process_paths(options, candidates=None, error=True):
    """Check the given paths, log each error, and optionally exit."""
    found = check_path(options, rootdir=CURDIR, candidates=candidates)

    # Log-line template per output format; 'parsable' is the fallback.
    patterns = {
        'pycodestyle': "%(filename)s:%(lnum)s:%(col)s: %(text)s",
        'pep8': "%(filename)s:%(lnum)s:%(col)s: %(text)s",
        'pylint': "%(filename)s:%(lnum)s: [%(type)s] %(text)s",
    }
    fmt = patterns.get(options.format,
                       "%(filename)s:%(lnum)s:%(col)s: [%(type)s] %(text)s")

    for err in found:
        if options.abspath:
            err._info['filename'] = op.abspath(err.filename)
        LOGGER.warning(fmt, err._info)

    if error:
        # Exit status 1 when any error was found, 0 otherwise.
        sys.exit(int(bool(found)))

    return found
[ "def", "process_paths", "(", "options", ",", "candidates", "=", "None", ",", "error", "=", "True", ")", ":", "errors", "=", "check_path", "(", "options", ",", "rootdir", "=", "CURDIR", ",", "candidates", "=", "candidates", ")", "if", "options", ".", "for...
33.85
20.7
def temp_output_file(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
    """
    Context manager yielding ``(fd, path)`` for a temporary file that is
    deleted when the context exits.

    Usage:
      with temp_output_file() as (fd, path):
        ...
    """
    return _temp_output(
        False,
        prefix=prefix,
        suffix=suffix,
        dir=dir,
        make_parents=make_parents,
        always_clean=always_clean,
    )
[ "def", "temp_output_file", "(", "prefix", "=", "\"tmp\"", ",", "suffix", "=", "\"\"", ",", "dir", "=", "None", ",", "make_parents", "=", "False", ",", "always_clean", "=", "False", ")", ":", "return", "_temp_output", "(", "False", ",", "prefix", "=", "pr...
37.818182
22.909091
def fatal(msg, exitcode=1, **kwargs):
    """Prints a message then exits the program.
    Optionally pause before exit with `pause=True` kwarg.

    :param msg: Message to print (prefixed with "[FATAL] ").
    :param exitcode: Process exit status (default 1).
    """
    # NOTE: Can't use normal arg named `pause` since function has same name.
    # pop() with a default replaces the redundant membership check
    # (was: kwargs.pop("pause") if "pause" in kwargs.keys() else False).
    pause_before_exit = kwargs.pop("pause", False)
    echo("[FATAL] " + msg, **kwargs)
    if pause_before_exit:
        pause()
    sys.exit(exitcode)
[ "def", "fatal", "(", "msg", ",", "exitcode", "=", "1", ",", "*", "*", "kwargs", ")", ":", "# NOTE: Can't use normal arg named `pause` since function has same name.", "pause_before_exit", "=", "kwargs", ".", "pop", "(", "\"pause\"", ")", "if", "\"pause\"", "in", "k...
44.444444
15.888889
def config_args(self):
    '''
    Returns an iterator of 2-tuples (config_name, value), one for each
    configuration option in this config. This is more-or-less an
    internal method, but see, e.g., launch_tor()'s implementation if you
    think you need to use this for something.

    See :meth:`txtorcon.TorConfig.create_torrc` which returns a string
    which is also a valid ``torrc`` file
    '''
    # Unsaved values take precedence over already-saved ones.
    merged = dict()
    merged.update(self.config)
    merged.update(self.unsaved)
    for key, value in list(merged.items()):
        if type(value) is not _ListWrapper:
            yield (str(key), str(value))
        elif key.lower() == 'hiddenservices':
            # Hidden services expand into their own attribute pairs.
            for service in value:
                for attr_name, attr_value in service.config_attributes():
                    yield (str(attr_name), str(attr_value))
        else:
            # FIXME actually, is this right? don't we want ALL
            # the values in one string?!
            for item in value:
                yield (str(key), str(item))
[ "def", "config_args", "(", "self", ")", ":", "everything", "=", "dict", "(", ")", "everything", ".", "update", "(", "self", ".", "config", ")", "everything", ".", "update", "(", "self", ".", "unsaved", ")", "for", "(", "k", ",", "v", ")", "in", "li...
36.133333
19.8
def register(cls, associations, backend, style_aliases={}):
    """
    Register the supplied dictionary of associations between elements
    and plotting classes to the specified backend.

    NOTE(review): `style_aliases={}` is a mutable default argument; it is
    only read here, but consider `style_aliases=None` as a safer default.
    """
    # Merge the new element -> plot class associations into the registry.
    if backend not in cls.registry:
        cls.registry[backend] = {}
    cls.registry[backend].update(associations)

    groups = Options._option_groups
    if backend not in cls._options:
        cls._options[backend] = OptionTree([], groups=groups)
    if backend not in cls._custom_options:
        cls._custom_options[backend] = {}

    for view_class, plot in cls.registry[backend].items():
        # Expand style option aliases, then drop any disabled options.
        expanded_opts = [opt for key in plot.style_opts
                         for opt in style_aliases.get(key, [])]
        style_opts = sorted(set(opt for opt in (expanded_opts + plot.style_opts)
                                if opt not in plot._disabled_opts))
        plot_opts = [k for k in plot.params().keys() if k not in ['name']]

        # Suppress param warnings while mutating the plot class attribute.
        with param.logging_level('CRITICAL'):
            plot.style_opts = style_opts

        plot_opts = Keywords(plot_opts, target=view_class.__name__)
        style_opts = Keywords(style_opts, target=view_class.__name__)

        opt_groups = {'plot': Options(allowed_keywords=plot_opts),
                      'output': Options(allowed_keywords=Options._output_allowed_kws),
                      'style': Options(allowed_keywords=style_opts),
                      'norm': Options(framewise=False, axiswise=False,
                                      allowed_keywords=['framewise', 'axiswise'])}

        name = view_class.__name__
        cls._options[backend][name] = opt_groups
[ "def", "register", "(", "cls", ",", "associations", ",", "backend", ",", "style_aliases", "=", "{", "}", ")", ":", "if", "backend", "not", "in", "cls", ".", "registry", ":", "cls", ".", "registry", "[", "backend", "]", "=", "{", "}", "cls", ".", "r...
47.972973
22.891892
def convert_to_vcard(name, value, allowed_object_type):
    """converts user input into vcard compatible data structures
    :param name: object name, only required for error messages
    :type name: str
    :param value: user input
    :type value: str or list(str)
    :param allowed_object_type: set the accepted return type for vcard attribute
    :type allowed_object_type: enum of type ObjectType
    :returns: cleaned user input, ready for vcard or a ValueError
    :rtype: str or list(str)
    """
    if isinstance(value, str):
        if allowed_object_type == ObjectType.list_with_strings:
            raise ValueError(
                "Error: " + name + " must not contain a single string.")
        return value.strip()

    if isinstance(value, list):
        if allowed_object_type == ObjectType.string:
            raise ValueError(
                "Error: " + name + " must not contain a list.")
        for entry in value:
            if not isinstance(entry, str):
                raise ValueError(
                    "Error: " + name + " must not contain a nested list")
        # filter out empty list items and strip leading and trailing space
        return [item.strip() for item in value if item]

    # Neither str nor list: report which type was expected.
    if allowed_object_type == ObjectType.string:
        raise ValueError(
            "Error: " + name + " must be a string.")
    if allowed_object_type == ObjectType.list_with_strings:
        raise ValueError(
            "Error: " + name + " must be a list with strings.")
    raise ValueError(
        "Error: " + name + " must be a string or a list with strings.")
[ "def", "convert_to_vcard", "(", "name", ",", "value", ",", "allowed_object_type", ")", ":", "if", "isinstance", "(", "value", ",", "str", ")", ":", "if", "allowed_object_type", "==", "ObjectType", ".", "list_with_strings", ":", "raise", "ValueError", "(", "\"E...
43.025641
17.564103
def write_fundamental(self, keyTimeValueDict):
    ''' write fundamental

    :param keyTimeValueDict: mapping of key/time -> value converted to rows
        by ``_fundamentalToSqls``
    '''
    # Lazily create all tables on the very first write only.
    if self.first:
        Base.metadata.create_all(self.__getEngine(), checkfirst=True)
        self.first=False

    sqls=self._fundamentalToSqls(keyTimeValueDict)
    session=self.Session()
    try:
        session.add_all(sqls)
    finally:
        # NOTE(review): there is no explicit session.commit() before the
        # scoped session is removed — presumably the session is configured
        # to flush/commit elsewhere; confirm, otherwise added rows are lost.
        self.Session.remove()
[ "def", "write_fundamental", "(", "self", ",", "keyTimeValueDict", ")", ":", "if", "self", ".", "first", ":", "Base", ".", "metadata", ".", "create_all", "(", "self", ".", "__getEngine", "(", ")", ",", "checkfirst", "=", "True", ")", "self", ".", "first",...
32.583333
16.75
def one(ctx, interactive, enable_phantomjs, enable_puppeteer, scripts):
    """
    One mode not only means all-in-one, it runs every thing in one process
    over tornado.ioloop, for debug purpose

    :param ctx: click context carrying the shared config object in ctx.obj
    :param interactive: run the scheduler interactively
    :param enable_phantomjs: spawn a phantomjs fetcher proxy
    :param enable_puppeteer: spawn a puppeteer fetcher proxy
    :param scripts: optional path of local project scripts to load
    """
    ctx.obj['debug'] = False
    g = ctx.obj
    g['testing_mode'] = True

    if scripts:
        from pyspider.database.local.projectdb import ProjectDB
        g['projectdb'] = ProjectDB(scripts)
        if g.get('is_taskdb_default'):
            g['taskdb'] = connect_database('sqlite+taskdb://')
        if g.get('is_resultdb_default'):
            g['resultdb'] = None

    if enable_phantomjs:
        phantomjs_config = g.config.get('phantomjs', {})
        phantomjs_obj = ctx.invoke(phantomjs, **phantomjs_config)
        if phantomjs_obj:
            g.setdefault('phantomjs_proxy', '127.0.0.1:%s' % phantomjs_obj.port)
    else:
        phantomjs_obj = None

    if enable_puppeteer:
        puppeteer_config = g.config.get('puppeteer', {})
        puppeteer_obj = ctx.invoke(puppeteer, **puppeteer_config)
        if puppeteer_obj:
            # BUG FIX: read the port from the spawned object, not from the
            # `puppeteer` command function (mirrors the phantomjs branch).
            g.setdefault('puppeteer_proxy', '127.0.0.1:%s' % puppeteer_obj.port)
    else:
        puppeteer_obj = None

    result_worker_config = g.config.get('result_worker', {})
    if g.resultdb is None:
        result_worker_config.setdefault('result_cls', 'pyspider.result.OneResultWorker')
    result_worker_obj = ctx.invoke(result_worker, **result_worker_config)

    processor_config = g.config.get('processor', {})
    processor_config.setdefault('enable_stdout_capture', False)
    processor_obj = ctx.invoke(processor, **processor_config)

    fetcher_config = g.config.get('fetcher', {})
    fetcher_config.setdefault('xmlrpc', False)
    fetcher_obj = ctx.invoke(fetcher, **fetcher_config)

    scheduler_config = g.config.get('scheduler', {})
    scheduler_config.setdefault('xmlrpc', False)
    scheduler_config.setdefault('scheduler_cls', 'pyspider.scheduler.OneScheduler')
    scheduler_obj = ctx.invoke(scheduler, **scheduler_config)

    # Wire every component into the single-process scheduler.
    scheduler_obj.init_one(ioloop=fetcher_obj.ioloop,
                           fetcher=fetcher_obj,
                           processor=processor_obj,
                           result_worker=result_worker_obj,
                           interactive=interactive)
    if scripts:
        for project in g.projectdb.projects:
            scheduler_obj.trigger_on_start(project)

    try:
        scheduler_obj.run()
    finally:
        # Always shut down the scheduler and any spawned browser proxies.
        scheduler_obj.quit()
        if phantomjs_obj:
            phantomjs_obj.quit()
        if puppeteer_obj:
            puppeteer_obj.quit()
[ "def", "one", "(", "ctx", ",", "interactive", ",", "enable_phantomjs", ",", "enable_puppeteer", ",", "scripts", ")", ":", "ctx", ".", "obj", "[", "'debug'", "]", "=", "False", "g", "=", "ctx", ".", "obj", "g", "[", "'testing_mode'", "]", "=", "True", ...
36.352113
19.957746
def linkToChannelInputFile(self, session, channelInputFile, force=False): """ Create database relationships between the link node dataset and the channel input file. The link node dataset only stores references to the links and nodes--not the geometry. The link and node geometries are stored in the channel input file. The two files must be linked with database relationships to allow the creation of link node dataset visualizations. This process is not performed automatically during reading, because it can be very costly in terms of read time. This operation can only be performed after both files have been read into the database. Args: session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database channelInputFile (:class:`gsshapy.orm.ChannelInputFile`): Channel input file object to be associated with this link node dataset file. force (bool, optional): Force channel input file reassignment. When false (default), channel input file assignment is skipped if it has already been performed. 
""" # Only perform operation if the channel input file has not been assigned or the force parameter is true if self.channelInputFile is not None and not force: return # Set the channel input file relationship self.channelInputFile = channelInputFile # Retrieve the fluvial stream links orderedLinks = channelInputFile.getOrderedLinks(session) # Retrieve the LinkNodeTimeStep objects timeSteps = self.timeSteps # Link each link dataset in each time step for timeStep in timeSteps: # Retrieve link datasets linkDatasets = timeStep.linkDatasets # Link each node dataset for l, linkDataset in enumerate(linkDatasets): # Get the fluvial link and nodes streamLink = orderedLinks[l] streamNodes = streamLink.nodes # Link link datasets to fluvial links linkDataset.link = streamLink # Retrieve node datasets nodeDatasets = linkDataset.nodeDatasets # Link the node dataset with the channel input file nodes if len(nodeDatasets) > 0 and len(streamNodes) > 0: for n, nodeDataset in enumerate(nodeDatasets): nodeDataset.node = streamNodes[n] session.add(self) session.commit()
[ "def", "linkToChannelInputFile", "(", "self", ",", "session", ",", "channelInputFile", ",", "force", "=", "False", ")", ":", "# Only perform operation if the channel input file has not been assigned or the force parameter is true", "if", "self", ".", "channelInputFile", "is", ...
45.589286
28.767857
def rewrite_elife_authors_json(json_content, doi):
    """
    Apply hand-curated fixes to eLife author JSON.

    ``json_content`` is a list of author dicts; ``doi`` selects which
    article's edge-case corrections (affiliation name, ORCID, competing
    interests statements) should be applied. Returns the mutated list.
    """
    # Convert doi from testing doi if applicable
    article_doi = elifetools.utils.convert_testing_doi(doi)

    # Edge case: fix an affiliation name
    if article_doi == "10.7554/eLife.06956":
        for i, ref in enumerate(json_content):
            if ref.get("orcid") == "0000-0001-6798-0064":
                json_content[i]["affiliations"][0]["name"] = ["Cambridge"]

    # Edge case: fix a malformed ORCID
    if article_doi == "10.7554/eLife.09376":
        for i, ref in enumerate(json_content):
            if ref.get("orcid") == "000-0001-7224-925X":
                json_content[i]["orcid"] = "0000-0001-7224-925X"

    # Edge cases: per-author competing interests statements
    if article_doi == "10.7554/eLife.00102":
        for i, ref in enumerate(json_content):
            if ref.get("competingInterests"):
                continue
            index_name = ref["name"]["index"]
            if index_name.startswith("Chen,"):
                json_content[i]["competingInterests"] = "ZJC: Reviewing Editor, <i>eLife</i>"
            elif index_name.startswith("Li,"):
                json_content[i]["competingInterests"] = "The remaining authors have no competing interests to declare."
    if article_doi == "10.7554/eLife.00270":
        for i, ref in enumerate(json_content):
            if not ref.get("competingInterests") and ref["name"]["index"].startswith("Patterson,"):
                json_content[i]["competingInterests"] = "MP: Managing Executive Editor, <i>eLife</i>"

    # Blanket competing-interests statements keyed by article DOI. Most
    # articles share one of two standard wordings, so build the map from
    # grouped lists instead of one assignment per DOI.
    standard_statement = "The authors declare that no competing interests exist."
    other_statement = "The other authors declare that no competing interests exist."
    standard_articles = [
        "00133", "00190", "00288", "00362", "00633", "02935", "04126", "04878",
        "06011", "06416", "08421", "08494", "08924", "09102", "09460", "09591",
        "09600", "10113", "10230", "10453", "10635", "11407", "11473", "11750",
        "12217", "12620", "12724", "13023", "13732", "14116", "14258", "14694",
        "16011", "16940", "17023", "17092", "17218", "17267", "17523", "17556",
        "17769", "17834", "18101", "18515", "18544", "18648", "19071", "19334",
        "19510", "20183", "20242", "20375", "21454", "22187",
    ]
    other_articles = [
        "00592", "02725", "05322", "07383", "08648", "15085", "15312",
        "20797", "21491",
    ]
    elife_author_competing_interests = {
        "10.7554/eLife." + number: standard_statement
        for number in standard_articles}
    elife_author_competing_interests.update(
        ("10.7554/eLife." + number, other_statement)
        for number in other_articles)
    # Articles with one-off wordings (typos preserved from the originals)
    elife_author_competing_interests.update({
        "10.7554/eLife.00230": "The authors have declared that no competing interests exist",
        "10.7554/eLife.00352": "The author declares that no competing interest exist",
        "10.7554/eLife.00475": "The remaining authors have no competing interests to declare.",
        "10.7554/eLife.09083": "The other authors declare that no competing interests exists.",
    })

    if article_doi in elife_author_competing_interests:
        statement = elife_author_competing_interests[article_doi]
        for i, ref in enumerate(json_content):
            if not ref.get("competingInterests"):
                json_content[i]["competingInterests"] = statement

    # Rewrite "other/remaining authors declare ..." competing interests
    # statements using a string prefix match
    rewrite_prefixes = ("The other author",
                        "The others author",
                        "The remaining authors",
                        "The remaining have declared")
    for i, ref in enumerate(json_content):
        statement = ref.get("competingInterests")
        if statement and statement.startswith(rewrite_prefixes):
            json_content[i]["competingInterests"] = "No competing interests declared."

    return json_content
[ "def", "rewrite_elife_authors_json", "(", "json_content", ",", "doi", ")", ":", "# Convert doi from testing doi if applicable", "article_doi", "=", "elifetools", ".", "utils", ".", "convert_testing_doi", "(", "doi", ")", "# Edge case fix an affiliation name", "if", "article...
89.135593
55.389831
def isNumber(self, value): """ Validate whether a value is a number or not """ try: str(value) float(value) return True except ValueError: return False
[ "def", "isNumber", "(", "self", ",", "value", ")", ":", "try", ":", "str", "(", "value", ")", "float", "(", "value", ")", "return", "True", "except", "ValueError", ":", "return", "False" ]
20.909091
15.818182
def parse(self, inputstring, document): """Parse the nblink file. Adds the linked file as a dependency, read the file, and pass the content to the nbshpinx.NotebookParser. """ link = json.loads(inputstring) env = document.settings.env source_dir = os.path.dirname(env.doc2path(env.docname)) abs_path = os.path.normpath(os.path.join(source_dir, link['path'])) path = utils.relative_path(None, abs_path) path = nodes.reprunicode(path) document.settings.record_dependencies.add(path) env.note_dependency(path) target_root = env.config.nbsphinx_link_target_root target = utils.relative_path(target_root, abs_path) target = nodes.reprunicode(target).replace(os.path.sep, '/') env.metadata[env.docname]['nbsphinx-link-target'] = target # Copy parser from nbsphinx for our cutom format try: formats = env.config.nbsphinx_custom_formats except AttributeError: pass else: formats.setdefault( '.nblink', lambda s: nbformat.reads(s, as_version=_ipynbversion)) try: include_file = io.FileInput(source_path=path, encoding='utf8') except UnicodeEncodeError as error: raise NotebookError(u'Problems with linked notebook "%s" path:\n' 'Cannot encode input file path "%s" ' '(wrong locale?).' % (env.docname, SafeString(path))) except IOError as error: raise NotebookError(u'Problems with linked notebook "%s" path:\n%s.' % (env.docname, ErrorString(error))) try: rawtext = include_file.read() except UnicodeError as error: raise NotebookError(u'Problem with linked notebook "%s":\n%s' % (env.docname, ErrorString(error))) return super(LinkedNotebookParser, self).parse(rawtext, document)
[ "def", "parse", "(", "self", ",", "inputstring", ",", "document", ")", ":", "link", "=", "json", ".", "loads", "(", "inputstring", ")", "env", "=", "document", ".", "settings", ".", "env", "source_dir", "=", "os", ".", "path", ".", "dirname", "(", "e...
41.346939
21.510204
def scan_to_module(python_modules, module, ignore=tuple()): """ Scans `python_modules` with :py:func:`scan` and adds found providers to `module`'s :py:attr:`wiring.configuration.Module.providers`. `ignore` argument is passed through to :py:func:`scan`. """ def callback(specification, provider): module.providers[specification] = provider scan(python_modules, callback, ignore=ignore)
[ "def", "scan_to_module", "(", "python_modules", ",", "module", ",", "ignore", "=", "tuple", "(", ")", ")", ":", "def", "callback", "(", "specification", ",", "provider", ")", ":", "module", ".", "providers", "[", "specification", "]", "=", "provider", "sca...
41.2
15.8
def com_google_fonts_check_fsselection(ttFont, style): """Checking OS/2 fsSelection value.""" from fontbakery.utils import check_bit_entry from fontbakery.constants import (STATIC_STYLE_NAMES, RIBBI_STYLE_NAMES, FsSelection) # Checking fsSelection REGULAR bit: expected = "Regular" in style or \ (style in STATIC_STYLE_NAMES and style not in RIBBI_STYLE_NAMES and "Italic" not in style) yield check_bit_entry(ttFont, "OS/2", "fsSelection", expected, bitmask=FsSelection.REGULAR, bitname="REGULAR") # Checking fsSelection ITALIC bit: expected = "Italic" in style yield check_bit_entry(ttFont, "OS/2", "fsSelection", expected, bitmask=FsSelection.ITALIC, bitname="ITALIC") # Checking fsSelection BOLD bit: expected = style in ["Bold", "BoldItalic"] yield check_bit_entry(ttFont, "OS/2", "fsSelection", expected, bitmask=FsSelection.BOLD, bitname="BOLD")
[ "def", "com_google_fonts_check_fsselection", "(", "ttFont", ",", "style", ")", ":", "from", "fontbakery", ".", "utils", "import", "check_bit_entry", "from", "fontbakery", ".", "constants", "import", "(", "STATIC_STYLE_NAMES", ",", "RIBBI_STYLE_NAMES", ",", "FsSelectio...
39.266667
10.8
def police_priority_map_conform_map_pri7_conform(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") police_priority_map = ET.SubElement(config, "police-priority-map", xmlns="urn:brocade.com:mgmt:brocade-policer") name_key = ET.SubElement(police_priority_map, "name") name_key.text = kwargs.pop('name') conform = ET.SubElement(police_priority_map, "conform") map_pri7_conform = ET.SubElement(conform, "map-pri7-conform") map_pri7_conform.text = kwargs.pop('map_pri7_conform') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "police_priority_map_conform_map_pri7_conform", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "police_priority_map", "=", "ET", ".", "SubElement", "(", "config", ",", "\"police-priority-map\"", ...
49.846154
20.846154
def _close_thread(self, thread, thread_name): """Closes daemon threads @param thread: the thread to close @param thread_name: a human readable name of the thread """ if thread is not None and thread.isAlive(): self.logger.debug("Waiting for {} thread to close".format(thread_name)) thread.join(timeout=self.settings['DAEMON_THREAD_JOIN_TIMEOUT']) if thread.isAlive(): self.logger.warn("{} daemon thread unable to be shutdown" " within timeout".format(thread_name))
[ "def", "_close_thread", "(", "self", ",", "thread", ",", "thread_name", ")", ":", "if", "thread", "is", "not", "None", "and", "thread", ".", "isAlive", "(", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Waiting for {} thread to close\"", ".", "for...
48.25
19.333333
def _selectTransition(self, allocentricLocation, objectDict, visitCounts): """ Choose the transition that lands us in the location we've touched the least often. Break ties randomly, i.e. choose the first candidate in a shuffled list. """ candidates = list(transition for transition in self.transitions.keys() if (allocentricLocation[0] + transition[0], allocentricLocation[1] + transition[1]) in objectDict) random.shuffle(candidates) selectedVisitCount = None selectedTransition = None selectedAllocentricLocation = None for transition in candidates: candidateLocation = (allocentricLocation[0] + transition[0], allocentricLocation[1] + transition[1]) if (selectedVisitCount is None or visitCounts[candidateLocation] < selectedVisitCount): selectedVisitCount = visitCounts[candidateLocation] selectedTransition = transition selectedAllocentricLocation = candidateLocation return selectedAllocentricLocation, selectedTransition
[ "def", "_selectTransition", "(", "self", ",", "allocentricLocation", ",", "objectDict", ",", "visitCounts", ")", ":", "candidates", "=", "list", "(", "transition", "for", "transition", "in", "self", ".", "transitions", ".", "keys", "(", ")", "if", "(", "allo...
39.178571
21.678571
def ar1_gen(rho, mu, sigma, size=1): """Create an autoregressive series of order one AR(1) generator. .. math:: X_t = \mu_t + \rho (X_{t-1}-\mu_{t-1} + \epsilon_t If mu is a sequence and size > len(mu), the algorithm loops through mu. :Stochastics: rho : scalar in [0,1] mu : scalar or sequence sigma : scalar > 0 size : integer """ mu = np.asarray(mu, float) mu = np.resize(mu, size) r = mu.copy() r += np.random.randn(size) * sigma r[0] = np.random.randn(1) * sigma / np.sqrt(1 - rho ** 2) i = 0 while True: yield r[i] i += 1 if i == size: break r[i] += rho * (r[i - 1] - mu[i - 1])
[ "def", "ar1_gen", "(", "rho", ",", "mu", ",", "sigma", ",", "size", "=", "1", ")", ":", "mu", "=", "np", ".", "asarray", "(", "mu", ",", "float", ")", "mu", "=", "np", ".", "resize", "(", "mu", ",", "size", ")", "r", "=", "mu", ".", "copy",...
25.814815
19.481481
def decode_field(self, field, value): """Decode the given JSON value. Args: field: a messages.Field for the field we're decoding. value: a python value we'd like to decode. Returns: A value suitable for assignment to field. """ for decoder in _GetFieldCodecs(field, 'decoder'): result = decoder(field, value) value = result.value if result.complete: return value if isinstance(field, messages.MessageField): field_value = self.decode_message( field.message_type, json.dumps(value)) elif isinstance(field, messages.EnumField): value = GetCustomJsonEnumMapping( field.type, json_name=value) or value try: field_value = super( _ProtoJsonApiTools, self).decode_field(field, value) except messages.DecodeError: if not isinstance(value, six.string_types): raise field_value = None else: field_value = super( _ProtoJsonApiTools, self).decode_field(field, value) return field_value
[ "def", "decode_field", "(", "self", ",", "field", ",", "value", ")", ":", "for", "decoder", "in", "_GetFieldCodecs", "(", "field", ",", "'decoder'", ")", ":", "result", "=", "decoder", "(", "field", ",", "value", ")", "value", "=", "result", ".", "valu...
37.3125
14.5625
def decode_transaction_input(self, transaction_hash: bytes) -> Dict: """Return inputs of a method call""" transaction = self.contract.web3.eth.getTransaction( transaction_hash, ) return self.contract.decode_function_input( transaction['input'], )
[ "def", "decode_transaction_input", "(", "self", ",", "transaction_hash", ":", "bytes", ")", "->", "Dict", ":", "transaction", "=", "self", ".", "contract", ".", "web3", ".", "eth", ".", "getTransaction", "(", "transaction_hash", ",", ")", "return", "self", "...
33.666667
19.888889
def set_nonblock(fd): # type: (int) -> None """Set the given file descriptor to non-blocking mode.""" fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
[ "def", "set_nonblock", "(", "fd", ")", ":", "# type: (int) -> None", "fcntl", ".", "fcntl", "(", "fd", ",", "fcntl", ".", "F_SETFL", ",", "fcntl", ".", "fcntl", "(", "fd", ",", "fcntl", ".", "F_GETFL", ")", "|", "os", ".", "O_NONBLOCK", ")" ]
36.5
14.666667
def write(self, arg): """ Write a string or bytes object to the buffer """ if isinstance(arg, str): arg = arg.encode(self.encoding) return self._buffer.write(arg)
[ "def", "write", "(", "self", ",", "arg", ")", ":", "if", "isinstance", "(", "arg", ",", "str", ")", ":", "arg", "=", "arg", ".", "encode", "(", "self", ".", "encoding", ")", "return", "self", ".", "_buffer", ".", "write", "(", "arg", ")" ]
38.8
6.4
def set_outgoing(self, value): """ Setter for 'outgoing' field. :param value - a new value of 'outgoing' field. Must be a list of IDs (String type) of outgoing flows. """ if not isinstance(value, list): raise TypeError("OutgoingList new value must be a list") for element in value: if not isinstance(element, str): raise TypeError("OutgoingList elements in variable must be of String class") self.__outgoing_list = value
[ "def", "set_outgoing", "(", "self", ",", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "list", ")", ":", "raise", "TypeError", "(", "\"OutgoingList new value must be a list\"", ")", "for", "element", "in", "value", ":", "if", "not", "isins...
46
16.727273
def read_openke_translation(filename, delimiter='\t', entity_first=True): """Returns map with entity or relations from plain text.""" result = {} with open(filename, "r") as f: _ = next(f) # pass the total entry number for line in f: line_slice = line.rstrip().split(delimiter) if not entity_first: line_slice = list(reversed(line_slice)) result[line_slice[0]] = line_slice[1] return result
[ "def", "read_openke_translation", "(", "filename", ",", "delimiter", "=", "'\\t'", ",", "entity_first", "=", "True", ")", ":", "result", "=", "{", "}", "with", "open", "(", "filename", ",", "\"r\"", ")", "as", "f", ":", "_", "=", "next", "(", "f", ")...
38.666667
16.75
def _on_mode_change(self, mode): """Mode change broadcast from Abode SocketIO server.""" if isinstance(mode, (tuple, list)): mode = mode[0] if mode is None: _LOGGER.warning("Mode change event with no mode.") return if not mode or mode.lower() not in CONST.ALL_MODES: _LOGGER.warning("Mode change event with unknown mode: %s", mode) return _LOGGER.debug("Alarm mode change event to: %s", mode) # We're just going to convert it to an Alarm device alarm_device = self._abode.get_alarm(refresh=True) # At the time of development, refreshing after mode change notification # didn't seem to get the latest update immediately. As such, we will # force the mode status now to match the notification. # pylint: disable=W0212 alarm_device._json_state['mode']['area_1'] = mode for callback in self._device_callbacks.get(alarm_device.device_id, ()): _execute_callback(callback, alarm_device)
[ "def", "_on_mode_change", "(", "self", ",", "mode", ")", ":", "if", "isinstance", "(", "mode", ",", "(", "tuple", ",", "list", ")", ")", ":", "mode", "=", "mode", "[", "0", "]", "if", "mode", "is", "None", ":", "_LOGGER", ".", "warning", "(", "\"...
39.846154
24.423077
def setup(self, np=np, numpy_version=numpy_version, StrictVersion=StrictVersion, new_pandas=new_pandas): """Lives in zipline.__init__ for doctests.""" if numpy_version >= StrictVersion('1.14'): self.old_opts = np.get_printoptions() np.set_printoptions(legacy='1.13') else: self.old_opts = None if new_pandas: self.old_err = np.geterr() # old pandas has numpy compat that sets this np.seterr(all='ignore') else: self.old_err = None
[ "def", "setup", "(", "self", ",", "np", "=", "np", ",", "numpy_version", "=", "numpy_version", ",", "StrictVersion", "=", "StrictVersion", ",", "new_pandas", "=", "new_pandas", ")", ":", "if", "numpy_version", ">=", "StrictVersion", "(", "'1.14'", ")", ":", ...
27.894737
15.210526
def diffsp(self, col: str, serie: "iterable", name: str="Diff"): """ Add a diff column in percentage from a serie. The serie is an iterable of the same length than the dataframe :param col: column to diff :type col: str :param serie: serie to diff from :type serie: iterable :param name: name of the diff col, defaults to "Diff" :param name: str, optional :example: ``ds.diffp("Col 1", [1, 1, 4], "New col")`` """ try: d = [] for i, row in self.df.iterrows(): v = (row[col]*100) / serie[i] d.append(v) self.df[name] = d except Exception as e: self.err(e, self._append, "Can not diff column from serie")
[ "def", "diffsp", "(", "self", ",", "col", ":", "str", ",", "serie", ":", "\"iterable\"", ",", "name", ":", "str", "=", "\"Diff\"", ")", ":", "try", ":", "d", "=", "[", "]", "for", "i", ",", "row", "in", "self", ".", "df", ".", "iterrows", "(", ...
34.909091
16.181818
def delete(self): """ Removes current SyncItem """ url = SyncList.key.format(clientId=self.clientIdentifier) url += '/' + str(self.id) self._server.query(url, self._server._session.delete)
[ "def", "delete", "(", "self", ")", ":", "url", "=", "SyncList", ".", "key", ".", "format", "(", "clientId", "=", "self", ".", "clientIdentifier", ")", "url", "+=", "'/'", "+", "str", "(", "self", ".", "id", ")", "self", ".", "_server", ".", "query"...
43.2
15.2
def client_ident(self): """ Return the client identifier as included in many command replies. """ return irc.client.NickMask.from_params( self.nick, self.user, self.server.servername)
[ "def", "client_ident", "(", "self", ")", ":", "return", "irc", ".", "client", ".", "NickMask", ".", "from_params", "(", "self", ".", "nick", ",", "self", ".", "user", ",", "self", ".", "server", ".", "servername", ")" ]
33.285714
9.857143
def findPkt(pkt): """ Search through a string of binary for a valid xl320 package. in: buffer to search through out: a list of valid data packet """ # print('findpkt', pkt) # print('-----------------------') ret = [] while len(pkt)-10 >= 0: if pkt[0:4] != [0xFF, 0xFF, 0xFD, 0x00]: pkt.pop(0) # get rid of the first index # print(' - pop:', pkt) continue # print(' > good packet') length = (pkt[6] << 8) + pkt[5] # print(' > length', length) crc_pos = 5 + length pkt_crc = pkt[crc_pos:crc_pos + 2] crc = le(crc16(pkt[:crc_pos])) # if len(pkt) < (crc_pos + 1): # print('<<< need more data for findPkt >>>') # print(' > calc crc', crc) # print(' > pkt crc', pkt_crc) if pkt_crc == crc: pkt_end = crc_pos+2 ret.append(pkt[:pkt_end]) # print(' > found:', pkt[:pkt_end]) # print(' > pkt size', pkt_end) del pkt[:pkt_end] # print(' > remaining:', pkt) else: pkt_end = crc_pos+2 # print(' - crap:', pkt[:pkt_end]) del pkt[:pkt_end] # print('findpkt ret:', ret) return ret
[ "def", "findPkt", "(", "pkt", ")", ":", "# print('findpkt', pkt)", "# print('-----------------------')", "ret", "=", "[", "]", "while", "len", "(", "pkt", ")", "-", "10", ">=", "0", ":", "if", "pkt", "[", "0", ":", "4", "]", "!=", "[", "0xFF", ",", "...
25.717949
14.179487
def delete(self, container, del_objects=False): """ Deletes the specified container. If the container contains objects, the command will fail unless 'del_objects' is passed as True. In that case, each object will be deleted first, and then the container. """ if del_objects: nms = self.list_object_names(container, full_listing=True) self.api.bulk_delete(container, nms, async_=False) uri = "/%s" % utils.get_name(container) resp, resp_body = self.api.method_delete(uri)
[ "def", "delete", "(", "self", ",", "container", ",", "del_objects", "=", "False", ")", ":", "if", "del_objects", ":", "nms", "=", "self", ".", "list_object_names", "(", "container", ",", "full_listing", "=", "True", ")", "self", ".", "api", ".", "bulk_de...
49.818182
18.181818
def from_dict(cls, obj_dict): """ Load the object from a dictionary (produced with :py:func:`Concise.to_dict`) Returns: Concise: Loaded Concise object. """ # convert the output into a proper form obj_dict['output'] = helper.rec_dict_to_numpy_dict(obj_dict["output"]) helper.dict_to_numpy_dict(obj_dict['output']) if "trained_global_model" in obj_dict.keys(): raise Exception("Found trained_global_model feature in dictionary. Use ConciseCV.load to load this file.") dc = Concise(**obj_dict["param"]) # touch the hidden arguments dc._param = obj_dict["param"] if obj_dict["output"]["weights"] is None: dc._model_fitted = False else: dc._model_fitted = True dc._exec_time = obj_dict["execution_time"] dc.unused_param = obj_dict["unused_param"] dc._accuracy = obj_dict["output"]["accuracy"] dc._splines = obj_dict["output"]["splines"] weights = obj_dict["output"]["weights"] if weights is not None: # fix the dimensionality of X_feat in case it was 0 dimensional if weights["feature_weights"].shape == (0,): weights["feature_weights"].shape = (0, obj_dict["param"]["num_tasks"]) dc._set_var_res(weights) return dc
[ "def", "from_dict", "(", "cls", ",", "obj_dict", ")", ":", "# convert the output into a proper form", "obj_dict", "[", "'output'", "]", "=", "helper", ".", "rec_dict_to_numpy_dict", "(", "obj_dict", "[", "\"output\"", "]", ")", "helper", ".", "dict_to_numpy_dict", ...
36.378378
21.405405
def update(self, **kwargs): """Call this to change the configuration of the service on the device. This method uses HTTP PUT alter the service state on the device. The attributes of the instance will be packaged as a dictionary. That dictionary will be updated with kwargs. It is then submitted as JSON to the device. Various edge cases are handled: * read-only attributes that are unchangeable are removed * If ``fqdn`` is in the kwargs or set as an attribute, removes the ``autopopulate`` and ``addressFamily`` keys from it if there. :param kwargs: keys and associated values to alter on the device """ checked = self._check_node_parameters(**kwargs) return self._update(**checked)
[ "def", "update", "(", "self", ",", "*", "*", "kwargs", ")", ":", "checked", "=", "self", ".", "_check_node_parameters", "(", "*", "*", "kwargs", ")", "return", "self", ".", "_update", "(", "*", "*", "checked", ")" ]
42.888889
26.277778
def do_notify(context, event_type, payload): """Generic Notifier. Parameters: - `context`: session context - `event_type`: the event type to report, i.e. ip.usage - `payload`: dict containing the payload to send """ LOG.debug('IP_BILL: notifying {}'.format(payload)) notifier = n_rpc.get_notifier('network') notifier.info(context, event_type, payload)
[ "def", "do_notify", "(", "context", ",", "event_type", ",", "payload", ")", ":", "LOG", ".", "debug", "(", "'IP_BILL: notifying {}'", ".", "format", "(", "payload", ")", ")", "notifier", "=", "n_rpc", ".", "get_notifier", "(", "'network'", ")", "notifier", ...
32.5
14.75
def binary_dilation(x, radius=3): """Return fast binary morphological dilation of an image. see `skimage.morphology.binary_dilation <http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.binary_dilation>`__. Parameters ----------- x : 2D array A binary image. radius : int For the radius of mask. Returns ------- numpy.array A processed binary image. """ mask = disk(radius) x = _binary_dilation(x, selem=mask) return x
[ "def", "binary_dilation", "(", "x", ",", "radius", "=", "3", ")", ":", "mask", "=", "disk", "(", "radius", ")", "x", "=", "_binary_dilation", "(", "x", ",", "selem", "=", "mask", ")", "return", "x" ]
24.047619
25.380952
def flag_inner_classes(obj): """ Mutates any attributes on ``obj`` which are classes, with link to ``obj``. Adds a convenience accessor which instantiates ``obj`` and then calls its ``setup`` method. Recurses on those objects as well. """ for tup in class_members(obj): tup[1]._parent = obj tup[1]._parent_inst = None tup[1].__getattr__ = my_getattr flag_inner_classes(tup[1])
[ "def", "flag_inner_classes", "(", "obj", ")", ":", "for", "tup", "in", "class_members", "(", "obj", ")", ":", "tup", "[", "1", "]", ".", "_parent", "=", "obj", "tup", "[", "1", "]", ".", "_parent_inst", "=", "None", "tup", "[", "1", "]", ".", "__...
30.357143
15.642857
def getvalue(self) -> str:
    """Decode and return the internal byte buffer as a str."""
    raw = self.buffer.byte_buf
    return raw.decode(encoding=self.encoding, errors=self.errors)
[ "def", "getvalue", "(", "self", ")", "->", "str", ":", "return", "self", ".", "buffer", ".", "byte_buf", ".", "decode", "(", "encoding", "=", "self", ".", "encoding", ",", "errors", "=", "self", ".", "errors", ")" ]
53.333333
20
def chrome_tracing_dump(self, filename=None):
    """Return a list of profiling events that can be viewed as a timeline.

    To view this information as a timeline, dump it as a json file by
    passing in "filename" or using json.dump, then go to chrome://tracing
    in the Chrome web browser and load the dumped file. Make sure to
    enable "Flow events" in the "View Options" menu.

    Args:
        filename: If a filename is provided, the timeline is dumped to
            that file.

    Returns:
        If filename is not provided, this returns a list of profiling
            events. Each profile event is a dictionary.
    """
    # TODO(rkn): Support including the task specification data in the
    # timeline.
    # TODO(rkn): This should support viewing just a window of time or a
    # limited number of events.
    profile_table = self.profile_table()
    all_events = []
    for component_id_hex, component_events in profile_table.items():
        # Only consider workers and drivers.
        component_type = component_events[0]["component_type"]
        if component_type not in ["worker", "driver"]:
            continue
        for event in component_events:
            # Translate each profile record into the Chrome trace-event
            # dict format expected by chrome://tracing.
            new_event = {
                # The category of the event.
                "cat": event["event_type"],
                # The string displayed on the event.
                "name": event["event_type"],
                # The identifier for the group of rows that the event
                # appears in.
                "pid": event["node_ip_address"],
                # The identifier for the row that the event appears in.
                "tid": event["component_type"] + ":" + event["component_id"],
                # The start time in microseconds.
                "ts": self._seconds_to_microseconds(event["start_time"]),
                # The duration in microseconds.
                "dur": self._seconds_to_microseconds(event["end_time"] -
                                                     event["start_time"]),
                # What is this?
                "ph": "X",
                # This is the name of the color to display the box in.
                "cname": self._default_color_mapping[event["event_type"]],
                # The extra user-defined data.
                "args": event["extra_data"],
            }
            # Modify the json with the additional user-defined extra data.
            # This can be used to add fields or override existing fields.
            if "cname" in event["extra_data"]:
                new_event["cname"] = event["extra_data"]["cname"]
            if "name" in event["extra_data"]:
                new_event["name"] = event["extra_data"]["name"]
            all_events.append(new_event)
    if filename is not None:
        # NOTE(review): returns None in this branch — callers must not
        # expect the event list when a filename is supplied.
        with open(filename, "w") as outfile:
            json.dump(all_events, outfile)
    else:
        return all_events
[ "def", "chrome_tracing_dump", "(", "self", ",", "filename", "=", "None", ")", ":", "# TODO(rkn): Support including the task specification data in the", "# timeline.", "# TODO(rkn): This should support viewing just a window of time or a", "# limited number of events.", "profile_table", ...
44.942029
21.985507
def _harvest_lost_resources(self): """Return lost resources to pool.""" with self._lock: for i in self._unavailable_range(): rtracker = self._reference_queue[i] if rtracker is not None and rtracker.available(): self.put_resource(rtracker.resource)
[ "def", "_harvest_lost_resources", "(", "self", ")", ":", "with", "self", ".", "_lock", ":", "for", "i", "in", "self", ".", "_unavailable_range", "(", ")", ":", "rtracker", "=", "self", ".", "_reference_queue", "[", "i", "]", "if", "rtracker", "is", "not"...
45.857143
11.571429
def get_features(self, did, wid, eid):
    '''
    Gets the feature list for specified document / workspace / part studio.

    Args:
        - did (str): Document ID
        - wid (str): Workspace ID
        - eid (str): Element ID

    Returns:
        - requests.Response: Onshape response data
    '''
    endpoint = '/api/partstudios/d/{}/w/{}/e/{}/features'.format(did, wid, eid)
    return self._api.request('get', endpoint)
[ "def", "get_features", "(", "self", ",", "did", ",", "wid", ",", "eid", ")", ":", "return", "self", ".", "_api", ".", "request", "(", "'get'", ",", "'/api/partstudios/d/'", "+", "did", "+", "'/w/'", "+", "wid", "+", "'/e/'", "+", "eid", "+", "'/featu...
31.428571
26.142857
def gather_verify_arguments(self):
    """
    Collect the keyword arguments needed by a later verify() call.

    :return: dictionary with arguments to the verify call
    """
    ctx = self.service_context
    return {
        'client_id': ctx.client_id,
        'iss': ctx.issuer,
        'keyjar': ctx.keyjar,
        'verify': True,
    }
[ "def", "gather_verify_arguments", "(", "self", ")", ":", "kwargs", "=", "{", "'client_id'", ":", "self", ".", "service_context", ".", "client_id", ",", "'iss'", ":", "self", ".", "service_context", ".", "issuer", ",", "'keyjar'", ":", "self", ".", "service_c...
33.5
17
def commonprefix(m):
    "Given a list of pathnames, returns the longest common leading component"
    if not m:
        return ''
    # The lexicographic min and max bound every other element, so their
    # common prefix is the common prefix of the whole list.
    lo, hi = min(m), max(m)
    for idx, (a, b) in enumerate(zip(lo, hi)):
        if a != b:
            return lo[:idx]
    return lo
[ "def", "commonprefix", "(", "m", ")", ":", "if", "not", "m", ":", "return", "''", "s1", "=", "min", "(", "m", ")", "s2", "=", "max", "(", "m", ")", "for", "i", ",", "c", "in", "enumerate", "(", "s1", ")", ":", "if", "c", "!=", "s2", "[", ...
26.666667
21.555556
def serve():
    """main entry point"""
    # Verbose logging for the crossdock test harness.
    logging.getLogger().setLevel(logging.DEBUG)
    logging.info('Python Tornado Crossdock Server Running ...')
    # TChannel server plus an end-to-end handler wired into one Tornado app.
    server = Server(DefaultServerPortTChannel)
    endtoend_handler = EndToEndHandler()
    app = make_app(server, endtoend_handler)
    # Serve HTTP on both the client-facing and server-facing ports.
    app.listen(DefaultClientPortHTTP)
    app.listen(DefaultServerPortHTTP)
    server.tchannel.listen()
    # Blocks forever running the Tornado event loop.
    tornado.ioloop.IOLoop.current().start()
[ "def", "serve", "(", ")", ":", "logging", ".", "getLogger", "(", ")", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "logging", ".", "info", "(", "'Python Tornado Crossdock Server Running ...'", ")", "server", "=", "Server", "(", "DefaultServerPortTChannel"...
38.454545
8.090909
def _convert_string_name(self, k):
    """converts things like FOO_BAR to Foo-Bar which is the normal form"""
    normalized = String(k, "iso-8859-1").lower().replace('_', '-')
    return "-".join(part.title() for part in normalized.split('-'))
[ "def", "_convert_string_name", "(", "self", ",", "k", ")", ":", "k", "=", "String", "(", "k", ",", "\"iso-8859-1\"", ")", "klower", "=", "k", ".", "lower", "(", ")", ".", "replace", "(", "'_'", ",", "'-'", ")", "bits", "=", "klower", ".", "split", ...
46.166667
6.166667
def _create_solver(self):
    r"""
    This method creates the petsc sparse linear solver.
    """
    # Names recognised by PETSc; unknown settings fall back to safe
    # defaults below rather than raising.
    # http://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/KSP/KSPType.html#KSPType
    iterative_solvers = [
        'richardson', 'chebyshev', 'cg', 'groppcg', 'pipecg', 'pipecgrr',
        'cgne', 'nash', 'stcg', 'gltr', 'fcg', 'pipefcg', 'gmres',
        'pipefgmres', 'fgmres', 'lgmres', 'dgmres', 'pgmres', 'tcqmr',
        'bcgs', 'ibcgs', 'fbcgs', 'fbcgsr', 'bcgsl', 'pipebcgs', 'cgs',
        'tfqmr', 'cr', 'pipecr', 'lsqr', 'preonly', 'qcg', 'bicg',
        'minres', 'symmlq', 'lcd', 'python', 'gcr', 'pipegcr', 'tsirm',
        'cgls', 'fetidp']
    # http://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/PC/PCType.html#PCType
    preconditioners = [
        'none', 'jacobi', 'sor', 'lu', 'shell', 'bjacobi', 'mg',
        'eisenstat', 'ilu', 'icc', 'asm', 'gasm', 'ksp', 'composite',
        'redundant', 'spai', 'nn', 'cholesky', 'pbjacobi', 'mat', 'hypre',
        'parms', 'fieldsplit', 'tfs', 'ml', 'galerkin', 'exotic', 'cp',
        'bfbt', 'lsc', 'python', 'pfmg', 'syspfmg', 'redistribute', 'svd',
        'gamg', 'sacusp', 'sacusppoly', 'bicgstabcusp', 'ainvcusp',
        'chowiluviennacl', 'rowscalingviennacl', 'saviennacl', 'bddc',
        'kaczmarz', 'telescope']
    lu_direct_solvers = ['mumps', 'superlu_dist', 'umfpack', 'klu']
    cholesky_direct_solvers = ['mumps', 'cholmod']
    solver = self.settings['type']
    preconditioner = self.settings['preconditioner']
    # Unrecognised solver names fall back to conjugate gradient with a
    # console warning.
    if solver not in (iterative_solvers + lu_direct_solvers +
                      cholesky_direct_solvers):
        solver = 'cg'
        print('Warning: ' + self.settings['type'] + ' not availabe, ' +
              solver + ' used instead.')
    # Unrecognised preconditioner names fall back to jacobi.
    if preconditioner not in preconditioners:
        preconditioner = 'jacobi'
        print('Warning: ' + self.settings['preconditioner'] +
              ' not availabe, ' + preconditioner + ' used instead.')
    if solver in lu_direct_solvers:
        # Direct LU factorisation: KSP 'preonly' applies the PC exactly once.
        self.ksp = PETSc.KSP()
        self.ksp.create(PETSc.COMM_WORLD)
        self.ksp.getPC().setType('lu')
        self.ksp.getPC().setFactorSolverPackage(solver)
        self.ksp.setType('preonly')
    elif solver in cholesky_direct_solvers:
        # Direct Cholesky factorisation, same 'preonly' pattern.
        self.ksp = PETSc.KSP()
        self.ksp.create(PETSc.COMM_WORLD)
        self.ksp.getPC().setType('cholesky')
        self.ksp.getPC().setFactorSolverPackage(solver)
        self.ksp.setType('preonly')
    elif solver in iterative_solvers:
        self.ksp = PETSc.KSP()
        self.ksp.create(PETSc.COMM_WORLD)
        self.ksp.getPC().setType(preconditioner)
        self.ksp.setType(solver)
    # NOTE(review): PETSc's KSP.setTolerances signature is
    # (rtol, atol, divtol, max_it); passing atol first looks swapped —
    # confirm the intended argument order against the installed petsc4py.
    self.ksp.setTolerances(self.settings['atol'],
                           self.settings['rtol'],
                           self.settings['maxiter'])
[ "def", "_create_solver", "(", "self", ")", ":", "# http://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/KSP/KSPType.html#KSPType", "iterative_solvers", "=", "[", "'richardson'", ",", "'chebyshev'", ",", "'cg'", ",", "'groppcg'", ",", "'pipecg'", ",", "'pipecgrr'", ",...
47.926471
22.294118
def _search(self, addr): """ Checks which segment that the address `addr` should belong to, and, returns the offset of that segment. Note that the address may not actually belong to the block. :param addr: The address to search :return: The offset of the segment. """ start = 0 end = len(self._list) while start != end: mid = (start + end) // 2 segment = self._list[mid] if addr < segment.start: end = mid elif addr >= segment.end: start = mid + 1 else: # Overlapped :( start = mid break return start
[ "def", "_search", "(", "self", ",", "addr", ")", ":", "start", "=", "0", "end", "=", "len", "(", "self", ".", "_list", ")", "while", "start", "!=", "end", ":", "mid", "=", "(", "start", "+", "end", ")", "//", "2", "segment", "=", "self", ".", ...
26.923077
18.769231
def create_xml_path(path, **kwargs):
    '''
    Start a transient domain based on the XML-file path passed to the function

    :param path: path to a file containing the libvirt XML definition of the domain
    :param connection: libvirt connection URI, overriding defaults

        .. versionadded:: 2019.2.0
    :param username: username to connect with, overriding defaults

        .. versionadded:: 2019.2.0
    :param password: password to connect with, overriding defaults

        .. versionadded:: 2019.2.0

    CLI Example:

    .. code-block:: bash

        salt '*' virt.create_xml_path <path to XML file on the node>
    '''
    try:
        with salt.utils.files.fopen(path, 'r') as handle:
            xml_definition = salt.utils.stringutils.to_unicode(handle.read())
            # Delegate the actual domain creation to create_xml_str.
            return create_xml_str(xml_definition, **kwargs)
    except (OSError, IOError):
        # Unreadable/missing file: report failure instead of raising.
        return False
[ "def", "create_xml_path", "(", "path", ",", "*", "*", "kwargs", ")", ":", "try", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "path", ",", "'r'", ")", "as", "fp_", ":", "return", "create_xml_str", "(", "salt", ".", "utils", "...
29.793103
25.586207
def setdefault(self, k, d=None):
    """Override dict.setdefault() to title-case keys."""
    # Normalise the key before delegating to the dict implementation.
    titled = k.title()
    return super(HeaderDict, self).setdefault(titled, d)
[ "def", "setdefault", "(", "self", ",", "k", ",", "d", "=", "None", ")", ":", "return", "super", "(", "HeaderDict", ",", "self", ")", ".", "setdefault", "(", "k", ".", "title", "(", ")", ",", "d", ")" ]
38.75
17.75
def is_well_grounded_concept(c: Concept, cutoff: float = 0.7) -> bool:
    """Check if a concept has a high grounding score. """
    # Short-circuit: an ungrounded concept can never be well grounded.
    if not is_grounded(c):
        return False
    return top_grounding_score(c) >= cutoff
[ "def", "is_well_grounded_concept", "(", "c", ":", "Concept", ",", "cutoff", ":", "float", "=", "0.7", ")", "->", "bool", ":", "return", "is_grounded", "(", "c", ")", "and", "(", "top_grounding_score", "(", "c", ")", ">=", "cutoff", ")" ]
47.75
23.5
def _string_width(self, s): """Get width of a string in the current font""" s = str(s) w = 0 for i in s: w += self.character_widths[i] return w * self.font_size / 1000.0
[ "def", "_string_width", "(", "self", ",", "s", ")", ":", "s", "=", "str", "(", "s", ")", "w", "=", "0", "for", "i", "in", "s", ":", "w", "+=", "self", ".", "character_widths", "[", "i", "]", "return", "w", "*", "self", ".", "font_size", "/", ...
31.571429
11.857143
def ticket_skips(self, ticket_id, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/ticket_skips#list-skips-for-the-current-account"
    # Build the endpoint in one step and delegate to the generic caller.
    api_path = "/api/v2/tickets/{ticket_id}/skips.json".format(ticket_id=ticket_id)
    return self.call(api_path, **kwargs)
[ "def", "ticket_skips", "(", "self", ",", "ticket_id", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/tickets/{ticket_id}/skips.json\"", "api_path", "=", "api_path", ".", "format", "(", "ticket_id", "=", "ticket_id", ")", "return", "self", ".", ...
61.6
21.6
def handle_new_selection(self, models):
    """Update the selection from a list of newly clicked/selected models.

    Helper for generic widgets that modify the selection. Behaviour depends
    on the pressed modifier keys:

    * No modifier: the previous selection is discarded and replaced by the
      passed models.
    * Extend-selection modifier: passed models not yet selected are added;
      passed models already selected are removed (toggle semantics).

    :param models: The list of models that are newly selected/clicked on
    """
    models = self._check_model_types(models)
    if not extend_selection():
        # Plain click: selection becomes exactly the clicked models.
        self._selected = models
    else:
        # Toggle: drop the already-selected ones, add the new ones.
        to_deselect = models & self._selected
        to_select = models - self._selected
        self._selected.difference_update(to_deselect)
        self._selected.update(to_select)
    self._selected = reduce_to_parent_states(self._selected)
[ "def", "handle_new_selection", "(", "self", ",", "models", ")", ":", "models", "=", "self", ".", "_check_model_types", "(", "models", ")", "if", "extend_selection", "(", ")", ":", "already_selected_elements", "=", "models", "&", "self", ".", "_selected", "newl...
49.12
31.16
def connect(self, callback, ref=False, position='first', before=None,
            after=None):
    """ Connect the callback to the event group. The callback will receive
    events from *all* of the emitters in the group.

    See :func:`EventEmitter.connect() <vispy.event.EventEmitter.connect>` for
    arguments.
    """
    # Make sure the group is forwarding its emitters' events before the
    # callback is registered.
    self._connect_emitters(True)
    return EventEmitter.connect(self, callback, ref, position,
                                before, after)
[ "def", "connect", "(", "self", ",", "callback", ",", "ref", "=", "False", ",", "position", "=", "'first'", ",", "before", "=", "None", ",", "after", "=", "None", ")", ":", "self", ".", "_connect_emitters", "(", "True", ")", "return", "EventEmitter", "....
44.727273
15.181818
def invitations():
    """List and manage received invitations.
    """
    with Session() as session:
        try:
            result = session.VFolder.invitations()
            invitations = result.get('invitations', [])
            if len(invitations) < 1:
                print('No invitations.')
                return
            print('List of invitations (inviter, vfolder id, permission):')
            for cnt, inv in enumerate(invitations):
                # Expand the permission code into a readable label.
                if inv['perm'] == 'rw':
                    perm = 'read-write'
                elif inv['perm'] == 'ro':
                    perm = 'read-only'
                else:
                    perm = inv['perm']
                print('[{}] {}, {}, {}'.format(cnt + 1, inv['inviter'],
                                               inv['vfolder_id'], perm))
            # 1-based user input converted to a 0-based index; any
            # non-numeric input silently aborts.
            selection = input('Choose invitation number to manage: ')
            if selection.isdigit():
                selection = int(selection) - 1
            else:
                return
            if 0 <= selection < len(invitations):
                # Loop until a valid action letter is entered.
                while True:
                    action = input('Choose action. (a)ccept, (r)eject, (c)ancel: ')
                    if action.lower() == 'a':
                        # TODO: Let user can select access_key among many.
                        # Currently, the config objects holds only one key.
                        config = get_config()
                        result = session.VFolder.accept_invitation(
                            invitations[selection]['id'], config.access_key)
                        print(result['msg'])
                        break
                    elif action.lower() == 'r':
                        result = session.VFolder.delete_invitation(
                            invitations[selection]['id'])
                        print(result['msg'])
                        break
                    elif action.lower() == 'c':
                        break
        except Exception as e:
            # Any API/user error aborts the CLI with a non-zero exit code.
            print_error(e)
            sys.exit(1)
[ "def", "invitations", "(", ")", ":", "with", "Session", "(", ")", "as", "session", ":", "try", ":", "result", "=", "session", ".", "VFolder", ".", "invitations", "(", ")", "invitations", "=", "result", ".", "get", "(", "'invitations'", ",", "[", "]", ...
42.702128
15.234043
def write(self, path, wrap_ttl=None, **kwargs):
    """Wrap the hvac write call, using the right token for cubbyhole
    interactions.

    Cubbyhole paths are written with the initial token (and its response is
    returned); all other paths use the current token and return None.
    """
    path = sanitize_mount(path)
    val = None
    if path.startswith('cubbyhole'):
        # Temporarily swap in the initial token for the cubbyhole write,
        # then restore the operational token afterwards.
        self.token = self.initial_token
        val = super(Client, self).write(path, wrap_ttl=wrap_ttl, **kwargs)
        self.token = self.operational_token
    else:
        # NOTE(review): the non-cubbyhole result is discarded — callers
        # get None here; confirm that is intentional.
        super(Client, self).write(path, wrap_ttl=wrap_ttl, **kwargs)
    return val
[ "def", "write", "(", "self", ",", "path", ",", "wrap_ttl", "=", "None", ",", "*", "*", "kwargs", ")", ":", "path", "=", "sanitize_mount", "(", "path", ")", "val", "=", "None", "if", "path", ".", "startswith", "(", "'cubbyhole'", ")", ":", "self", "...
39
15.615385
def check_usufy(self, query, **kwargs):
    """
    Verifying a usufy query in this platform.

    This might be redefined in any class inheriting from Platform. The only
    condition is that any of this should return a dictionary as defined.

    Args:
    -----
        query: The element to be searched.
        kwargs: Dictionary with extra parameters. Just in case.

    Return:
    -------
        Returns the collected data if exists or None if not.
    """
    results = self.launchQueryForMode(query=query, mode="usufy")
    return results if self._somethingFound(results, mode="usufy") else None
[ "def", "check_usufy", "(", "self", ",", "query", ",", "*", "*", "kwargs", ")", ":", "data", "=", "self", ".", "launchQueryForMode", "(", "query", "=", "query", ",", "mode", "=", "\"usufy\"", ")", "if", "self", ".", "_somethingFound", "(", "data", ",", ...
32.3
21.5
def get_subnets():
    """
    :return: all knows subnets
    """
    LOGGER.debug("SubnetService.get_subnets")
    params = {'http_operation': 'GET', 'operation_path': ''}
    response = SubnetService.requester.call(params)
    if response.rc == 0:
        # Success: deserialize every subnet record.
        return [Subnet.json_2_subnet(item)
                for item in response.response_content['subnets']]
    if response.rc != 404:
        # 404 (no subnets) is silent; any other failure is logged.
        err_msg = 'SubnetService.get_subnets - Problem while getting subnets. ' \
                  '. Reason: ' + str(response.response_content) + '-' + str(response.error_message) + \
                  " (" + str(response.rc) + ")"
        LOGGER.warning(err_msg)
    return None
[ "def", "get_subnets", "(", ")", ":", "LOGGER", ".", "debug", "(", "\"SubnetService.get_subnets\"", ")", "args", "=", "{", "'http_operation'", ":", "'GET'", ",", "'operation_path'", ":", "''", "}", "response", "=", "SubnetService", ".", "requester", ".", "call"...
41.722222
17.944444
def setData(self, column, role, value):
    """
    if value is valid sets the data to value
    Args:
        column: column of item
        role: role of item (see Qt doc)
        value: value to be set
    """
    assert isinstance(column, int)
    assert isinstance(role, int)

    # make sure that the right row is selected, this is not always the case for checkboxes and
    # combo boxes because they are items on top of the tree structure
    if isinstance(value, (QtWidgets.QComboBox, QtWidgets.QCheckBox)):
        self.treeWidget().setCurrentItem(self)

    # if row 2 (editrole, value has been entered)
    if role == 2 and column == 1:
        # Coerce whatever the editor widget delivered into the item's
        # native value type before storing it.
        if isinstance(value, str):
            value = self.cast_type(value)  # cast into same type as valid values
        if isinstance(value, QtCore.QVariant):
            value = self.cast_type(value.toString())  # cast into same type as valid values
        if isinstance(value, QtWidgets.QComboBox):
            value = self.cast_type(value.currentText())
        if isinstance(value, QtWidgets.QCheckBox):
            value = bool(int(value.checkState()))  # checkState() gives 2 (True) and 0 (False)
        # save value in internal variable
        self.value = value
    elif column == 0:
        # labels should not be changed so we set it back
        value = self.name

    if value is None:
        value = self.value

    # 180327(asafira) --- why do we need to do the following lines? Why not just always call super or always
    # emitDataChanged()?
    if not isinstance(value, bool):
        super(B26QTreeItem, self).setData(column, role, value)
    else:
        self.emitDataChanged()
[ "def", "setData", "(", "self", ",", "column", ",", "role", ",", "value", ")", ":", "assert", "isinstance", "(", "column", ",", "int", ")", "assert", "isinstance", "(", "role", ",", "int", ")", "# make sure that the right row is selected, this is not always the cas...
36.625
22.75
def from_response_data(cls, response_data):
    """
    Response factory

    :param response_data: requests.models.Response
    :return: pybomb.clients.Response
    """
    payload = response_data.json()
    return cls(
        response_data.url,
        payload["number_of_page_results"],
        payload["number_of_total_results"],
        payload["results"],
    )
[ "def", "from_response_data", "(", "cls", ",", "response_data", ")", ":", "response_json", "=", "response_data", ".", "json", "(", ")", "return", "cls", "(", "response_data", ".", "url", ",", "response_json", "[", "\"number_of_page_results\"", "]", ",", "response...
26.6875
15.4375
def param(name, value_info, is_required=True, label=None, desc=None):
    """
    Annotate a parameter of the action being defined.

    @param name: name of the parameter defined.
    @type name: unicode or str
    @param value_info: the parameter value information.
    @type value_info: value.IValueInfo
    @param is_required: if the parameter is required or optional.
    @type is_required: bool
    @param label: the parameter label or None.
    @type label: str or unicode or None
    @param desc: the parameter description or None.
    @type desc: str or unicode or None
    """
    # Delegate to the shared annotation machinery under the "param" tag.
    _annotate("param", name, value_info,
              is_required=is_required, label=label, desc=desc)
[ "def", "param", "(", "name", ",", "value_info", ",", "is_required", "=", "True", ",", "label", "=", "None", ",", "desc", "=", "None", ")", ":", "_annotate", "(", "\"param\"", ",", "name", ",", "value_info", ",", "is_required", "=", "is_required", ",", ...
42.125
10.125
def set_qubit(self, qubit, element):
    """Sets the qubit to the element.

    Args:
        qubit (qbit): Element of self.qregs.
        element (DrawElement): Element to set in the qubit
    """
    position = self.qregs.index(qubit)
    self.qubit_layer[position] = element
[ "def", "set_qubit", "(", "self", ",", "qubit", ",", "element", ")", ":", "self", ".", "qubit_layer", "[", "self", ".", "qregs", ".", "index", "(", "qubit", ")", "]", "=", "element" ]
34.625
10.375
def camel_case_to_snake_case(name):
    """ HelloWorld -> hello_world """
    # Two regex passes insert underscores at word boundaries; lowering last.
    partially_converted = _FIRST_CAP_RE.sub(r'\1_\2', name)
    converted = _ALL_CAP_RE.sub(r'\1_\2', partially_converted)
    return converted.lower()
[ "def", "camel_case_to_snake_case", "(", "name", ")", ":", "s1", "=", "_FIRST_CAP_RE", ".", "sub", "(", "r'\\1_\\2'", ",", "name", ")", "return", "_ALL_CAP_RE", ".", "sub", "(", "r'\\1_\\2'", ",", "s1", ")", ".", "lower", "(", ")" ]
28
4.333333