text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def invoice(request, invoice_id, access_code=None):
    """Render an invoice page without requiring authentication.

    Access is granted when the invoice's ``can_view`` check passes for
    the requesting user (owner or staff) or for the supplied access
    code.

    Arguments:
        invoice_id (castable to int): id of the invoice to display.
        access_code (Optional[str]): access code of the invoice's owner.

    Returns:
        render: ``registrasion/invoice.html`` with ``{"invoice": ...}``.

    Raises:
        Http404: if neither the current user nor the access code may
            view this invoice.
    """
    controller = InvoiceController.for_id_or_404(invoice_id)
    allowed = controller.can_view(user=request.user, access_code=access_code)
    if not allowed:
        raise Http404()
    context = {"invoice": controller.invoice}
    return render(request, "registrasion/invoice.html", context)
[ "def", "invoice", "(", "request", ",", "invoice_id", ",", "access_code", "=", "None", ")", ":", "current_invoice", "=", "InvoiceController", ".", "for_id_or_404", "(", "invoice_id", ")", "if", "not", "current_invoice", ".", "can_view", "(", "user", "=", "reque...
26.139535
27.255814
def read_inifile(self, noexistok=False, typed=False):
    """Yield data records parsed from this path in "ini-file" format.

    Delegates to :func:`pwkit.inifile.read_stream` when *typed* is
    false, or :func:`pwkit.tinifile.read_stream` (which handles
    numerical data via :mod:`pwkit.msmt`) when it is true.

    When *noexistok* is true, a nonexistent file yields no items
    instead of raising :exc:`IOError`.
    """
    if typed:
        from .tinifile import read_stream
    else:
        from .inifile import read_stream

    try:
        with self.open('rb') as stream:
            for record in read_stream(stream):
                yield record
    except IOError as e:
        # errno 2 == ENOENT; only "file not found" may be swallowed,
        # and only when the caller opted in via noexistok.
        if e.errno != 2 or not noexistok:
            raise
[ "def", "read_inifile", "(", "self", ",", "noexistok", "=", "False", ",", "typed", "=", "False", ")", ":", "if", "typed", ":", "from", ".", "tinifile", "import", "read_stream", "else", ":", "from", ".", "inifile", "import", "read_stream", "try", ":", "wit...
40.136364
18.090909
def make_downsampled_type(cls, other_base):
    """Factory for making Downsampled{Filter,Factor,Classifier}."""
    base_name = other_base.__name__
    # The ``{{frequency}}`` placeholder survives the first ``.format``
    # call and is filled in by ``format_docstring`` below.
    raw_doc = dedent(
        """
        A {t} that defers to another {t} at lower-than-daily frequency.

        Parameters
        ----------
        term : {t}
        {{frequency}}
        """
    ).format(t=base_name)
    doc = format_docstring(
        owner_name=base_name,
        docstring=raw_doc,
        formatters={'frequency': PIPELINE_DOWNSAMPLING_FREQUENCY_DOC},
    )
    return type(
        'Downsampled' + base_name,
        (cls, other_base,),
        {'__doc__': doc, '__module__': other_base.__module__},
    )
[ "def", "make_downsampled_type", "(", "cls", ",", "other_base", ")", ":", "docstring", "=", "dedent", "(", "\"\"\"\n A {t} that defers to another {t} at lower-than-daily frequency.\n\n Parameters\n ----------\n term : {t}\n {{frequency}}\...
28.407407
16.333333
def init(quick):
    # type: () -> None
    """Create an empty pelconf.yaml from template."""
    config_file = 'pelconf.yaml'
    wipe_prompt = "-- <35>{} <32>already exists. Wipe it?<0>".format(config_file)

    # Refuse to clobber an existing config unless the user confirms.
    if exists(config_file) and not click.confirm(shell.fmt(wipe_prompt)):
        log.info("Canceled")
        return

    answers = InitForm().run(quick=quick)

    log.info('Writing <35>{}'.format(config_file))
    template = conf.load_template('pelconf.yaml')
    fs.write_file(config_file, template.format(**answers.values))
[ "def", "init", "(", "quick", ")", ":", "# type: () -> None", "config_file", "=", "'pelconf.yaml'", "prompt", "=", "\"-- <35>{} <32>already exists. Wipe it?<0>\"", ".", "format", "(", "config_file", ")", "if", "exists", "(", "config_file", ")", "and", "not", "click",...
35
22.066667
def _set_tacacs_server(self, v, load=False):
    """
    Setter method for tacacs_server, mapped from YANG variable /tacacs_server (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_tacacs_server is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_tacacs_server() directly.
    """
    # NOTE(review): values carrying a ``_utype`` are unwrapped through it
    # before validation — presumably a pyangbind wrapper convention; confirm.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/coerce ``v`` into the generated container class; any
        # incompatible value raises TypeError/ValueError below.
        t = YANGDynClass(v,base=tacacs_server.tacacs_server, is_container='container', presence=False, yang_name="tacacs-server", rest_name="tacacs-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'TACACS+ server configuration', u'cli-incomplete-no': None, u'sort-priority': u'11'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Re-raise as a structured error describing the expected type.
        raise ValueError({
            'error-string': """tacacs_server must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=tacacs_server.tacacs_server, is_container='container', presence=False, yang_name="tacacs-server", rest_name="tacacs-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'TACACS+ server configuration', u'cli-incomplete-no': None, u'sort-priority': u'11'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)""",
        })
    # Store the validated container and notify listeners if supported.
    self.__tacacs_server = t
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_tacacs_server", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "...
78.590909
36.136364
def b64_from(val):
    """Returns base64 encoded bytes for a given int/long/bytes value.

    :param int|long|bytes val:
    :rtype: bytes|str
    """
    # Integers are first serialized to bytes before encoding.
    if isinstance(val, integer_types):
        val = int_to_bytes(val)
    encoded = b64encode(val)
    return encoded.decode('ascii')
[ "def", "b64_from", "(", "val", ")", ":", "if", "isinstance", "(", "val", ",", "integer_types", ")", ":", "val", "=", "int_to_bytes", "(", "val", ")", "return", "b64encode", "(", "val", ")", ".", "decode", "(", "'ascii'", ")" ]
28.333333
11.444444
def get(self, id, service='facebook', type='analysis'):
    """Get a given Pylon task.

    :param id: The ID of the task
    :type id: str
    :param service: The PYLON service (facebook)
    :type service: str
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    endpoint = '/'.join((service, 'task', type, id))
    return self.request.get(endpoint)
[ "def", "get", "(", "self", ",", "id", ",", "service", "=", "'facebook'", ",", "type", "=", "'analysis'", ")", ":", "return", "self", ".", "request", ".", "get", "(", "service", "+", "'/task/'", "+", "type", "+", "'/'", "+", "id", ")" ]
44
16.692308
def scan_prefix_ids(self, prefix):
    '''Scan for ids with a given prefix.

    Only identifiers are returned; ``feature_names=False`` suppresses
    feature retrieval in the underlying scan.

    :param str prefix: Identifier prefix.
    :rtype: Iterable of ``content_id``
    '''
    # BUGFIX(doc): the old docstring documented a ``feature_names``
    # parameter that this function does not accept.
    resp = self._scan_prefix(prefix, feature_names=False)
    for hit in resp:
        yield did(hit['_id'])
[ "def", "scan_prefix_ids", "(", "self", ",", "prefix", ")", ":", "resp", "=", "self", ".", "_scan_prefix", "(", "prefix", ",", "feature_names", "=", "False", ")", "for", "hit", "in", "resp", ":", "yield", "did", "(", "hit", "[", "'_id'", "]", ")" ]
37.5
14.666667
def _execShowCmd(self, showcmd): """Execute 'show' command and return result dictionary. @param cmd: Command string. @return: Result dictionary. """ result = None lines = self._execCmd("show", showcmd) if lines and len(lines) >= 2 and lines[0] != '' and lines[0][0] != '-': result = {} result['keys'] = lines[0].split(',') items = [] for line in lines[1:]: if line == '': break items.append(line.split(',')) result['items'] = items return result
[ "def", "_execShowCmd", "(", "self", ",", "showcmd", ")", ":", "result", "=", "None", "lines", "=", "self", ".", "_execCmd", "(", "\"show\"", ",", "showcmd", ")", "if", "lines", "and", "len", "(", "lines", ")", ">=", "2", "and", "lines", "[", "0", "...
33.315789
12.473684
def protocol_authenticate(self, account=None):
    """
    Low-level API to perform protocol-level authentication on protocols
    that support it.

    .. HINT::
        In most cases, you want to use the login() method instead, as
        it automatically chooses the best login method for each protocol.

    :type  account: Account
    :param account: An account object, like login().
    """
    with self._get_account(account) as acct:
        user = acct.get_name()
        password = acct.get_password()
        key = acct.get_key()
        # Password auth when no key is configured, key auth otherwise.
        if key is None:
            self._dbg(1, "Attempting to authenticate %s." % user)
            self._protocol_authenticate(user, password)
        else:
            self._dbg(1, "Authenticate %s with key." % user)
            self._protocol_authenticate_by_key(user, key)
        self.proto_authenticated = True
[ "def", "protocol_authenticate", "(", "self", ",", "account", "=", "None", ")", ":", "with", "self", ".", "_get_account", "(", "account", ")", "as", "account", ":", "user", "=", "account", ".", "get_name", "(", ")", "password", "=", "account", ".", "get_p...
40.130435
17.695652
def push(self, vs):
    'Move given sheet `vs` to index 0 of list `sheets`.'
    if vs:
        vs.vd = self
        if vs in self.sheets:
            # Already tracked: just move it to the front.
            self.sheets.remove(vs)
            self.sheets.insert(0, vs)
        elif not vs.loaded:
            # First push of an unloaded sheet: load it and lay out columns.
            self.sheets.insert(0, vs)
            vs.reload()
            vs.recalc()  # set up Columns
        else:
            self.sheets.insert(0, vs)
        # Remember precious sheets by name so they can be found later.
        if vs.precious and vs not in vs.vd.allSheets:
            vs.vd.allSheets[vs] = vs.name
        return vs
[ "def", "push", "(", "self", ",", "vs", ")", ":", "if", "vs", ":", "vs", ".", "vd", "=", "self", "if", "vs", "in", "self", ".", "sheets", ":", "self", ".", "sheets", ".", "remove", "(", "vs", ")", "self", ".", "sheets", ".", "insert", "(", "0"...
32.588235
13.294118
def join(cls, diffs: Iterable['DBDiff']) -> 'DBDiff':
    """
    Join several DBDiff objects into a single DBDiff object.

    When two diffs touch the same entry, whichever appears later in
    ``diffs`` overwrites changes from earlier ones.
    """
    merged = DBDiffTracker()
    for delta in diffs:
        delta.apply_to(merged)
    return merged.diff()
[ "def", "join", "(", "cls", ",", "diffs", ":", "Iterable", "[", "'DBDiff'", "]", ")", "->", "'DBDiff'", ":", "tracker", "=", "DBDiffTracker", "(", ")", "for", "diff", "in", "diffs", ":", "diff", ".", "apply_to", "(", "tracker", ")", "return", "tracker",...
35.363636
14.818182
def centroid_2dg(data, error=None, mask=None):
    """
    Calculate the centroid of a 2D array by fitting a 2D Gaussian (plus
    a constant) to the array.

    Invalid values (e.g. NaNs or infs) in ``data`` or ``error`` are
    automatically masked; the effective mask is the combination of the
    invalid-value masks of both arrays.

    Parameters
    ----------
    data : array_like
        The 2D data array.
    error : array_like, optional
        The 2D array of the 1-sigma errors of the input ``data``.
    mask : array_like (bool), optional
        A boolean mask, with the same shape as ``data``, where a `True`
        value indicates the corresponding element of ``data`` is masked.

    Returns
    -------
    centroid : `~numpy.ndarray`
        The ``x, y`` coordinates of the centroid.
    """
    fit = fit_2dgaussian(data, error=error, mask=mask)
    return np.array([fit.x_mean.value, fit.y_mean.value])
[ "def", "centroid_2dg", "(", "data", ",", "error", "=", "None", ",", "mask", "=", "None", ")", ":", "gfit", "=", "fit_2dgaussian", "(", "data", ",", "error", "=", "error", ",", "mask", "=", "mask", ")", "return", "np", ".", "array", "(", "[", "gfit"...
30.967742
22.709677
def queryEx(self, viewcls, *args, **kwargs):
    """Query a view, dispatching its events to a ``viewcls`` instance.

    :param type viewcls: A class (derived from :class:`AsyncViewBase`)
        to instantiate

    Other arguments are passed to the standard `query` method.

    This functions exactly like the
    :meth:`~couchbase.asynchronous.AsyncBucket.query` method, except it
    automatically schedules operations if the connection has not yet
    been negotiated.
    """
    kwargs['itercls'] = viewcls
    view = super(AsyncBucket, self).query(*args, **kwargs)
    if self.connected:
        view.start()
    else:
        # Defer starting until the connection handshake completes.
        self.connect().addCallback(lambda _: view.start())
    return view
[ "def", "queryEx", "(", "self", ",", "viewcls", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'itercls'", "]", "=", "viewcls", "o", "=", "super", "(", "AsyncBucket", ",", "self", ")", ".", "query", "(", "*", "args", ",", "*"...
33.478261
22.869565
def prep_for_deserialize(model, record, using, init_list=None):
    # pylint:disable=unused-argument
    """
    Convert a record from SFDC (decoded JSON) to
    dict(model string, pk, fields).

    It fixes fields of some types. If names of required fields
    `init_list` are specified, then only these fields are processed.

    Raises:
        ImproperlyConfigured: if the model's app_label can't be discovered.
        DatabaseError: if some expected fields are missing from the record.
    """
    # TODO the parameter 'using' is not currently important.
    attribs = record.pop('attributes')  # NOQA pylint:disable=unused-variable
    mod = model.__module__.split('.')
    if hasattr(model._meta, 'app_label'):
        app_label = getattr(model._meta, 'app_label')
    elif mod[-1] == 'models':
        app_label = mod[-2]
    else:
        # BUGFIX: the '%s' placeholder previously had no argument applied,
        # so the message was raised with a literal '%s' in it.
        raise ImproperlyConfigured(
            "Can't discover the app_label for %s, you must specify it via "
            "model meta options." % model.__name__)
    if len(record.keys()) == 1 and model._meta.db_table in record:
        # this is for objects with ManyToManyField and OneToOneField
        while len(record) == 1:
            record = list(record.values())[0]
            if record is None:
                return None
    fields = prep_for_deserialize_inner(model, record, init_list=init_list)
    if init_list and set(init_list).difference(fields).difference([SF_PK]):
        raise DatabaseError("Not found some expected fields")
    return dict(
        model='.'.join([app_label, model.__name__]),
        pk=record.pop('Id'),
        fields=fields,
    )
[ "def", "prep_for_deserialize", "(", "model", ",", "record", ",", "using", ",", "init_list", "=", "None", ")", ":", "# pylint:disable=unused-argument", "# TODO the parameter 'using' is not currently important.", "attribs", "=", "record", ".", "pop", "(", "'attributes'", ...
40.352941
24.588235
def request_permissions(self, permissions):
    """ Return a future that resolves with the results
    of the permission requests
    """
    f = self.create_future()
    #: Old versions of android did permissions at install time
    if self.api_level < 23:
        f.set_result({p: True for p in permissions})
        return f
    w = self.widget
    request_code = self._permission_code
    self._permission_code += 1  #: So next call has a unique code
    #: On first request, setup our listener, and request the permission
    if request_code == 0:
        w.setPermissionResultListener(w.getId())
        w.onRequestPermissionsResult.connect(self._on_permission_result)

    def on_results(code, perms, results):
        #: Check permissions
        f.set_result({p: r == Activity.PERMISSION_GRANTED
                      for (p, r) in zip(perms, results)})

    #: Save a reference
    self._permission_requests[request_code] = on_results
    #: Send out the request
    self.widget.requestPermissions(permissions, request_code)
    return f
[ "def", "request_permissions", "(", "self", ",", "permissions", ")", ":", "f", "=", "self", ".", "create_future", "(", ")", "#: Old versions of android did permissions at install time", "if", "self", ".", "api_level", "<", "23", ":", "f", ".", "set_result", "(", ...
34.121212
20.909091
def FSeek(params, ctxt, scope, stream, coord):
    """Returns 0 if successful or -1 if the address is out of range
    """
    if len(params) != 1:
        raise errors.InvalidArguments(coord,
            "{} args".format(len(params)),
            "FSeek accepts only one argument")
    pos = PYVAL(params[0])
    curr_pos = stream.tell()
    fsize = stream.size()
    # Out-of-range targets clamp to the nearest end and signal failure.
    if pos > fsize:
        stream.seek(fsize)
        return -1
    elif pos < 0:
        stream.seek(0)
        return -1
    diff = pos - curr_pos
    # Seeking backwards needs no skipped-data bookkeeping.
    if diff < 0:
        stream.seek(pos)
        return 0
    data = stream.read(diff)
    # let the ctxt automatically append numbers, as needed, unless the previous
    # child was also a skipped field
    skipped_name = "_skipped"
    if len(ctxt._pfp__children) > 0 and ctxt._pfp__children[-1]._pfp__name.startswith("_skipped"):
        # Merge consecutive skipped regions into a single child.
        old_name = ctxt._pfp__children[-1]._pfp__name
        data = ctxt._pfp__children[-1].raw_data + data
        skipped_name = old_name
        ctxt._pfp__children = ctxt._pfp__children[:-1]
        del ctxt._pfp__children_map[old_name]
    # Materialize the skipped bytes as a char array child of the context.
    tmp_stream = bitwrap.BitwrappedStream(six.BytesIO(data))
    new_field = pfp.fields.Array(len(data), pfp.fields.Char, tmp_stream)
    ctxt._pfp__add_child(skipped_name, new_field, stream)
    scope.add_var(skipped_name, new_field)
    return 0
[ "def", "FSeek", "(", "params", ",", "ctxt", ",", "scope", ",", "stream", ",", "coord", ")", ":", "if", "len", "(", "params", ")", "!=", "1", ":", "raise", "errors", ".", "InvalidArguments", "(", "coord", ",", "\"{} args\"", ".", "format", "(", "len",...
30.619048
23.047619
def Print(self, output_writer):
    """Prints a human readable version of the filter.

    Args:
      output_writer (CLIOutputWriter): output writer.
    """
    if not self._names:
        return
    joined_names = ', '.join(self._names)
    output_writer.Write('\tnames: {0:s}\n'.format(joined_names))
[ "def", "Print", "(", "self", ",", "output_writer", ")", ":", "if", "self", ".", "_names", ":", "output_writer", ".", "Write", "(", "'\\tnames: {0:s}\\n'", ".", "format", "(", "', '", ".", "join", "(", "self", ".", "_names", ")", ")", ")" ]
28.666667
14.666667
def download(self, version=None, tags=None, ext=None, overwrite=False,
             verbose=False, **kwargs):
    """Downloads the given instance of this dataset from dataset store.

    Parameters
    ----------
    version : str, optional
        The version of the instance of this dataset.
    tags : list of str, optional
        The tags associated with the given instance of this dataset.
    ext : str, optional
        The file extension to use. If not given, the default extension
        is used.
    overwrite : bool, default False
        If set to True, the instance is downloaded even if it exists in
        the local data directory. Otherwise, a local match skips the
        download.
    verbose : bool, default False
        If set to True, informative messages are printed.
    **kwargs : extra keyword arguments
        Forwarded to
        azure.storage.blob.BlockBlobService.get_blob_to_path.
    """
    fpath = self.fpath(version=version, tags=tags, ext=ext)
    if os.path.isfile(fpath) and not overwrite:
        # A matching local file short-circuits the download.
        if verbose:
            print(
                "File exists and overwrite set to False, so not "
                "downloading {} with version={} and tags={}".format(
                    self.name, version, tags))
        return
    download_dataset(
        dataset_name=self.name,
        file_path=fpath,
        task=self.task,
        dataset_attributes=self.kwargs,
        **kwargs,
    )
[ "def", "download", "(", "self", ",", "version", "=", "None", ",", "tags", "=", "None", ",", "ext", "=", "None", ",", "overwrite", "=", "False", ",", "verbose", "=", "False", ",", "*", "*", "kwargs", ")", ":", "fpath", "=", "self", ".", "fpath", "...
42.307692
17.846154
def datetimes(self):
    """A sorted list of datetimes in this analysis period."""
    # Lazily compute the minute-of-year timestamps on first access.
    if self._timestamps_data is None:
        self._calculate_timestamps()
    leap = self.is_leap_year
    return tuple(DateTime.from_moy(moy, leap)
                 for moy in self._timestamps_data)
[ "def", "datetimes", "(", "self", ")", ":", "if", "self", ".", "_timestamps_data", "is", "None", ":", "self", ".", "_calculate_timestamps", "(", ")", "return", "tuple", "(", "DateTime", ".", "from_moy", "(", "moy", ",", "self", ".", "is_leap_year", ")", "...
47
9.5
def placeCursor(self, pos):
    """Try to place the cursor at character position ``pos`` if possible.

    If ``pos`` is past the end of the document, the cursor is placed at
    the end instead.

    :param int pos: target character position in the document.
    """
    # BUGFIX(doc): old docstring referred to nonexistent ``line``/``col``
    # parameters.  Also hoist the duplicate toPlainText() call.
    doc_len = len(self.qteWidget.toPlainText())
    if pos > doc_len:
        pos = doc_len
    tc = self.qteWidget.textCursor()
    tc.setPosition(pos)
    self.qteWidget.setTextCursor(tc)
[ "def", "placeCursor", "(", "self", ",", "pos", ")", ":", "if", "pos", ">", "len", "(", "self", ".", "qteWidget", ".", "toPlainText", "(", ")", ")", ":", "pos", "=", "len", "(", "self", ".", "qteWidget", ".", "toPlainText", "(", ")", ")", "tc", "=...
34.818182
12.090909
def span_path(cls, project, trace, span):
    """Return a fully-qualified span string."""
    template = "projects/{project}/traces/{trace}/spans/{span}"
    return google.api_core.path_template.expand(
        template, project=project, trace=trace, span=span)
[ "def", "span_path", "(", "cls", ",", "project", ",", "trace", ",", "span", ")", ":", "return", "google", ".", "api_core", ".", "path_template", ".", "expand", "(", "\"projects/{project}/traces/{trace}/spans/{span}\"", ",", "project", "=", "project", ",", "trace"...
36
13.875
def foreachPartition(self, f):
    """
    Applies a function to each partition of this RDD.

    >>> def f(iterator):
    ...     for x in iterator:
    ...         print(x)
    >>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
    """
    def run_and_discard(it):
        out = f(it)
        try:
            return iter(out)
        except TypeError:
            # f was called purely for its side effects.
            return iter([])

    self.mapPartitions(run_and_discard).count()
[ "def", "foreachPartition", "(", "self", ",", "f", ")", ":", "def", "func", "(", "it", ")", ":", "r", "=", "f", "(", "it", ")", "try", ":", "return", "iter", "(", "r", ")", "except", "TypeError", ":", "return", "iter", "(", "[", "]", ")", "self"...
28.1875
13.1875
def calculate_mean(samples, weights): r'''Calculate the mean of weighted samples (like the output of an importance-sampling run). :param samples: Matrix-like numpy array; the samples to be used. :param weights: Vector-like numpy array; the (unnormalized) importance weights. ''' assert len(samples) == len(weights), "The number of samples (got %i) must equal the number of weights (got %i)." % (len(samples),len(weights)) return _np.average(samples, axis=0, weights=weights)
[ "def", "calculate_mean", "(", "samples", ",", "weights", ")", ":", "assert", "len", "(", "samples", ")", "==", "len", "(", "weights", ")", ",", "\"The number of samples (got %i) must equal the number of weights (got %i).\"", "%", "(", "len", "(", "samples", ")", "...
33.933333
32.466667
def GET_AUTH(self):  # pylint: disable=arguments-differ
    """ GET request """
    auth_methods = self.user_manager.get_auth_methods()
    username = self.user_manager.session_username()
    user_data = self.database.users.find_one({"username": username})
    bindings = user_data.get("bindings", {})
    renderer = self.template_helper.get_renderer()
    return renderer.preferences.bindings(bindings, auth_methods, "", False)
[ "def", "GET_AUTH", "(", "self", ")", ":", "# pylint: disable=arguments-differ", "auth_methods", "=", "self", ".", "user_manager", ".", "get_auth_methods", "(", ")", "user_data", "=", "self", ".", "database", ".", "users", ".", "find_one", "(", "{", "\"username\"...
65.833333
28
def repartition(self, numPartitions):
    """Repartition every RDD.

    :rtype: DStream

    Example:

    >>> import pysparkling
    >>> sc = pysparkling.Context()
    >>> ssc = pysparkling.streaming.StreamingContext(sc, 0.1)
    >>> (
    ...     ssc
    ...     .queueStream([['hello', 'world']])
    ...     .repartition(2)
    ...     .foreachRDD(lambda rdd: print(len(rdd.partitions())))
    ... )
    >>> ssc.start()
    >>> ssc.awaitTermination(0.25)
    2
    0
    """
    def _repartition(rdd):
        # Empty RDDs cannot be repartitioned; pass them through.
        if isinstance(rdd, EmptyRDD):
            return rdd
        return rdd.repartition(numPartitions)

    return self.transform(_repartition)
[ "def", "repartition", "(", "self", ",", "numPartitions", ")", ":", "return", "self", ".", "transform", "(", "lambda", "rdd", ":", "(", "rdd", ".", "repartition", "(", "numPartitions", ")", "if", "not", "isinstance", "(", "rdd", ",", "EmptyRDD", ")", "els...
25.481481
21.037037
def _convert_angle_limit(angle, joint, **kwargs): """Converts the limit angle of the PyPot JSON file to the internal format""" angle_pypot = angle # No need to take care of orientation if joint["orientation"] == "indirect": angle_pypot = 1 * angle_pypot # angle_pypot = angle_pypot + offset return angle_pypot * np.pi / 180
[ "def", "_convert_angle_limit", "(", "angle", ",", "joint", ",", "*", "*", "kwargs", ")", ":", "angle_pypot", "=", "angle", "# No need to take care of orientation", "if", "joint", "[", "\"orientation\"", "]", "==", "\"indirect\"", ":", "angle_pypot", "=", "1", "*...
31.636364
14.181818
def handle_authorized_event(self, event):
    """Request roster upon login."""
    self.server = event.authorized_jid.bare()
    # Only send a version token when the server supports roster versioning.
    if "versioning" not in self.server_features:
        version = None
    elif self.roster is not None and self.roster.version is not None:
        version = self.roster.version
    else:
        version = u""
    self.request_roster(version)
[ "def", "handle_authorized_event", "(", "self", ",", "event", ")", ":", "self", ".", "server", "=", "event", ".", "authorized_jid", ".", "bare", "(", ")", "if", "\"versioning\"", "in", "self", ".", "server_features", ":", "if", "self", ".", "roster", "is", ...
38.090909
12.454545
def getRepositories(self):
    """Returns a list of repositories for this directory.
    """
    repos = self.repositories
    # A non-duplicated variant dir also exposes its source dir's repos.
    if self.srcdir and not self.duplicate:
        return self.srcdir.get_all_rdirs() + repos
    return repos
[ "def", "getRepositories", "(", "self", ")", ":", "if", "self", ".", "srcdir", "and", "not", "self", ".", "duplicate", ":", "return", "self", ".", "srcdir", ".", "get_all_rdirs", "(", ")", "+", "self", ".", "repositories", "return", "self", ".", "reposito...
40.333333
9
def normalize_value(self, value, transform=True):
    """Prepare the given value to be stored in the index.

    For the parameters, see BaseIndex.normalize_value.

    Raises
    ------
    ValueError
        If ``raise_if_not_float`` is True and the value cannot be
        cast to a float.
    """
    if transform:
        value = self.transform_value(value)
    try:
        return float(value)
    except (ValueError, TypeError):
        # Non-castable values either raise or collapse to zero.
        if not self.raise_if_not_float:
            return 0
        raise ValueError('Invalid value %s for field %s.%s' % (
            value, self.model.__name__, self.field.name
        ))
[ "def", "normalize_value", "(", "self", ",", "value", ",", "transform", "=", "True", ")", ":", "if", "transform", ":", "value", "=", "self", ".", "transform_value", "(", "value", ")", "try", ":", "return", "float", "(", "value", ")", "except", "(", "Val...
31.045455
18.818182
def get_templates(self, limit=100, offset=0):
    """
    Get all account templates
    """
    url = self.TEMPLATES_URL + "?limit=%s&offset=%s" % (limit, offset)
    conn = Connection(self.token)
    conn.set_url(self.production, url)
    return conn.get_request()
[ "def", "get_templates", "(", "self", ",", "limit", "=", "100", ",", "offset", "=", "0", ")", ":", "url", "=", "self", ".", "TEMPLATES_URL", "+", "\"?limit=%s&offset=%s\"", "%", "(", "limit", ",", "offset", ")", "connection", "=", "Connection", "(", "self...
27.636364
16.181818
def calc_q0_perc_uz_v1(self):
    """Perform the upper zone layer routine which determines percolation
    to the lower zone layer and the fast response of the hland model.

    Basic equations:
        :math:`\\frac{dUZ}{dt} = InUZ - Perc - Q0`
        :math:`Perc = PercMax \\cdot ContriArea`
        :math:`Q0 = K \\cdot \\left( \\frac{UZ}{ContriArea} \\right)^{1+Alpha}`

    The simulation step is divided into |RecStep| substeps, each solved
    with first order accuracy; larger |RecStep| values improve the
    accuracy of the numerical integration.  The system behaviour hence
    depends strongly on the |RespArea| and |RecStep| options.

    Required control parameters: |RecStep|, |PercMax|, |K|, |Alpha|
    Required derived parameter: |DT|
    Required fluxes sequence: |InUZ|
    Calculated fluxes sequences: |Perc|, |Q0|
    Updated state sequence: |UZ|
    """
    con = self.parameters.control.fastaccess
    der = self.parameters.derived.fastaccess
    flu = self.sequences.fluxes.fastaccess
    sta = self.sequences.states.fastaccess
    flu.perc = 0.
    flu.q0 = 0.
    for _ in range(con.recstep):
        # First state update related to the upper zone input.
        sta.uz += der.dt * flu.inuz
        # Second state update related to percolation (limited by the
        # available storage).
        d_perc = min(der.dt * con.percmax * flu.contriarea, sta.uz)
        sta.uz -= d_perc
        flu.perc += d_perc
        # Third state update related to fast runoff response.
        if sta.uz > 0.:
            if flu.contriarea > 0.:
                d_q0 = min(
                    der.dt * con.k * (sta.uz / flu.contriarea) ** (1. + con.alpha),
                    sta.uz)
            else:
                d_q0 = sta.uz
            sta.uz -= d_q0
            flu.q0 += d_q0
        else:
            d_q0 = 0.
[ "def", "calc_q0_perc_uz_v1", "(", "self", ")", ":", "con", "=", "self", ".", "parameters", ".", "control", ".", "fastaccess", "der", "=", "self", ".", "parameters", ".", "derived", ".", "fastaccess", "flu", "=", "self", ".", "sequences", ".", "fluxes", "...
29.918478
21.391304
def _update_partition_dci_id(self, tenant_name, dci_id, vrf_prof=None, part_name=None): """Function to update DCI ID of partition. """ self.dcnm_obj.update_project(tenant_name, part_name, dci_id=dci_id, vrf_prof=vrf_prof)
[ "def", "_update_partition_dci_id", "(", "self", ",", "tenant_name", ",", "dci_id", ",", "vrf_prof", "=", "None", ",", "part_name", "=", "None", ")", ":", "self", ".", "dcnm_obj", ".", "update_project", "(", "tenant_name", ",", "part_name", ",", "dci_id", "="...
60.6
17.8
def _load_hooks_settings(self):
    """Load hooks settings into the preferences widget."""
    log.debug("executing _load_hooks_settings")
    widget = self.get_widget("hook_show")
    setting = self.settings.hooks.get_string("show")
    # Only populate when both the widget and the stored value exist.
    if widget is not None and setting is not None:
        widget.set_text(setting)
[ "def", "_load_hooks_settings", "(", "self", ")", ":", "log", ".", "debug", "(", "\"executing _load_hooks_settings\"", ")", "hook_show_widget", "=", "self", ".", "get_widget", "(", "\"hook_show\"", ")", "hook_show_setting", "=", "self", ".", "settings", ".", "hooks...
47.625
10.75
def delete_river(self, river, river_name=None): """ Delete a river """ if isinstance(river, River): river_name = river.name return self._send_request('DELETE', '/_river/%s/' % river_name)
[ "def", "delete_river", "(", "self", ",", "river", ",", "river_name", "=", "None", ")", ":", "if", "isinstance", "(", "river", ",", "River", ")", ":", "river_name", "=", "river", ".", "name", "return", "self", ".", "_send_request", "(", "'DELETE'", ",", ...
33.285714
9.285714
def assembly_cleanup(data): """ cleanup for assembly object """ ## build s2 results data frame data.stats_dfs.s2 = data._build_stat("s2") data.stats_files.s2 = os.path.join(data.dirs.edits, 's2_rawedit_stats.txt') ## write stats for all samples with io.open(data.stats_files.s2, 'w', encoding='utf-8') as outfile: data.stats_dfs.s2.fillna(value=0).astype(np.int).to_string(outfile)
[ "def", "assembly_cleanup", "(", "data", ")", ":", "## build s2 results data frame", "data", ".", "stats_dfs", ".", "s2", "=", "data", ".", "_build_stat", "(", "\"s2\"", ")", "data", ".", "stats_files", ".", "s2", "=", "os", ".", "path", ".", "join", "(", ...
40.6
21.7
def update(self, webhook_url=values.unset, friendly_name=values.unset, reachability_webhooks_enabled=values.unset, acl_enabled=values.unset): """ Update the ServiceInstance :param unicode webhook_url: A URL that will receive event updates when objects are manipulated. :param unicode friendly_name: Human-readable name for this service instance :param bool reachability_webhooks_enabled: True or false - controls whether this instance fires webhooks when client endpoints connect to Sync :param bool acl_enabled: true or false - determines whether token identities must be granted access to Sync objects via the Permissions API in this Service. :returns: Updated ServiceInstance :rtype: twilio.rest.sync.v1.service.ServiceInstance """ data = values.of({ 'WebhookUrl': webhook_url, 'FriendlyName': friendly_name, 'ReachabilityWebhooksEnabled': reachability_webhooks_enabled, 'AclEnabled': acl_enabled, }) payload = self._version.update( 'POST', self._uri, data=data, ) return ServiceInstance(self._version, payload, sid=self._solution['sid'], )
[ "def", "update", "(", "self", ",", "webhook_url", "=", "values", ".", "unset", ",", "friendly_name", "=", "values", ".", "unset", ",", "reachability_webhooks_enabled", "=", "values", ".", "unset", ",", "acl_enabled", "=", "values", ".", "unset", ")", ":", ...
44.5
28.214286
def from_file(self, fname, comment_lead=['c'], compressed_with='use_ext'): """ Read a CNF formula from a file in the DIMACS format. A file name is expected as an argument. A default argument is ``comment_lead`` for parsing comment lines. A given file can be compressed by either gzip, bzip2, or lzma. :param fname: name of a file to parse. :param comment_lead: a list of characters leading comment lines :param compressed_with: file compression algorithm :type fname: str :type comment_lead: list(str) :type compressed_with: str Note that the ``compressed_with`` parameter can be ``None`` (i.e. the file is uncompressed), ``'gzip'``, ``'bzip2'``, ``'lzma'``, or ``'use_ext'``. The latter value indicates that compression type should be automatically determined based on the file extension. Using ``'lzma'`` in Python 2 requires the ``backports.lzma`` package to be additionally installed. Usage example: .. code-block:: python >>> from pysat.formula import CNF >>> cnf1 = CNF() >>> cnf1.from_file('some-file.cnf.gz', compressed_with='gzip') >>> >>> cnf2 = CNF(from_file='another-file.cnf') """ with FileObject(fname, mode='r', compression=compressed_with) as fobj: self.from_fp(fobj.fp, comment_lead)
[ "def", "from_file", "(", "self", ",", "fname", ",", "comment_lead", "=", "[", "'c'", "]", ",", "compressed_with", "=", "'use_ext'", ")", ":", "with", "FileObject", "(", "fname", ",", "mode", "=", "'r'", ",", "compression", "=", "compressed_with", ")", "a...
43
24.657143
def get_md5sum(src_file): """Returns md5sum of file""" with open(src_file, 'rb') as src_data: src_content = src_data.read() return hashlib.md5(src_content).hexdigest()
[ "def", "get_md5sum", "(", "src_file", ")", ":", "with", "open", "(", "src_file", ",", "'rb'", ")", "as", "src_data", ":", "src_content", "=", "src_data", ".", "read", "(", ")", "return", "hashlib", ".", "md5", "(", "src_content", ")", ".", "hexdigest", ...
37.4
6.2
def compose_capability(base, *classes): """Create a new class starting with the base and adding capabilities.""" if _debug: compose_capability._debug("compose_capability %r %r", base, classes) # make sure the base is a Collector if not issubclass(base, Collector): raise TypeError("base must be a subclass of Collector") # make sure you only add capabilities for cls in classes: if not issubclass(cls, Capability): raise TypeError("%s is not a Capability subclass" % (cls,)) # start with everything the base has and add the new ones bases = (base,) + classes # build a new name name = base.__name__ for cls in classes: name += '+' + cls.__name__ # return a new type return type(name, bases, {})
[ "def", "compose_capability", "(", "base", ",", "*", "classes", ")", ":", "if", "_debug", ":", "compose_capability", ".", "_debug", "(", "\"compose_capability %r %r\"", ",", "base", ",", "classes", ")", "# make sure the base is a Collector", "if", "not", "issubclass"...
33.26087
18.913043
def _do_get(self, uri, **kwargs): """ Convinient method for GET requests Returns http request status value from a POST request """ #TODO: # Add error handling. Check for HTTP status here would be much more conveinent than in each calling method scaleioapi_get_headers = {'Content-type':'application/json','Version':'1.0'} self.logger.debug("_do_get() " + "{}/{}".format(self._api_url,uri)) if kwargs: for key, value in kwargs.iteritems(): if key == 'headers': scaleio_get_headersvalue = value try: #response = self._im_session.get("{}/{}".format(self._api_url, uri), headers = scaleioapi_get_headers, payload = scaleio_payload).json() response = self._im_session.get("{}/{}".format(self._api_url, uri), **kwargs).json() #response = self._session.get(url, headers=scaleioapi_post_headers, **kwargs) if response.status_code == requests.codes.ok: return response else: raise RuntimeError("_do_get() - HTTP response error" + response.status_code) except: raise RuntimeError("_do_get() - Communication error with ScaleIO gateway") return response
[ "def", "_do_get", "(", "self", ",", "uri", ",", "*", "*", "kwargs", ")", ":", "#TODO:", "# Add error handling. Check for HTTP status here would be much more conveinent than in each calling method", "scaleioapi_get_headers", "=", "{", "'Content-type'", ":", "'application/json'",...
49
29.153846
def is_available(workshift_profile, shift): """ Check whether a specified user is able to do a specified workshift. Parameters: workshift_profile is the workshift profile for a user shift is a weekly recurring workshift Returns: True if the user has enough free time between the shift's start time and end time to do the shift's required number of hours. False otherwise. """ if shift.week_long: return True start_time = ( shift.start_time if shift.start_time is not None else time(hour=0) ) end_time = ( shift.end_time if shift.end_time is not None else time(hour=23, minute=59) ) relevant_blocks = list() for block in workshift_profile.time_blocks.order_by('start_time'): if block.day == shift.day and block.preference == TimeBlock.BUSY \ and block.start_time < end_time \ and block.end_time > start_time: relevant_blocks.append(block) # Time blocks should be ordered; so go through and see if there is a wide # enough window for the workshifter to do the shift. If there is, # return True. if not relevant_blocks: return True hours_delta = timedelta(hours=float(shift.hours)) # Check the time between shift start and block start block = relevant_blocks.pop(0) start_delta = timedelta( hours=block.start_time.hour - start_time.hour, minutes=block.start_time.minute - start_time.minute, ) if start_delta >= hours_delta: return True while len(relevant_blocks) > 0: block, prev_block = relevant_blocks.pop(0), block # Check the time between the last block and the next block # is larger than the length of the shift start_end_delta = timedelta( hours=block.start_time.hour - prev_block.end_time.hour, minutes=block.start_time.minute - prev_block.end_time.minute, ) if start_end_delta >= hours_delta: return True # Check the time between the end of the time block to the end of the shift end_delta = timedelta( hours=end_time.hour - block.end_time.hour, minutes=end_time.minute - block.end_time.minute, ) if end_delta >= hours_delta: return True return False
[ "def", "is_available", "(", "workshift_profile", ",", "shift", ")", ":", "if", "shift", ".", "week_long", ":", "return", "True", "start_time", "=", "(", "shift", ".", "start_time", "if", "shift", ".", "start_time", "is", "not", "None", "else", "time", "(",...
31.383562
21.493151
def _two_to_one(datadir): """After this command, your environment will be converted to format version {} and will not work with Datacats versions beyond and including 1.0.0. This format version doesn't support multiple sites, and after this only your "primary" site will be usable, though other sites will be maintained if you wish to do a migration back to a version which supports multisite. Would you like to continue the migration? (y/n) [n]:""" _, env_name = _split_path(datadir) print 'Making sure that containers are stopped...' # New-style names remove_container('datacats_web_{}_primary'.format(env_name)) remove_container('datacats_postgres_{}_primary'.format(env_name)) remove_container('datacats_solr_{}_primary'.format(env_name)) print 'Doing conversion...' if exists(path_join(datadir, '.version')): os.remove(path_join(datadir, '.version')) to_move = (['files', 'passwords.ini', 'run', 'solr'] + (['postgres'] if not is_boot2docker() else [])) web_command( command=['/scripts/migrate.sh', '/project/data/sites/primary', '/project/data'] + to_move, ro={scripts.get_script_path('migrate.sh'): '/scripts/migrate.sh'}, rw={datadir: '/project/data'} ) pgdata_name = 'datacats_pgdata_{}_primary'.format(env_name) if is_boot2docker() and inspect_container(pgdata_name): rename_container(pgdata_name, 'datacats_pgdata_{}'.format(env_name)) print 'Doing cleanup...' 
with open(path_join(datadir, 'project-dir')) as pd: datacats_env_location = path_join(pd.read(), '.datacats-environment') cp = SafeConfigParser() cp.read(datacats_env_location) # We need to move the port OUT of site_primary section and INTO datacats cp.set('datacats', 'port', cp.get('site_primary', 'port')) cp.remove_section('site_primary') with open(datacats_env_location, 'w') as config: cp.write(config) cp = SafeConfigParser() cp.read(path_join(datadir, 'passwords.ini')) # This isn't needed in this version cp.remove_option('passwords', 'beaker_session_secret') with open(path_join(datadir, 'passwords.ini'), 'w') as config: cp.write(config)
[ "def", "_two_to_one", "(", "datadir", ")", ":", "_", ",", "env_name", "=", "_split_path", "(", "datadir", ")", "print", "'Making sure that containers are stopped...'", "# New-style names", "remove_container", "(", "'datacats_web_{}_primary'", ".", "format", "(", "env_na...
37.896552
22.706897
def run_duplicated_samples(in_prefix, in_type, out_prefix, base_dir, options): """Runs step1 (duplicated samples). :param in_prefix: the prefix of the input files. :param in_type: the type of the input files. :param out_prefix: the output prefix. :param base_dir: the output directory. :param options: the options needed. :type in_prefix: str :type in_type: str :type out_prefix: str :type base_dir: str :type options: list :returns: a tuple containing the prefix of the output files (the input prefix for the next script) and the type of the output files (``tfile``). This function calls the :py:mod:`pyGenClean.DupSamples.duplicated_samples` module. The required file type for this module is ``tfile``, hence the need to use the :py:func:`check_input_files` to check if the file input file type is the good one, or to create it if needed. """ # Creating the output directory os.mkdir(out_prefix) # We know we need tfile required_type = "tfile" check_input_files(in_prefix, in_type, required_type) # We need to inject the name of the input file and the name of the output # prefix script_prefix = os.path.join(out_prefix, "dup_samples") options += ["--{}".format(required_type), in_prefix, "--out", script_prefix] # We run the script try: duplicated_samples.main(options) except duplicated_samples.ProgramError as e: msg = "duplicated_samples: {}".format(e) raise ProgramError(msg) # Reading the number of duplicated samples duplicated_count = defaultdict(int) if os.path.isfile(script_prefix + ".duplicated_samples.tfam"): with open(script_prefix + ".duplicated_samples.tfam", "r") as i_file: duplicated_count = Counter([ tuple(createRowFromPlinkSpacedOutput(line)[:2]) for line in i_file ]) # Counting the number of zeroed out genotypes per duplicated sample zeroed_out = defaultdict(int) if os.path.isfile(script_prefix + ".zeroed_out"): with open(script_prefix + ".zeroed_out", "r") as i_file: zeroed_out = Counter([ tuple(line.rstrip("\r\n").split("\t")[:2]) for line in 
i_file.read().splitlines()[1:] ]) nb_zeroed_out = sum(zeroed_out.values()) # Checking the not good enough samples not_good_enough = set() if os.path.isfile(script_prefix + ".not_good_enough"): with open(script_prefix + ".not_good_enough", "r") as i_file: not_good_enough = { tuple(line.rstrip("\r\n").split("\t")[:4]) for line in i_file.read().splitlines()[1:] } # Checking which samples were chosen chosen_sample = set() if os.path.isfile(script_prefix + ".chosen_samples.info"): with open(script_prefix + ".chosen_samples.info", "r") as i_file: chosen_sample = { tuple(line.rstrip("\r\n").split("\t")) for line in i_file.read().splitlines()[1:] } # Finding if some 'not_good_enough' samples were chosen not_good_still = {s[2:] for s in chosen_sample & not_good_enough} # We create a LaTeX summary latex_file = os.path.join(script_prefix + ".summary.tex") try: with open(latex_file, "w") as o_file: print >>o_file, latex_template.subsection( duplicated_samples.pretty_name ) text = ( "A total of {:,d} duplicated sample{} {} found.".format( len(duplicated_count), "s" if len(duplicated_count) > 1 else "", "were" if len(duplicated_count) > 1 else "was", ) ) print >>o_file, latex_template.wrap_lines(text) if len(duplicated_count) > 0: text = ( "While merging duplicates, a total of {:,d} genotype{} {} " "zeroed out. A total of {:,d} sample{} {} found to be not " "good enough for duplicate completion.".format( nb_zeroed_out, "s" if nb_zeroed_out > 1 else "", "were" if nb_zeroed_out > 1 else "was", len(not_good_enough), "s" if len(not_good_enough) > 1 else "", "were" if len(not_good_enough) > 1 else "was", ) ) print >>o_file, latex_template.wrap_lines(text) table_label = re.sub( r"[/\\]", "_", script_prefix, ) + "_dup_samples" text = ( r"Table~\ref{" + table_label + "} summarizes the number " "of each duplicated sample with some characteristics." 
) print >>o_file, latex_template.wrap_lines(text) if len(not_good_still) > 0: text = latex_template.textbf( "There {} {:,d} sample{} that {} not good due to low " "completion or concordance, but {} still selected as " "the best duplicate (see Table~{}).".format( "were" if len(not_good_still) > 1 else "was", len(not_good_still), "s" if len(not_good_still) > 1 else "", "were" if len(not_good_still) > 1 else "was", "were" if len(not_good_still) > 1 else "was", r"~\ref{" + table_label + "}", ) ) print >>o_file, latex_template.wrap_lines(text) # Getting the template longtable_template = latex_template.jinja2_env.get_template( "longtable_template.tex", ) # The table caption table_caption = ( "Summary of the {:,d} duplicated sample{}. The number of " "duplicates and the total number of zeroed out genotypes " "are shown.".format( len(duplicated_count), "s" if len(duplicated_count) > 1 else "", ) ) if len(not_good_still) > 0: table_caption += ( " A total of {:,d} sample{} (highlighted) {} not good " "enough for completion, but {} chosen as the best " "duplicate, and {} still in the final " "dataset).".format( len(not_good_still), "s" if len(not_good_still) > 1 else "", "were" if len(not_good_still) > 1 else "was", "were" if len(not_good_still) > 1 else "was", "are" if len(not_good_still) > 1 else "is", ) ) duplicated_samples_list = duplicated_count.most_common() print >>o_file, longtable_template.render( table_caption=table_caption, table_label=table_label, nb_col=4, col_alignments="llrr", text_size="scriptsize", header_data=[("FID", 1), ("IID", 1), ("Nb Duplicate", 1), ("Nb Zeroed", 1)], tabular_data=[ [latex_template.sanitize_tex(fid), latex_template.sanitize_tex(iid), "{:,d}".format(nb), "{:,d}".format(zeroed_out[(fid, iid)])] for (fid, iid), nb in duplicated_samples_list ], highlighted=[ (fid, iid) in not_good_still for fid, iid in [i[0] for i in duplicated_samples_list] ], ) except IOError: msg = "{}: cannot write LaTeX summary".format(latex_file) raise 
ProgramError(msg) # Writing the summary results with open(os.path.join(base_dir, "results_summary.txt"), "a") as o_file: print >>o_file, "# {}".format(script_prefix) counter = Counter(duplicated_count.values()).most_common() if counter: print >>o_file, "Number of replicated samples" else: print >>o_file, "Number of replicated samples\t0" for rep_type, rep_count in counter: print >>o_file, " - x{}\t{:,d}\t\t-{:,d}".format( rep_type, rep_count, (rep_count * rep_type) - rep_count, ) print >>o_file, ("Poorly chosen replicated " "samples\t{:,d}".format(len(not_good_still))) print >>o_file, "---" # We know this step does produce a new data set (tfile), so we return it return _StepResult( next_file=os.path.join(out_prefix, "dup_samples.final"), next_file_type="tfile", latex_summary=latex_file, description=duplicated_samples.desc, long_description=duplicated_samples.long_desc, graph_path=None, )
[ "def", "run_duplicated_samples", "(", "in_prefix", ",", "in_type", ",", "out_prefix", ",", "base_dir", ",", "options", ")", ":", "# Creating the output directory", "os", ".", "mkdir", "(", "out_prefix", ")", "# We know we need tfile", "required_type", "=", "\"tfile\""...
41.473214
20.491071
def TRM(f,a,b): """ Calculate TRM using tanh relationship TRM(f)=a*math.tanh(b*f) """ m = float(a) * math.tanh(float(b) * float(f)) return float(m)
[ "def", "TRM", "(", "f", ",", "a", ",", "b", ")", ":", "m", "=", "float", "(", "a", ")", "*", "math", ".", "tanh", "(", "float", "(", "b", ")", "*", "float", "(", "f", ")", ")", "return", "float", "(", "m", ")" ]
24
9.714286
def reset(self): """ Reset the state of the sandbox. http://docs.fiesta.cc/sandbox.html#post--reset """ path = 'reset' request_data = {} # Need to put data into the request to force urllib2 to make it a POST request response_data = self.request(path, request_data) success = response_data['reset'] # True of False return success
[ "def", "reset", "(", "self", ")", ":", "path", "=", "'reset'", "request_data", "=", "{", "}", "# Need to put data into the request to force urllib2 to make it a POST request", "response_data", "=", "self", ".", "request", "(", "path", ",", "request_data", ")", "succes...
39.5
17.5
def aggregate_hazard_preparation(self): """This function is doing the aggregate hazard layer. It will prepare the aggregate layer and intersect hazard polygons with aggregation areas and assign hazard class. """ LOGGER.info('ANALYSIS : Aggregate hazard preparation') self.set_state_process('hazard', 'Make hazard layer valid') self.hazard = clean_layer(self.hazard) self.debug_layer(self.hazard) self.set_state_process( 'aggregation', 'Union hazard polygons with aggregation areas and assign ' 'hazard class') self._aggregate_hazard_impacted = union(self.hazard, self.aggregation) self.debug_layer(self._aggregate_hazard_impacted)
[ "def", "aggregate_hazard_preparation", "(", "self", ")", ":", "LOGGER", ".", "info", "(", "'ANALYSIS : Aggregate hazard preparation'", ")", "self", ".", "set_state_process", "(", "'hazard'", ",", "'Make hazard layer valid'", ")", "self", ".", "hazard", "=", "clean_lay...
43.529412
18.117647
def __nt_relpath(path, start=os.curdir): """Return a relative version of a path""" if not path: raise ValueError("no path specified") start_list = os.path.abspath(start).split(os.sep) path_list = os.path.abspath(path).split(os.sep) if start_list[0].lower() != path_list[0].lower(): unc_path, rest = os.path.splitunc(path) unc_start, rest = os.path.splitunc(start) if bool(unc_path) ^ bool(unc_start): raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)" \ % (path, start)) else: raise ValueError("path is on drive %s, start on drive %s" \ % (path_list[0], start_list[0])) # Work out how much of the filepath is shared by start and path. for i in range(min(len(start_list), len(path_list))): if start_list[i].lower() != path_list[i].lower(): break else: i += 1 pass rel_list = [os.pardir] * (len(start_list)-i) + path_list[i:] if not rel_list: return os.curdir return os.path.join(*rel_list)
[ "def", "__nt_relpath", "(", "path", ",", "start", "=", "os", ".", "curdir", ")", ":", "if", "not", "path", ":", "raise", "ValueError", "(", "\"no path specified\"", ")", "start_list", "=", "os", ".", "path", ".", "abspath", "(", "start", ")", ".", "spl...
42.4
18.2
def generate_dep_names(self, target: Target): """Generate names of all dependencies (descendants) of `target`.""" yield from sorted(get_descendants(self.target_graph, target.name))
[ "def", "generate_dep_names", "(", "self", ",", "target", ":", "Target", ")", ":", "yield", "from", "sorted", "(", "get_descendants", "(", "self", ".", "target_graph", ",", "target", ".", "name", ")", ")" ]
64.666667
13
def init_mimedb(): """Initialize the local MIME database.""" global mimedb try: mimedb = mimetypes.MimeTypes(strict=False) except Exception as msg: log.error(LOG_CHECK, "could not initialize MIME database: %s" % msg) return # For Opera bookmark files (opera6.adr) add_mimetype(mimedb, 'text/plain', '.adr') # To recognize PHP files as HTML with content check. add_mimetype(mimedb, 'application/x-httpd-php', '.php') # To recognize WML files add_mimetype(mimedb, 'text/vnd.wap.wml', '.wml')
[ "def", "init_mimedb", "(", ")", ":", "global", "mimedb", "try", ":", "mimedb", "=", "mimetypes", ".", "MimeTypes", "(", "strict", "=", "False", ")", "except", "Exception", "as", "msg", ":", "log", ".", "error", "(", "LOG_CHECK", ",", "\"could not initializ...
38.571429
16.357143
def _check_file_exists_unix(self, remote_cmd=""): """Check if the dest_file already exists on the file system (return boolean).""" if self.direction == "put": self.ssh_ctl_chan._enter_shell() remote_cmd = "ls {}".format(self.file_system) remote_out = self.ssh_ctl_chan.send_command( remote_cmd, expect_string=r"[\$#]" ) self.ssh_ctl_chan._return_cli() return self.dest_file in remote_out elif self.direction == "get": return os.path.exists(self.dest_file)
[ "def", "_check_file_exists_unix", "(", "self", ",", "remote_cmd", "=", "\"\"", ")", ":", "if", "self", ".", "direction", "==", "\"put\"", ":", "self", ".", "ssh_ctl_chan", ".", "_enter_shell", "(", ")", "remote_cmd", "=", "\"ls {}\"", ".", "format", "(", "...
47.333333
9.166667
def write(self, data: bytes) -> None: """ Write the data. """ if self.finished(): if self._exc: raise self._exc raise WriteAfterFinishedError if not data: return try: self._delegate.write_data(data, finished=False) except BaseWriteException as e: self._finished.set() if self._exc is None: self._exc = e raise
[ "def", "write", "(", "self", ",", "data", ":", "bytes", ")", "->", "None", ":", "if", "self", ".", "finished", "(", ")", ":", "if", "self", ".", "_exc", ":", "raise", "self", ".", "_exc", "raise", "WriteAfterFinishedError", "if", "not", "data", ":", ...
21.136364
18.045455
def gates_in_isa(isa): """ Generate the full gateset associated with an ISA. :param ISA isa: The instruction set architecture for a QPU. :return: A sequence of Gate objects encapsulating all gates compatible with the ISA. :rtype: Sequence[Gate] """ gates = [] for q in isa.qubits: if q.dead: # TODO: dead qubits may in the future lead to some implicit re-indexing continue if q.type in ["Xhalves"]: gates.extend([ Gate("I", [], [unpack_qubit(q.id)]), Gate("RX", [np.pi / 2], [unpack_qubit(q.id)]), Gate("RX", [-np.pi / 2], [unpack_qubit(q.id)]), Gate("RX", [np.pi], [unpack_qubit(q.id)]), Gate("RX", [-np.pi], [unpack_qubit(q.id)]), Gate("RZ", [THETA], [unpack_qubit(q.id)]), ]) else: # pragma no coverage raise ValueError("Unknown qubit type: {}".format(q.type)) for e in isa.edges: if e.dead: continue targets = [unpack_qubit(t) for t in e.targets] if e.type in ["CZ", "ISWAP"]: gates.append(Gate(e.type, [], targets)) gates.append(Gate(e.type, [], targets[::-1])) elif e.type in ["CPHASE"]: gates.append(Gate(e.type, [THETA], targets)) gates.append(Gate(e.type, [THETA], targets[::-1])) else: # pragma no coverage raise ValueError("Unknown edge type: {}".format(e.type)) return gates
[ "def", "gates_in_isa", "(", "isa", ")", ":", "gates", "=", "[", "]", "for", "q", "in", "isa", ".", "qubits", ":", "if", "q", ".", "dead", ":", "# TODO: dead qubits may in the future lead to some implicit re-indexing", "continue", "if", "q", ".", "type", "in", ...
39.078947
18.973684
def relations(cls): """Return a `list` of relationship names or the given model """ return [c.key for c in cls.__mapper__.iterate_properties if isinstance(c, RelationshipProperty)]
[ "def", "relations", "(", "cls", ")", ":", "return", "[", "c", ".", "key", "for", "c", "in", "cls", ".", "__mapper__", ".", "iterate_properties", "if", "isinstance", "(", "c", ",", "RelationshipProperty", ")", "]" ]
43.2
12
def kuhn_munkres(G): # maximum profit bipartite matching in O(n^4) """Maximum profit perfect matching for minimum cost perfect matching just inverse the weights :param G: squared weight matrix of a complete bipartite graph :complexity: :math:`O(n^4)` """ assert len(G) == len(G[0]) n = len(G) mu = [None] * n # Empty matching mv = [None] * n lu = [max(row) for row in G] # Trivial labels lv = [0] * n for u0 in range(n): if mu[u0] is None: # Free node while True: au = [False] * n # Empty alternating tree av = [False] * n if improve_matching(G, u0, mu, mv, au, av, lu, lv): break improve_labels(G, au, av, lu, lv) return (mu, sum(lu) + sum(lv))
[ "def", "kuhn_munkres", "(", "G", ")", ":", "# maximum profit bipartite matching in O(n^4)", "assert", "len", "(", "G", ")", "==", "len", "(", "G", "[", "0", "]", ")", "n", "=", "len", "(", "G", ")", "mu", "=", "[", "None", "]", "*", "n", "# Empty mat...
35.565217
17.26087
def fallback_schema_from_field(self, field): """ Fallback schema for field that isn't inspected properly by DRF and probably won't land in upstream canon due to its hacky nature only for doc purposes """ title = force_text(field.label) if field.label else '' description = force_text(field.help_text) if field.help_text else '' # since we can't really inspect dictfield and jsonfield, at least display object as type # instead of string if isinstance(field, (serializers.DictField, serializers.JSONField)): return coreschema.Object( properties={}, title=title, description=description )
[ "def", "fallback_schema_from_field", "(", "self", ",", "field", ")", ":", "title", "=", "force_text", "(", "field", ".", "label", ")", "if", "field", ".", "label", "else", "''", "description", "=", "force_text", "(", "field", ".", "help_text", ")", "if", ...
47.266667
21.066667
def _set_state(self, state, force=True): """ Setting force to True allows for changing a state after it COMPLETED. This would otherwise be invalid. """ self._setstate(state, True) self.last_state_change = time.time()
[ "def", "_set_state", "(", "self", ",", "state", ",", "force", "=", "True", ")", ":", "self", ".", "_setstate", "(", "state", ",", "True", ")", "self", ".", "last_state_change", "=", "time", ".", "time", "(", ")" ]
36.857143
6.571429
def split_overlays(self): "Deprecated method to split overlays inside the HoloMap." if util.config.future_deprecations: self.param.warning("split_overlays is deprecated and is now " "a private method.") return self._split_overlays()
[ "def", "split_overlays", "(", "self", ")", ":", "if", "util", ".", "config", ".", "future_deprecations", ":", "self", ".", "param", ".", "warning", "(", "\"split_overlays is deprecated and is now \"", "\"a private method.\"", ")", "return", "self", ".", "_split_over...
49
15
def list_metrics(self, include=None, interval="1d", **kwargs): """Get statistics. :param list[str] include: List of fields included in response. None, or an empty list will return all fields. Fields: transactions, successful_api_calls, failed_api_calls, successful_handshakes, pending_bootstraps, successful_bootstraps, failed_bootstraps, registrations, updated_registrations, expired_registrations, deleted_registrations :param str interval: Group data by this interval in days, weeks or hours. Sample values: 2h, 3w, 4d. :param datetime start: Fetch the data with timestamp greater than or equal to this value. The parameter is not mandatory, if the period is specified. :param datetime end: Fetch the data with timestamp less than this value. The parameter is not mandatory, if the period is specified. :param str period: Period. Fetch the data for the period in days, weeks or hours. Sample values: 2h, 3w, 4d. The parameter is not mandatory, if the start and end time are specified :param int limit: The number of devices to retrieve :param str order: The ordering direction, ascending (asc) or descending (desc) :param str after: Get metrics after/starting at given metric ID :returns: a list of :py:class:`Metric` objects :rtype: PaginatedResponse """ self._verify_arguments(interval, kwargs) include = Metric._map_includes(include) kwargs.update(dict(include=include, interval=interval)) api = self._get_api(statistics.StatisticsApi) return PaginatedResponse(api.get_metrics, lwrap_type=Metric, **kwargs)
[ "def", "list_metrics", "(", "self", ",", "include", "=", "None", ",", "interval", "=", "\"1d\"", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_verify_arguments", "(", "interval", ",", "kwargs", ")", "include", "=", "Metric", ".", "_map_includes", "("...
61.785714
26.964286
def load_rabit_checkpoint(self): """Initialize the model by load from rabit checkpoint. Returns ------- version: integer The version number of the model. """ version = ctypes.c_int() _check_call(_LIB.XGBoosterLoadRabitCheckpoint( self.handle, ctypes.byref(version))) return version.value
[ "def", "load_rabit_checkpoint", "(", "self", ")", ":", "version", "=", "ctypes", ".", "c_int", "(", ")", "_check_call", "(", "_LIB", ".", "XGBoosterLoadRabitCheckpoint", "(", "self", ".", "handle", ",", "ctypes", ".", "byref", "(", "version", ")", ")", ")"...
30.416667
13.333333
def submit(self, func, *args, **kwargs): """Submit a function to the pool, `self.submit(function,arg1,arg2,arg3=3)`""" with self._shutdown_lock: if PY3 and self._broken: raise BrokenProcessPool( "A child process terminated " "abruptly, the process pool is not usable anymore" ) if self._shutdown_thread: raise RuntimeError("cannot schedule new futures after shutdown") callback = kwargs.pop("callback", self.default_callback) future = NewFuture( self._timeout, args, kwargs, callback=callback, catch_exception=self.catch_exception, ) w = _WorkItem(future, func, args, kwargs) self._pending_work_items[self._queue_count] = w self._work_ids.put(self._queue_count) self._queue_count += 1 self._result_queue.put(None) self._start_queue_management_thread() if PY2: self._adjust_process_count() self._all_futures.add(future) return future
[ "def", "submit", "(", "self", ",", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "with", "self", ".", "_shutdown_lock", ":", "if", "PY3", "and", "self", ".", "_broken", ":", "raise", "BrokenProcessPool", "(", "\"A child process terminated \"...
40.448276
13.172414
def parse_multi_object_delete_response(data): """Parser for Multi-Object Delete API response. :param data: XML response body content from service. :return: Returns list of error objects for each delete object that had an error. """ root = S3Element.fromstring('MultiObjectDeleteResult', data) return [ MultiDeleteError(errtag.get_child_text('Key'), errtag.get_child_text('Code'), errtag.get_child_text('Message')) for errtag in root.findall('Error') ]
[ "def", "parse_multi_object_delete_response", "(", "data", ")", ":", "root", "=", "S3Element", ".", "fromstring", "(", "'MultiObjectDeleteResult'", ",", "data", ")", "return", "[", "MultiDeleteError", "(", "errtag", ".", "get_child_text", "(", "'Key'", ")", ",", ...
33.5625
20.6875
def weekdays(first_day=None): """Returns a list of weekday names. Arguments --------- first_day : str, default None The first day of the week. If not given, 'Monday' is used. Returns ------- list A list of weekday names. """ if first_day is None: first_day = 'Monday' ix = _lower_weekdays().index(first_day.lower()) return _double_weekdays()[ix:ix+7]
[ "def", "weekdays", "(", "first_day", "=", "None", ")", ":", "if", "first_day", "is", "None", ":", "first_day", "=", "'Monday'", "ix", "=", "_lower_weekdays", "(", ")", ".", "index", "(", "first_day", ".", "lower", "(", ")", ")", "return", "_double_weekda...
23.764706
18.588235
def to_wire(self, origin=None, max_size=0, **kw): """Return a string containing the message in DNS compressed wire format. Additional keyword arguments are passed to the rrset to_wire() method. @param origin: The origin to be appended to any relative names. @type origin: dns.name.Name object @param max_size: The maximum size of the wire format output; default is 0, which means 'the message's request payload, if nonzero, or 65536'. @type max_size: int @raises dns.exception.TooBig: max_size was exceeded @rtype: string """ if max_size == 0: if self.request_payload != 0: max_size = self.request_payload else: max_size = 65535 if max_size < 512: max_size = 512 elif max_size > 65535: max_size = 65535 r = dns.renderer.Renderer(self.id, self.flags, max_size, origin) for rrset in self.question: r.add_question(rrset.name, rrset.rdtype, rrset.rdclass) for rrset in self.answer: r.add_rrset(dns.renderer.ANSWER, rrset, **kw) for rrset in self.authority: r.add_rrset(dns.renderer.AUTHORITY, rrset, **kw) if self.edns >= 0: r.add_edns(self.edns, self.ednsflags, self.payload, self.options) for rrset in self.additional: r.add_rrset(dns.renderer.ADDITIONAL, rrset, **kw) r.write_header() if not self.keyname is None: r.add_tsig(self.keyname, self.keyring[self.keyname], self.fudge, self.original_id, self.tsig_error, self.other_data, self.request_mac, self.keyalgorithm) self.mac = r.mac return r.get_wire()
[ "def", "to_wire", "(", "self", ",", "origin", "=", "None", ",", "max_size", "=", "0", ",", "*", "*", "kw", ")", ":", "if", "max_size", "==", "0", ":", "if", "self", ".", "request_payload", "!=", "0", ":", "max_size", "=", "self", ".", "request_payl...
39.888889
17.6
def print_callback(val): """ Internal function. This function is called via a call back returning from IPC to Cython to Python. It tries to perform incremental printing to IPython Notebook or Jupyter Notebook and when all else fails, just prints locally. """ success = False try: # for reasons I cannot fathom, regular printing, even directly # to io.stdout does not work. # I have to intrude rather deep into IPython to make it behave if have_ipython: if InteractiveShell.initialized(): IPython.display.publish_display_data({'text/plain':val,'text/html':'<pre>' + val + '</pre>'}) success = True except: pass if not success: print(val) sys.stdout.flush()
[ "def", "print_callback", "(", "val", ")", ":", "success", "=", "False", "try", ":", "# for reasons I cannot fathom, regular printing, even directly", "# to io.stdout does not work.", "# I have to intrude rather deep into IPython to make it behave", "if", "have_ipython", ":", "if", ...
35.227273
22.772727
def wait(self, condition, interval, *args): """ :Description: Create an interval in vm.window, will clear interval after condition met. :param condition: Condition in javascript to pass to interval. :example: '$el.innerText == "cheesecake"' :example: '$el[0].disabled && $el[1].disabled' :type condition: string :param interval: Time in milliseconds to execute interval. :type interval: int or float :param *args: WebElement or selector of condition element. :type *args: tuple :return: string """ hid = lambda: '$' + str(uuid.uuid1())[:8] handle = hid() if len(args): element_handle = hid() self.browser.execute_script( 'window["{}"] = [];'.format(element_handle) ) # create element container in window scope for el in args: if isinstance(el, string_types): # assume selector self.browser.execute_script('window["{}"].push({});'.format( element_handle, 'function() { return document.querySelector("%s") }' % el)) else: # assume web element self.browser.execute_script( 'window["{}"].push(arguments[0]);'.format(element_handle), el) if len(args) == 1: condition = condition.replace('$el', 'window["{}"][0]{}'.format( element_handle, '()' if isinstance(args[0], string_types) else '')) else: regex = r'(\$el\[([0-9]{0,3})\])' results = re.findall(regex, condition) # [('$el[0]', '0'), ('$el[1]', '1'), ...] 
for result in results: pos = eval(result[1]) if pos + 1 <= len(args): condition = condition.replace(result[0], 'window["{}"][{}]{}'.format( element_handle, pos, '()' if isinstance(args[pos], string_types) else '')) self.browser.execute_script( 'window["%s"]=window.setInterval(function(){if(%s){ \ (window.clearInterval(window["%s"])||true)&&(window["%s"]=-1); \ delete window["%s"];}}, %s)' % (handle, condition, handle, handle, \ element_handle, interval)) # create interval else: self.browser.execute_script( 'window["%s"]=window.setInterval(function(){if(%s){ \ (window.clearInterval(window["%s"])||true)&&(window["%s"]=-1);}}, %s)' % ( handle, condition, handle, handle, interval)) # create interval return handle
[ "def", "wait", "(", "self", ",", "condition", ",", "interval", ",", "*", "args", ")", ":", "hid", "=", "lambda", ":", "'$'", "+", "str", "(", "uuid", ".", "uuid1", "(", ")", ")", "[", ":", "8", "]", "handle", "=", "hid", "(", ")", "if", "len"...
50.45283
22.226415
def issubclass(cls, ifaces): """Check if the given class is an implementation of the given iface.""" ifaces = _ensure_ifaces_tuple(ifaces) for iface in ifaces: return all(( _check_for_definition( iface, cls, '__iclassattribute__', _is_attribute, ), _check_for_definition( iface, cls, '__iproperty__', _is_property, ), _check_for_definition( iface, cls, '__imethod__', _is_method, ), _check_for_definition( iface, cls, '__iclassmethod__', _is_classmethod, ), ))
[ "def", "issubclass", "(", "cls", ",", "ifaces", ")", ":", "ifaces", "=", "_ensure_ifaces_tuple", "(", "ifaces", ")", "for", "iface", "in", "ifaces", ":", "return", "all", "(", "(", "_check_for_definition", "(", "iface", ",", "cls", ",", "'__iclassattribute__...
26.16129
15.032258
def get_record(self, msg_id): """Get a specific Task Record, by msg_id.""" cursor = self._db.execute("""SELECT * FROM %s WHERE msg_id==?"""%self.table, (msg_id,)) line = cursor.fetchone() if line is None: raise KeyError("No such msg: %r"%msg_id) return self._list_to_dict(line)
[ "def", "get_record", "(", "self", ",", "msg_id", ")", ":", "cursor", "=", "self", ".", "_db", ".", "execute", "(", "\"\"\"SELECT * FROM %s WHERE msg_id==?\"\"\"", "%", "self", ".", "table", ",", "(", "msg_id", ",", ")", ")", "line", "=", "cursor", ".", "...
46.142857
6.857143
def match(self, query=None, **kwargs): """Try to match the current record to the database.""" from invenio.search_engine import perform_request_search if not query: # We use default setup recid = self.record["001"][0][3] return perform_request_search(p="035:%s" % (recid,), of="id") else: if "recid" not in kwargs: kwargs["recid"] = self.record["001"][0][3] return perform_request_search(p=query % kwargs, of="id")
[ "def", "match", "(", "self", ",", "query", "=", "None", ",", "*", "*", "kwargs", ")", ":", "from", "invenio", ".", "search_engine", "import", "perform_request_search", "if", "not", "query", ":", "# We use default setup", "recid", "=", "self", ".", "record", ...
45.692308
12.769231
def directories(self): """ Get a generator that yields all subdirectories in the directory. """ dirlist_p = new_gp_object("CameraList") lib.gp_camera_folder_list_folders(self._cam._cam, self.path.encode(), dirlist_p, self._cam._ctx) for idx in range(lib.gp_list_count(dirlist_p)): name = os.path.join( self.path, get_string(lib.gp_list_get_name, dirlist_p, idx)) yield Directory(name=name, parent=self, camera=self._cam) lib.gp_list_free(dirlist_p)
[ "def", "directories", "(", "self", ")", ":", "dirlist_p", "=", "new_gp_object", "(", "\"CameraList\"", ")", "lib", ".", "gp_camera_folder_list_folders", "(", "self", ".", "_cam", ".", "_cam", ",", "self", ".", "path", ".", "encode", "(", ")", ",", "dirlist...
51.636364
16.636364
def calcAspectRatioFromCorners(corners, in_plane=False): ''' simple and better alg. than below in_plane -> whether object has no tilt, but only rotation and translation ''' q = corners l0 = [q[0, 0], q[0, 1], q[1, 0], q[1, 1]] l1 = [q[0, 0], q[0, 1], q[-1, 0], q[-1, 1]] l2 = [q[2, 0], q[2, 1], q[3, 0], q[3, 1]] l3 = [q[2, 0], q[2, 1], q[1, 0], q[1, 1]] a1 = line.length(l0) / line.length(l1) a2 = line.length(l2) / line.length(l3) if in_plane: # take aspect ration from more rectangular corner if (abs(0.5 * np.pi - abs(line.angle2(l0, l1))) < abs(0.5 * np.pi - abs(line.angle2(l2, l3)))): return a1 else: return a2 return 0.5 * (a1 + a2)
[ "def", "calcAspectRatioFromCorners", "(", "corners", ",", "in_plane", "=", "False", ")", ":", "q", "=", "corners", "l0", "=", "[", "q", "[", "0", ",", "0", "]", ",", "q", "[", "0", ",", "1", "]", ",", "q", "[", "1", ",", "0", "]", ",", "q", ...
30.44
21.16
def indexes(self, indexes): """ :type indexes: list[int] """ self._indexes = indexes self._index = len(indexes) / 2
[ "def", "indexes", "(", "self", ",", "indexes", ")", ":", "self", ".", "_indexes", "=", "indexes", "self", ".", "_index", "=", "len", "(", "indexes", ")", "/", "2" ]
34
6
def get_prep_value(self, value): """ Convert an Enum value into a string for the database """ if value is None: return None if isinstance(value, self.enum): return value.name raise ValueError("Unknown value {value:r} of type {cls}".format( value=value, cls=type(value)))
[ "def", "get_prep_value", "(", "self", ",", "value", ")", ":", "if", "value", "is", "None", ":", "return", "None", "if", "isinstance", "(", "value", ",", "self", ".", "enum", ")", ":", "return", "value", ".", "name", "raise", "ValueError", "(", "\"Unkno...
34.5
10.5
def _last_of_quarter(self, day_of_week=None): """ Modify to the last occurrence of a given day of the week in the current quarter. If no day_of_week is provided, modify to the last day of the quarter. Use the supplied consts to indicate the desired day_of_week, ex. DateTime.MONDAY. :type day_of_week: int or None :rtype: DateTime """ return self.on(self.year, self.quarter * 3, 1).last_of("month", day_of_week)
[ "def", "_last_of_quarter", "(", "self", ",", "day_of_week", "=", "None", ")", ":", "return", "self", ".", "on", "(", "self", ".", "year", ",", "self", ".", "quarter", "*", "3", ",", "1", ")", ".", "last_of", "(", "\"month\"", ",", "day_of_week", ")" ...
39.5
20.666667
def registerkbevent(self, keys, modifiers, fn_name, *args): """ Register keystroke events @param keys: key to listen @type keys: string @param modifiers: control / alt combination using gtk MODIFIERS @type modifiers: int @param fn_name: Callback function @type fn_name: function @param *args: arguments to be passed to the callback function @type *args: var args @return: 1 if registration was successful, 0 if not. @rtype: integer """ event_name = "kbevent%s%s" % (keys, modifiers) self._pollEvents._callback[event_name] = [event_name, fn_name, args] return self._remote_registerkbevent(keys, modifiers)
[ "def", "registerkbevent", "(", "self", ",", "keys", ",", "modifiers", ",", "fn_name", ",", "*", "args", ")", ":", "event_name", "=", "\"kbevent%s%s\"", "%", "(", "keys", ",", "modifiers", ")", "self", ".", "_pollEvents", ".", "_callback", "[", "event_name"...
37.684211
17.157895
def confd_state_internal_callpoints_snmp_notification_subscription_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring") internal = ET.SubElement(confd_state, "internal") callpoints = ET.SubElement(internal, "callpoints") snmp_notification_subscription = ET.SubElement(callpoints, "snmp-notification-subscription") id = ET.SubElement(snmp_notification_subscription, "id") id.text = kwargs.pop('id') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "confd_state_internal_callpoints_snmp_notification_subscription_id", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "confd_state", "=", "ET", ".", "SubElement", "(", "config", ",", "\"confd-state...
51.692308
23.615385
def edit_user_push_restrictions(self, *users): """ :calls: `POST /repos/:owner/:repo/branches/:branch/protection/restrictions <https://developer.github.com/v3/repos/branches>`_ :users: list of strings """ assert all(isinstance(element, (str, unicode)) or isinstance(element, (str, unicode)) for element in users), users headers, data = self._requester.requestJsonAndCheck( "POST", self.protection_url + "/restrictions/users", input=users )
[ "def", "edit_user_push_restrictions", "(", "self", ",", "*", "users", ")", ":", "assert", "all", "(", "isinstance", "(", "element", ",", "(", "str", ",", "unicode", ")", ")", "or", "isinstance", "(", "element", ",", "(", "str", ",", "unicode", ")", ")"...
43.416667
27.916667
def get_sdb_secret_version_paths(self, sdb_id): """ Get SDB secret version paths. This function takes the sdb_id """ sdb_resp = get_with_retry(str.join('', [self.cerberus_url, '/v1/sdb-secret-version-paths/', sdb_id]), headers=self.HEADERS) throw_if_bad_response(sdb_resp) return sdb_resp.json()
[ "def", "get_sdb_secret_version_paths", "(", "self", ",", "sdb_id", ")", ":", "sdb_resp", "=", "get_with_retry", "(", "str", ".", "join", "(", "''", ",", "[", "self", ".", "cerberus_url", ",", "'/v1/sdb-secret-version-paths/'", ",", "sdb_id", "]", ")", ",", "...
44.375
22.5
def clean_email(self): """ ensure email is in the database """ if EMAIL_CONFIRMATION: from .models import EmailAddress condition = EmailAddress.objects.filter( email__iexact=self.cleaned_data["email"], verified=True ).count() == 0 else: condition = User.objects.get( email__iexact=self.cleaned_data["email"], is_active=True ).count() == 0 if condition is True: raise forms.ValidationError( _("Email address not verified for any user account") ) return self.cleaned_data["email"]
[ "def", "clean_email", "(", "self", ")", ":", "if", "EMAIL_CONFIRMATION", ":", "from", ".", "models", "import", "EmailAddress", "condition", "=", "EmailAddress", ".", "objects", ".", "filter", "(", "email__iexact", "=", "self", ".", "cleaned_data", "[", "\"emai...
35
13.789474
def on_select(self, item, action): """ Add an action to make when an object is selected. Only one action can be stored this way. """ if not isinstance(item, int): item = self.items.index(item) self._on_select[item] = action
[ "def", "on_select", "(", "self", ",", "item", ",", "action", ")", ":", "if", "not", "isinstance", "(", "item", ",", "int", ")", ":", "item", "=", "self", ".", "items", ".", "index", "(", "item", ")", "self", ".", "_on_select", "[", "item", "]", "...
27.6
11.6
def _build_type_validator(value_type): """Build a validator that only checks the type of a value.""" def type_validator(data): """Validate instances of a particular type.""" if isinstance(data, value_type): return data raise NotValid('%r is not of type %r' % (data, value_type)) return type_validator
[ "def", "_build_type_validator", "(", "value_type", ")", ":", "def", "type_validator", "(", "data", ")", ":", "\"\"\"Validate instances of a particular type.\"\"\"", "if", "isinstance", "(", "data", ",", "value_type", ")", ":", "return", "data", "raise", "NotValid", ...
31
17.454545
def u2open(self, u2request): """ Open a connection. @param u2request: A urllib2 request. @type u2request: urllib2.Requet. @return: The opened file-like urllib2 object. @rtype: fp """ tm = self.options.timeout url = build_opener(HTTPSClientAuthHandler(self.context)) if self.u2ver() < 2.6: socket.setdefaulttimeout(tm) return url.open(u2request) else: return url.open(u2request, timeout=tm)
[ "def", "u2open", "(", "self", ",", "u2request", ")", ":", "tm", "=", "self", ".", "options", ".", "timeout", "url", "=", "build_opener", "(", "HTTPSClientAuthHandler", "(", "self", ".", "context", ")", ")", "if", "self", ".", "u2ver", "(", ")", "<", ...
33.266667
9.666667
def _generate_ordered_structures(self, sanitized_input_structure, transformations): """ Apply our input structure to our list of transformations and output a list of ordered structures that have been pruned for duplicates and for those with low symmetry (optional). Args: sanitized_input_structure: A sanitized input structure (_sanitize_input_structure) transformations: A dict of transformations (values) and name of enumeration strategy (key), the enumeration strategy name is just for record keeping Returns: None (sets self.ordered_structures and self.ordered_structures_origins instance variables) """ ordered_structures = self.ordered_structures ordered_structures_origins = self.ordered_structure_origins # utility function to combine outputs from several transformations def _add_structures( ordered_structures, ordered_structures_origins, structures_to_add, origin="" ): """ Transformations with return_ranked_list can return either just Structures or dicts (or sometimes lists!) -- until this is fixed, we use this function to concat structures given by the transformation. 
""" if structures_to_add: # type conversion if isinstance(structures_to_add, Structure): structures_to_add = [structures_to_add] structures_to_add = [ s["structure"] if isinstance(s, dict) else s for s in structures_to_add ] # concatenation ordered_structures += structures_to_add ordered_structures_origins += [origin] * len(structures_to_add) self.logger.info( "Adding {} ordered structures: {}".format( len(structures_to_add), origin ) ) return ordered_structures, ordered_structures_origins for origin, trans in self.transformations.items(): structures_to_add = trans.apply_transformation( self.sanitized_structure, return_ranked_list=self.num_orderings ) ordered_structures, ordered_structures_origins = _add_structures( ordered_structures, ordered_structures_origins, structures_to_add, origin=origin, ) # in case we've introduced duplicates, let's remove them self.logger.info("Pruning duplicate structures.") structures_to_remove = [] for idx, ordered_structure in enumerate(ordered_structures): if idx not in structures_to_remove: duplicate_checker = CollinearMagneticStructureAnalyzer( ordered_structure, overwrite_magmom_mode="none" ) for check_idx, check_structure in enumerate(ordered_structures): if check_idx not in structures_to_remove and check_idx != idx: if duplicate_checker.matches_ordering(check_structure): structures_to_remove.append(check_idx) if len(structures_to_remove): self.logger.info( "Removing {} duplicate ordered structures".format( len(structures_to_remove) ) ) ordered_structures = [ s for idx, s in enumerate(ordered_structures) if idx not in structures_to_remove ] ordered_structures_origins = [ o for idx, o in enumerate(ordered_structures_origins) if idx not in structures_to_remove ] # also remove low symmetry structures if self.truncate_by_symmetry: # by default, keep structures with 5 most symmetric space groups if not isinstance(self.truncate_by_symmetry, int): self.truncate_by_symmetry = 5 self.logger.info("Pruning low symmetry structures.") # 
first get a list of symmetries present symmetry_int_numbers = [ s.get_space_group_info()[1] for s in ordered_structures ] # then count the number of symmetry operations for that space group num_sym_ops = [ len(SpaceGroup.from_int_number(n).symmetry_ops) for n in symmetry_int_numbers ] # find the largest values... max_symmetries = sorted(list(set(num_sym_ops)), reverse=True) # ...and decide which ones to keep if len(max_symmetries) > self.truncate_by_symmetry: max_symmetries = max_symmetries[0:5] structs_to_keep = [ (idx, num) for idx, num in enumerate(num_sym_ops) if num in max_symmetries ] # sort so that highest symmetry structs are first structs_to_keep = sorted( structs_to_keep, key=lambda x: (x[1], -x[0]), reverse=True ) self.logger.info( "Removing {} low symmetry " "ordered structures".format( len(ordered_structures) - len(structs_to_keep) ) ) ordered_structures = [ordered_structures[i] for i, _ in structs_to_keep] ordered_structures_origins = [ ordered_structures_origins[i] for i, _ in structs_to_keep ] # and ensure fm is always at index 0 fm_index = ordered_structures_origins.index("fm") ordered_structures.insert(0, ordered_structures.pop(fm_index)) ordered_structures_origins.insert( 0, ordered_structures_origins.pop(fm_index) ) # if our input structure isn't in our generated structures, # let's add it manually and also keep a note of which structure # is our input: this is mostly for book-keeping/benchmarking self.input_index = None self.input_origin = None if self.input_analyzer.ordering != Ordering.NM: matches = [ self.input_analyzer.matches_ordering(s) for s in ordered_structures ] if not any(matches): ordered_structures.append(self.input_analyzer.structure) ordered_structures_origins.append("input") self.logger.info( "Input structure not present in enumerated structures, adding..." 
) else: self.logger.info( "Input structure was found in enumerated " "structures at index {}".format(matches.index(True)) ) self.input_index = matches.index(True) self.input_origin = ordered_structures_origins[self.input_index] self.ordered_structures = ordered_structures self.ordered_structure_origins = ordered_structures_origins
[ "def", "_generate_ordered_structures", "(", "self", ",", "sanitized_input_structure", ",", "transformations", ")", ":", "ordered_structures", "=", "self", ".", "ordered_structures", "ordered_structures_origins", "=", "self", ".", "ordered_structure_origins", "# utility functi...
41.209302
22.337209
def base_url(self): '''The public URL for this storage''' config_value = self.config.get('url') if config_value: return self._clean_url(config_value) default_url = current_app.config.get('FS_URL') default_url = current_app.config.get('{0}URL'.format(self.backend_prefix), default_url) if default_url: url = urljoin(default_url, self.name) return self._clean_url(url) return url_for('fs.get_file', fs=self.name, filename='', _external=True)
[ "def", "base_url", "(", "self", ")", ":", "config_value", "=", "self", ".", "config", ".", "get", "(", "'url'", ")", "if", "config_value", ":", "return", "self", ".", "_clean_url", "(", "config_value", ")", "default_url", "=", "current_app", ".", "config",...
47.363636
17.363636
def clickable(self): """ Property used for determining if the widget should be clickable by the user. This is only true if the submenu of this widget is active and this widget is enabled. The widget may be either disabled by setting this property or the :py:attr:`enabled` attribute. """ if not isinstance(self.submenu,Container): return self.submenu.name == self.submenu.menu.activeSubMenu and self.submenu.menu.name == self.window.activeMenu and self.enabled else: return self.submenu.clickable and self.enabled
[ "def", "clickable", "(", "self", ")", ":", "if", "not", "isinstance", "(", "self", ".", "submenu", ",", "Container", ")", ":", "return", "self", ".", "submenu", ".", "name", "==", "self", ".", "submenu", ".", "menu", ".", "activeSubMenu", "and", "self"...
50
33.333333
def check_get_revoked(self): """ Create a CRL object with 100 Revoked objects, then call the get_revoked method repeatedly. """ crl = CRL() for i in xrange(100): crl.add_revoked(Revoked()) for i in xrange(self.iterations): crl.get_revoked()
[ "def", "check_get_revoked", "(", "self", ")", ":", "crl", "=", "CRL", "(", ")", "for", "i", "in", "xrange", "(", "100", ")", ":", "crl", ".", "add_revoked", "(", "Revoked", "(", ")", ")", "for", "i", "in", "xrange", "(", "self", ".", "iterations", ...
31.1
8.7
def get_default_pandas_parsers() -> List[AnyParser]: """ Utility method to return the default parsers able to parse a dictionary from a file. :return: """ return [SingleFileParserFunction(parser_function=read_dataframe_from_xls, streaming_mode=False, supported_exts={'.xls', '.xlsx', '.xlsm'}, supported_types={pd.DataFrame}, option_hints=pandas_parsers_option_hints_xls), SingleFileParserFunction(parser_function=read_df_or_series_from_csv, streaming_mode=False, supported_exts={'.csv', '.txt'}, supported_types={pd.DataFrame, pd.Series}, option_hints=pandas_parsers_option_hints_csv), ]
[ "def", "get_default_pandas_parsers", "(", ")", "->", "List", "[", "AnyParser", "]", ":", "return", "[", "SingleFileParserFunction", "(", "parser_function", "=", "read_dataframe_from_xls", ",", "streaming_mode", "=", "False", ",", "supported_exts", "=", "{", "'.xls'"...
57.0625
28.0625
def _create_run_ini(self, port, production, output='development.ini', source='development.ini', override_site_url=True): """ Create run/development.ini in datadir with debug and site_url overridden and with correct db passwords inserted """ cp = SafeConfigParser() try: cp.read([self.target + '/' + source]) except ConfigParserError: raise DatacatsError('Error reading development.ini') cp.set('DEFAULT', 'debug', 'false' if production else 'true') if self.site_url: site_url = self.site_url else: if is_boot2docker(): web_address = socket.gethostbyname(docker_host()) else: web_address = self.address site_url = 'http://{}:{}'.format(web_address, port) if override_site_url: cp.set('app:main', 'ckan.site_url', site_url) cp.set('app:main', 'sqlalchemy.url', 'postgresql://ckan:{0}@db:5432/ckan' .format(self.passwords['CKAN_PASSWORD'])) cp.set('app:main', 'ckan.datastore.read_url', 'postgresql://ckan_datastore_readonly:{0}@db:5432/ckan_datastore' .format(self.passwords['DATASTORE_RO_PASSWORD'])) cp.set('app:main', 'ckan.datastore.write_url', 'postgresql://ckan_datastore_readwrite:{0}@db:5432/ckan_datastore' .format(self.passwords['DATASTORE_RW_PASSWORD'])) cp.set('app:main', 'solr_url', 'http://solr:8080/solr') cp.set('app:main', 'ckan.redis.url', 'http://redis:6379') cp.set('app:main', 'beaker.session.secret', self.passwords['BEAKER_SESSION_SECRET']) if not isdir(self.sitedir + '/run'): makedirs(self.sitedir + '/run') # upgrade old datadir with open(self.sitedir + '/run/' + output, 'w') as runini: cp.write(runini)
[ "def", "_create_run_ini", "(", "self", ",", "port", ",", "production", ",", "output", "=", "'development.ini'", ",", "source", "=", "'development.ini'", ",", "override_site_url", "=", "True", ")", ":", "cp", "=", "SafeConfigParser", "(", ")", "try", ":", "cp...
44.162791
21.511628
def create_message_set(messages, codec=CODEC_NONE, key=None, compresslevel=None): """Create a message set using the given codec. If codec is CODEC_NONE, return a list of raw Kafka messages. Otherwise, return a list containing a single codec-encoded message. """ if codec == CODEC_NONE: return [create_message(m, k) for m, k in messages] elif codec == CODEC_GZIP: return [create_gzip_message(messages, key, compresslevel)] elif codec == CODEC_SNAPPY: return [create_snappy_message(messages, key)] else: raise UnsupportedCodecError("Codec 0x%02x unsupported" % (codec,))
[ "def", "create_message_set", "(", "messages", ",", "codec", "=", "CODEC_NONE", ",", "key", "=", "None", ",", "compresslevel", "=", "None", ")", ":", "if", "codec", "==", "CODEC_NONE", ":", "return", "[", "create_message", "(", "m", ",", "k", ")", "for", ...
44.285714
20.785714
def _set_reg(cls, reg): """The writing complement of _get_reg """ cls._reg = [task_cls for task_cls in reg.values() if task_cls is not cls.AMBIGUOUS_CLASS]
[ "def", "_set_reg", "(", "cls", ",", "reg", ")", ":", "cls", ".", "_reg", "=", "[", "task_cls", "for", "task_cls", "in", "reg", ".", "values", "(", ")", "if", "task_cls", "is", "not", "cls", ".", "AMBIGUOUS_CLASS", "]" ]
44
18.5
def getActiveProperties(self): """ Returns the non-zero accidental dignities. """ score = self.getScoreProperties() return {key: value for (key, value) in score.items() if value != 0}
[ "def", "getActiveProperties", "(", "self", ")", ":", "score", "=", "self", ".", "getScoreProperties", "(", ")", "return", "{", "key", ":", "value", "for", "(", "key", ",", "value", ")", "in", "score", ".", "items", "(", ")", "if", "value", "!=", "0",...
43.8
8.2
def file_contents(file_name): """Given a file name to a valid file returns the file object.""" curr_dir = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(curr_dir, file_name)) as the_file: contents = the_file.read() return contents
[ "def", "file_contents", "(", "file_name", ")", ":", "curr_dir", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "curr_dir", ",", "f...
44.666667
12.666667
def add_package(package, ignore_check=False, prevent_pending=False, image=None, restart=False): ''' Install a package using DISM Args: package (str): The package to install. Can be a .cab file, a .msu file, or a folder .. note:: An `.msu` package is supported only when the target image is offline, either mounted or applied. ignore_check (Optional[bool]): Skip installation of the package if the applicability checks fail prevent_pending (Optional[bool]): Skip the installation of the package if there are pending online actions image (Optional[str]): The path to the root directory of an offline Windows image. If ``None`` is passed, the running operating system is targeted. Default is None. restart (Optional[bool]): Reboot the machine if required by the install Returns: dict: A dictionary containing the results of the command CLI Example: .. code-block:: bash salt '*' dism.add_package C:\\Packages\\package.cab ''' cmd = ['DISM', '/Quiet', '/Image:{0}'.format(image) if image else '/Online', '/Add-Package', '/PackagePath:{0}'.format(package)] if ignore_check: cmd.append('/IgnoreCheck') if prevent_pending: cmd.append('/PreventPending') if not restart: cmd.append('/NoRestart') return __salt__['cmd.run_all'](cmd)
[ "def", "add_package", "(", "package", ",", "ignore_check", "=", "False", ",", "prevent_pending", "=", "False", ",", "image", "=", "None", ",", "restart", "=", "False", ")", ":", "cmd", "=", "[", "'DISM'", ",", "'/Quiet'", ",", "'/Image:{0}'", ".", "forma...
28.833333
22.87037
def normalize_datum(self, datum):
    """
    Convert `datum` into something that umsgpack likes.

    :param datum: something that we want to process with umsgpack
    :return: a packable version of `datum`
    :raises TypeError: if `datum` cannot be packed

    This message is called by :meth:`.packb` to recursively normalize
    an input value before passing it to :func:`umsgpack.packb`.  Values
    are normalized according to the following table.

    +-------------------------------+-------------------------------+
    | **Value**                     | **MsgPack Family**            |
    +-------------------------------+-------------------------------+
    | :data:`None`                  | `nil byte`_ (0xC0)            |
    +-------------------------------+-------------------------------+
    | :data:`True`                  | `true byte`_ (0xC3)           |
    +-------------------------------+-------------------------------+
    | :data:`False`                 | `false byte`_ (0xC2)          |
    +-------------------------------+-------------------------------+
    | :class:`int`                  | `integer family`_             |
    +-------------------------------+-------------------------------+
    | :class:`float`                | `float family`_               |
    +-------------------------------+-------------------------------+
    | String                        | `str family`_                 |
    +-------------------------------+-------------------------------+
    | :class:`bytes`                | `bin family`_                 |
    +-------------------------------+-------------------------------+
    | :class:`bytearray`            | `bin family`_                 |
    +-------------------------------+-------------------------------+
    | :class:`memoryview`           | `bin family`_                 |
    +-------------------------------+-------------------------------+
    | :class:`collections.abc.Sequence` | `array family`_           |
    +-------------------------------+-------------------------------+
    | :class:`collections.abc.Set`  | `array family`_               |
    +-------------------------------+-------------------------------+
    | :class:`collections.abc.Mapping` | `map family`_              |
    +-------------------------------+-------------------------------+
    | :class:`uuid.UUID`            | Converted to String           |
    +-------------------------------+-------------------------------+

    .. _nil byte: https://github.com/msgpack/msgpack/blob/
       0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#formats-nil
    .. _true byte: https://github.com/msgpack/msgpack/blob/
       0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#bool-format-family
    .. _false byte: https://github.com/msgpack/msgpack/blob/
       0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#bool-format-family
    .. _integer family: https://github.com/msgpack/msgpack/blob/
       0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#int-format-family
    .. _float family: https://github.com/msgpack/msgpack/blob/
       0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#float-format-family
    .. _str family: https://github.com/msgpack/msgpack/blob/
       0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#str-format-family
    .. _array family: https://github.com/msgpack/msgpack/blob/
       0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#array-format-family
    .. _map family: https://github.com/msgpack/msgpack/blob/
       0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md
       #mapping-format-family
    .. _bin family: https://github.com/msgpack/msgpack/blob/
       0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#bin-format-family

    """
    if datum is None:
        return datum

    # Fast path: anything in PACKABLE_TYPES is passed through untouched.
    if isinstance(datum, self.PACKABLE_TYPES):
        return datum

    # Coerce near-packable values into packable ones.
    if isinstance(datum, uuid.UUID):
        datum = str(datum)
    if isinstance(datum, bytearray):
        datum = bytes(datum)
    if isinstance(datum, memoryview):
        datum = datum.tobytes()
    if hasattr(datum, 'isoformat'):
        # date/datetime/time objects are serialized as ISO-8601 strings
        datum = datum.isoformat()

    # Strings/bytes are Sequences too, so handle them before the
    # Sequence branch to avoid recursing character by character.
    if isinstance(datum, (bytes, str)):
        return datum

    # NOTE: the bare ``collections.Sequence``/``Set``/``Mapping`` aliases
    # were removed in Python 3.10; ``collections.abc`` is the supported home.
    if isinstance(datum, (collections.abc.Sequence, collections.abc.Set)):
        return [self.normalize_datum(item) for item in datum]
    if isinstance(datum, collections.abc.Mapping):
        return {key: self.normalize_datum(value)
                for key, value in datum.items()}

    raise TypeError(
        '{} is not msgpackable'.format(datum.__class__.__name__))
[ "def", "normalize_datum", "(", "self", ",", "datum", ")", ":", "if", "datum", "is", "None", ":", "return", "datum", "if", "isinstance", "(", "datum", ",", "self", ".", "PACKABLE_TYPES", ")", ":", "return", "datum", "if", "isinstance", "(", "datum", ",", ...
50.347368
26.115789
def collect_string_fields(format_string) -> Iterable[Optional[str]]:
    """Given a format string, return an iterator of all the valid
    format fields. It handles nested fields as well.
    """
    formatter = string.Formatter()
    try:
        for _literal, field_name, format_spec, conversion in \
                formatter.parse(format_string):
            if field_name is None and format_spec is None and conversion is None:
                # Plain literal text -- no replacement field in this chunk.
                continue
            yield field_name
            if format_spec:
                # A format spec may itself contain fields, e.g. "{x:{width}}".
                yield from collect_string_fields(format_spec)
    except ValueError as exc:
        # Probably the format string is invalid.
        if exc.args[0].startswith("cannot switch from manual"):
            # On Jython, parsing a string with both manual
            # and automatic positions will fail with a ValueError,
            # while on CPython it will simply return the fields,
            # the validation being done in the interpreter (?).
            # We're just returning two mixed fields in order
            # to trigger the format-combined-specification check.
            yield ""
            yield "1"
            return
        raise IncompleteFormatString(format_string)
[ "def", "collect_string_fields", "(", "format_string", ")", "->", "Iterable", "[", "Optional", "[", "str", "]", "]", ":", "formatter", "=", "string", ".", "Formatter", "(", ")", "try", ":", "parseiterator", "=", "formatter", ".", "parse", "(", "format_string"...
40.903226
16.193548
def update_role_config_group(self, name, apigroup):
    """
    Update a role config group.

    @param name: Role config group name.
    @param apigroup: The updated role config group.
    @return: The updated ApiRoleConfigGroup object.
    @since: API v3
    """
    # Delegate to the role_config_groups module, scoped to this
    # service's resource root and enclosing cluster.
    resource_root = self._get_resource_root()
    cluster_name = self._get_cluster_name()
    return role_config_groups.update_role_config_group(
        resource_root, self.name, name, apigroup, cluster_name)
[ "def", "update_role_config_group", "(", "self", ",", "name", ",", "apigroup", ")", ":", "return", "role_config_groups", ".", "update_role_config_group", "(", "self", ".", "_get_resource_root", "(", ")", ",", "self", ".", "name", ",", "name", ",", "apigroup", "...
33.75
12.25
def workers(ctx, account, top):
    """ List all workers (of an account) """
    rows = [["id", "name/url", "daily_pay", "votes", "time", "account"]]
    # Rank by total votes, highest first.
    ranked = sorted(
        Workers(account),
        key=lambda w: int(w["total_votes_for"]),
        reverse=True,
    )
    if top:
        # NOTE(review): this keeps top + 1 entries (original slice bound
        # preserved) -- confirm whether an off-by-one was intended.
        ranked = ranked[: top + 1]
    for worker in ranked:
        # Skip workers whose payout period has already ended.
        if worker["work_end_date"] < datetime.datetime.utcnow():
            continue
        vote_total = Amount({"amount": worker["total_votes_for"], "asset_id": "1.3.0"})
        pay = Amount({"amount": worker["daily_pay"], "asset_id": "1.3.0"})
        rows.append(
            [
                worker["id"],
                "{name}\n{url}".format(**worker),
                str(pay),
                str(vote_total),
                "{work_begin_date:%Y-%m-%d}\n-\n{work_end_date:%Y-%m-%d}".format(
                    **worker
                ),
                str(Account(worker["worker_account"])["name"]),
            ]
        )
    print_table(rows)
[ "def", "workers", "(", "ctx", ",", "account", ",", "top", ")", ":", "workers", "=", "Workers", "(", "account", ")", "t", "=", "[", "[", "\"id\"", ",", "\"name/url\"", ",", "\"daily_pay\"", ",", "\"votes\"", ",", "\"time\"", ",", "\"account\"", "]", "]"...
36.071429
20.25
def archive_wheelfile(base_name, base_dir):
    """Archive all files under `base_dir` in a whl file and name it like
    `base_name`.
    """
    # Resolve the output name before changing directory so a relative
    # base_name is anchored at the caller's working directory.
    previous_dir = os.path.abspath(os.curdir)
    target = os.path.abspath(base_name)
    try:
        os.chdir(base_dir)
        return make_wheelfile_inner(target)
    finally:
        # Always restore the caller's working directory.
        os.chdir(previous_dir)
[ "def", "archive_wheelfile", "(", "base_name", ",", "base_dir", ")", ":", "olddir", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "curdir", ")", "base_name", "=", "os", ".", "path", ".", "abspath", "(", "base_name", ")", "try", ":", "os", "....
30.454545
11.454545
def refresh_cache(self, if_want_update=False):
    """Update all threads currently stored in our cache."""
    # Snapshot the cache values: update() may mutate the cache itself.
    cached_threads = tuple(self._thread_cache.values())
    for cached_thread in cached_threads:
        # When if_want_update is set, only refresh threads that ask for it.
        if if_want_update and not cached_thread.want_update:
            continue
        cached_thread.update()
[ "def", "refresh_cache", "(", "self", ",", "if_want_update", "=", "False", ")", ":", "for", "thread", "in", "tuple", "(", "self", ".", "_thread_cache", ".", "values", "(", ")", ")", ":", "if", "if_want_update", ":", "if", "not", "thread", ".", "want_updat...
41.857143
8.571429
def schema_term(self):
    """Return the Table term for this resource, which is referenced either by
    the `table` property or the `schema` property.

    Lookup order: a ``Root.Table`` whose value matches this resource's
    ``name``, then one matching its ``schema``; returns None when neither
    is found.

    :raises MetapackError: if the resource has no name.
    """
    if not self.name:
        # Fixed typo in the message ("doe not" -> "does not").
        raise MetapackError("Resource for url '{}' does not have name".format(self.url))

    # Prefer a table referenced by this resource's name ...
    t = self.doc.find_first('Root.Table', value=self.get_value('name'))

    # ... falling back to one referenced by its schema property.
    if not t:
        t = self.doc.find_first('Root.Table', value=self.get_value('schema'))

    # (The original tracked which property matched in an unused local
    # `frm`; that dead code has been removed.)
    return t
[ "def", "schema_term", "(", "self", ")", ":", "if", "not", "self", ".", "name", ":", "raise", "MetapackError", "(", "\"Resource for url '{}' doe not have name\"", ".", "format", "(", "self", ".", "url", ")", ")", "t", "=", "self", ".", "doc", ".", "find_fir...
30.5
26.777778