text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def media_artist(self): """Artist of current playing media (Music track only).""" try: artists = self.session['NowPlayingItem']['Artists'] if len(artists) > 1: return artists[0] else: return artists except KeyError: return None
[ "def", "media_artist", "(", "self", ")", ":", "try", ":", "artists", "=", "self", ".", "session", "[", "'NowPlayingItem'", "]", "[", "'Artists'", "]", "if", "len", "(", "artists", ")", ">", "1", ":", "return", "artists", "[", "0", "]", "else", ":", ...
32.2
14.9
def add_device(self, name, protocol, model=None, **parameters): """Add a new device. :return: a :class:`Device` or :class:`DeviceGroup` instance. """ device = Device(self.lib.tdAddDevice(), lib=self.lib) try: device.name = name device.protocol = protocol if model: device.model = model for key, value in parameters.items(): device.set_parameter(key, value) # Return correct type return DeviceFactory(device.id, lib=self.lib) except Exception: import sys exc_info = sys.exc_info() try: device.remove() except: pass if "with_traceback" in dir(Exception): raise exc_info[0].with_traceback(exc_info[1], exc_info[2]) else: exec("raise exc_info[0], exc_info[1], exc_info[2]")
[ "def", "add_device", "(", "self", ",", "name", ",", "protocol", ",", "model", "=", "None", ",", "*", "*", "parameters", ")", ":", "device", "=", "Device", "(", "self", ".", "lib", ".", "tdAddDevice", "(", ")", ",", "lib", "=", "self", ".", "lib", ...
33.321429
17.857143
def eval(expr, parser='pandas', engine=None, truediv=True, local_dict=None, global_dict=None, resolvers=(), level=0, target=None, inplace=False): """Evaluate a Python expression as a string using various backends. The following arithmetic operations are supported: ``+``, ``-``, ``*``, ``/``, ``**``, ``%``, ``//`` (python engine only) along with the following boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not). Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`, :keyword:`or`, and :keyword:`not` with the same semantics as the corresponding bitwise operators. :class:`~pandas.Series` and :class:`~pandas.DataFrame` objects are supported and behave as they would with plain ol' Python evaluation. Parameters ---------- expr : str or unicode The expression to evaluate. This string cannot contain any Python `statements <https://docs.python.org/3/reference/simple_stmts.html#simple-statements>`__, only Python `expressions <https://docs.python.org/3/reference/simple_stmts.html#expression-statements>`__. parser : string, default 'pandas', {'pandas', 'python'} The parser to use to construct the syntax tree from the expression. The default of ``'pandas'`` parses code slightly different than standard Python. Alternatively, you can parse an expression using the ``'python'`` parser to retain strict Python semantics. See the :ref:`enhancing performance <enhancingperf.eval>` documentation for more details. engine : string or None, default 'numexpr', {'python', 'numexpr'} The engine used to evaluate the expression. Supported engines are - None : tries to use ``numexpr``, falls back to ``python`` - ``'numexpr'``: This default engine evaluates pandas objects using numexpr for large speed ups in complex expressions with large frames. - ``'python'``: Performs operations as if you had ``eval``'d in top level python. This engine is generally not that useful. More backends may be available in the future. 
truediv : bool, optional Whether to use true division, like in Python >= 3 local_dict : dict or None, optional A dictionary of local variables, taken from locals() by default. global_dict : dict or None, optional A dictionary of global variables, taken from globals() by default. resolvers : list of dict-like or None, optional A list of objects implementing the ``__getitem__`` special method that you can use to inject an additional collection of namespaces to use for variable lookup. For example, this is used in the :meth:`~DataFrame.query` method to inject the ``DataFrame.index`` and ``DataFrame.columns`` variables that refer to their respective :class:`~pandas.DataFrame` instance attributes. level : int, optional The number of prior stack frames to traverse and add to the current scope. Most users will **not** need to change this parameter. target : object, optional, default None This is the target object for assignment. It is used when there is variable assignment in the expression. If so, then `target` must support item assignment with string keys, and if a copy is being returned, it must also support `.copy()`. inplace : bool, default False If `target` is provided, and the expression mutates `target`, whether to modify `target` inplace. Otherwise, return a copy of `target` with the mutation. Returns ------- ndarray, numeric scalar, DataFrame, Series Raises ------ ValueError There are many instances where such an error can be raised: - `target=None`, but the expression is multiline. - The expression is multiline, but not all them have item assignment. An example of such an arrangement is this: a = b + 1 a + 2 Here, there are expressions on different lines, making it multiline, but the last line has no variable assigned to the output of `a + 2`. - `inplace=True`, but the expression is missing item assignment. - Item assignment is provided, but the `target` does not support string item assignment. 
- Item assignment is provided and `inplace=False`, but the `target` does not support the `.copy()` method See Also -------- DataFrame.query DataFrame.eval Notes ----- The ``dtype`` of any objects involved in an arithmetic ``%`` operation are recursively cast to ``float64``. See the :ref:`enhancing performance <enhancingperf.eval>` documentation for more details. """ from pandas.core.computation.expr import Expr inplace = validate_bool_kwarg(inplace, "inplace") if isinstance(expr, str): _check_expression(expr) exprs = [e.strip() for e in expr.splitlines() if e.strip() != ''] else: exprs = [expr] multi_line = len(exprs) > 1 if multi_line and target is None: raise ValueError("multi-line expressions are only valid in the " "context of data, use DataFrame.eval") ret = None first_expr = True target_modified = False for expr in exprs: expr = _convert_expression(expr) engine = _check_engine(engine) _check_parser(parser) _check_resolvers(resolvers) _check_for_locals(expr, level, parser) # get our (possibly passed-in) scope env = _ensure_scope(level + 1, global_dict=global_dict, local_dict=local_dict, resolvers=resolvers, target=target) parsed_expr = Expr(expr, engine=engine, parser=parser, env=env, truediv=truediv) # construct the engine and evaluate the parsed expression eng = _engines[engine] eng_inst = eng(parsed_expr) ret = eng_inst.evaluate() if parsed_expr.assigner is None: if multi_line: raise ValueError("Multi-line expressions are only valid" " if all expressions contain an assignment") elif inplace: raise ValueError("Cannot operate inplace " "if there is no assignment") # assign if needed assigner = parsed_expr.assigner if env.target is not None and assigner is not None: target_modified = True # if returning a copy, copy only on the first assignment if not inplace and first_expr: try: target = env.target.copy() except AttributeError: raise ValueError("Cannot return a copy of the target") else: target = env.target # TypeError is most commonly raised (e.g. 
int, list), but you # get IndexError if you try to do this assignment on np.ndarray. # we will ignore numpy warnings here; e.g. if trying # to use a non-numeric indexer try: with warnings.catch_warnings(record=True): # TODO: Filter the warnings we actually care about here. target[assigner] = ret except (TypeError, IndexError): raise ValueError("Cannot assign expression output to target") if not resolvers: resolvers = ({assigner: ret},) else: # existing resolver needs updated to handle # case of mutating existing column in copy for resolver in resolvers: if assigner in resolver: resolver[assigner] = ret break else: resolvers += ({assigner: ret},) ret = None first_expr = False # We want to exclude `inplace=None` as being False. if inplace is False: return target if target_modified else ret
[ "def", "eval", "(", "expr", ",", "parser", "=", "'pandas'", ",", "engine", "=", "None", ",", "truediv", "=", "True", ",", "local_dict", "=", "None", ",", "global_dict", "=", "None", ",", "resolvers", "=", "(", ")", ",", "level", "=", "0", ",", "tar...
41.255102
23.316327
def resolve_revision(self, dest, url, rev_options): """ Resolve a revision to a new RevOptions object with the SHA1 of the branch, tag, or ref if found. Args: rev_options: a RevOptions object. """ rev = rev_options.arg_rev sha, is_branch = self.get_revision_sha(dest, rev) if sha is not None: rev_options = rev_options.make_new(sha) rev_options.branch_name = rev if is_branch else None return rev_options # Do not show a warning for the common case of something that has # the form of a Git commit hash. if not looks_like_hash(rev): logger.warning( "Did not find branch or tag '%s', assuming revision or ref.", rev, ) if not rev.startswith('refs/'): return rev_options # If it looks like a ref, we have to fetch it explicitly. self.run_command( ['fetch', '-q', url] + rev_options.to_args(), cwd=dest, ) # Change the revision to the SHA of the ref we fetched sha = self.get_revision(dest, rev='FETCH_HEAD') rev_options = rev_options.make_new(sha) return rev_options
[ "def", "resolve_revision", "(", "self", ",", "dest", ",", "url", ",", "rev_options", ")", ":", "rev", "=", "rev_options", ".", "arg_rev", "sha", ",", "is_branch", "=", "self", ".", "get_revision_sha", "(", "dest", ",", "rev", ")", "if", "sha", "is", "n...
32.210526
19.736842
def GetFileEntryByPathSpec(self, path_spec): """Retrieves a file entry for a path specification. Args: path_spec (PathSpec): a path specification. Returns: CPIOFileEntry: a file entry or None if not available. """ location = getattr(path_spec, 'location', None) if (location is None or not location.startswith(self.LOCATION_ROOT)): return None if len(location) == 1: return cpio_file_entry.CPIOFileEntry( self._resolver_context, self, path_spec, is_root=True, is_virtual=True) cpio_archive_file_entry = self._cpio_archive_file.GetFileEntryByPath( location[1:]) if cpio_archive_file_entry is None: return None return cpio_file_entry.CPIOFileEntry( self._resolver_context, self, path_spec, cpio_archive_file_entry=cpio_archive_file_entry)
[ "def", "GetFileEntryByPathSpec", "(", "self", ",", "path_spec", ")", ":", "location", "=", "getattr", "(", "path_spec", ",", "'location'", ",", "None", ")", "if", "(", "location", "is", "None", "or", "not", "location", ".", "startswith", "(", "self", ".", ...
29.892857
19.535714
def predict(x, P, F=1, Q=0, u=0, B=1, alpha=1.): """ Predict next state (prior) using the Kalman filter state propagation equations. Parameters ---------- x : numpy.array State estimate vector P : numpy.array Covariance matrix F : numpy.array() State Transition matrix Q : numpy.array, Optional Process noise matrix u : numpy.array, Optional, default 0. Control vector. If non-zero, it is multiplied by B to create the control input into the system. B : numpy.array, optional, default 0. Control transition matrix. alpha : float, Optional, default=1.0 Fading memory setting. 1.0 gives the normal Kalman filter, and values slightly larger than 1.0 (such as 1.02) give a fading memory effect - previous measurements have less influence on the filter's estimates. This formulation of the Fading memory filter (there are many) is due to Dan Simon Returns ------- x : numpy.array Prior state estimate vector P : numpy.array Prior covariance matrix """ if np.isscalar(F): F = np.array(F) x = dot(F, x) + dot(B, u) P = (alpha * alpha) * dot(dot(F, P), F.T) + Q return x, P
[ "def", "predict", "(", "x", ",", "P", ",", "F", "=", "1", ",", "Q", "=", "0", ",", "u", "=", "0", ",", "B", "=", "1", ",", "alpha", "=", "1.", ")", ":", "if", "np", ".", "isscalar", "(", "F", ")", ":", "F", "=", "np", ".", "array", "(...
24.176471
22.647059
def ec2_fab(service, args): """ Run Fabric commands against EC2 instances """ instance_ids = args.instances instances = service.list(elb=args.elb, instance_ids=instance_ids) hosts = service.resolve_hosts(instances) fab.env.hosts = hosts fab.env.key_filename = settings.get('SSH', 'KEY_FILE') fab.env.user = settings.get('SSH', 'USER', getpass.getuser()) fab.env.parallel = True fabfile = find_fabfile(args.file) if not fabfile: print 'Couldn\'t find any fabfiles!' return fab.env.real_fabile = fabfile docstring, callables, default = load_fabfile(fabfile) fab_state.commands.update(callables) commands_to_run = parse_arguments(args.methods) for name, args, kwargs, arg_hosts, arg_roles, arg_exclude_hosts in commands_to_run: fab.execute(name, hosts=arg_hosts, roles=arg_roles, exclude_hosts=arg_exclude_hosts, *args, **kwargs)
[ "def", "ec2_fab", "(", "service", ",", "args", ")", ":", "instance_ids", "=", "args", ".", "instances", "instances", "=", "service", ".", "list", "(", "elb", "=", "args", ".", "elb", ",", "instance_ids", "=", "instance_ids", ")", "hosts", "=", "service",...
33.586207
16
def unset_refresh_cookies(response): """ takes a flask response object, and configures it to unset (delete) the refresh token from the response cookies. if `jwt_csrf_in_cookies` (see :ref:`configuration options`) is `true`, this will also remove the refresh csrf double submit value from the response cookies as well. :param response: the flask response object to delete the jwt cookies in. """ if not config.jwt_in_cookies: raise RuntimeWarning("unset_refresh_cookies() called without " "'JWT_TOKEN_LOCATION' configured to use cookies") response.set_cookie(config.refresh_cookie_name, value='', expires=0, secure=config.cookie_secure, httponly=True, domain=config.cookie_domain, path=config.refresh_cookie_path, samesite=config.cookie_samesite) if config.csrf_protect and config.csrf_in_cookies: response.set_cookie(config.refresh_csrf_cookie_name, value='', expires=0, secure=config.cookie_secure, httponly=False, domain=config.cookie_domain, path=config.refresh_csrf_cookie_path, samesite=config.cookie_samesite)
[ "def", "unset_refresh_cookies", "(", "response", ")", ":", "if", "not", "config", ".", "jwt_in_cookies", ":", "raise", "RuntimeWarning", "(", "\"unset_refresh_cookies() called without \"", "\"'JWT_TOKEN_LOCATION' configured to use cookies\"", ")", "response", ".", "set_cookie...
46.354839
18.225806
def parse_args(): """Parse the command line arguments.""" parser = argparse.ArgumentParser( description='Check kafka current status', ) parser.add_argument( "--cluster-type", "-t", dest='cluster_type', required=True, help='Type of cluster', default=None, ) parser.add_argument( "--cluster-name", "-c", dest='cluster_name', help='Name of the cluster', ) parser.add_argument( '--discovery-base-path', dest='discovery_base_path', type=str, help='Path of the directory containing the <cluster_type>.yaml config', ) parser.add_argument( "--broker-id", help='The broker id where the check is running. Set to -1 if you use automatic ' 'broker ids, and it will read the id from data-path instead. This parameter is ' 'required only in case controller-only or first-broker-only are used.', type=convert_to_broker_id, ) parser.add_argument( "--data-path", help='Path to the Kafka data folder.', ) parser.add_argument( '--controller-only', action="store_true", help='If this parameter is specified, it will do nothing and succeed on ' 'non-controller brokers. Default: %(default)s', ) parser.add_argument( '--first-broker-only', action='store_true', help='If specified, the command will only perform the check if ' 'broker_id is the lowest broker id in the cluster. If it is not the lowest, ' 'it will not perform any check and succeed immediately. ' 'Default: %(default)s', ) parser.add_argument( '-v', '--verbose', help='print verbose execution information. Default: %(default)s', action="store_true", default=False, ) parser.add_argument( '-j', '--json', help='Print output in json format. Default: %(default)s', action="store_true", default=False, ) subparsers = parser.add_subparsers() MinIsrCmd().add_subparser(subparsers) ReplicaUnavailabilityCmd().add_subparser(subparsers) ReplicationFactorCmd().add_subparser(subparsers) OfflineCmd().add_subparser(subparsers) return parser.parse_args()
[ "def", "parse_args", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Check kafka current status'", ",", ")", "parser", ".", "add_argument", "(", "\"--cluster-type\"", ",", "\"-t\"", ",", "dest", "=", "'cluster_type'", ...
31.388889
20.763889
def get_par_contribution(self,parlist_dict=None,include_prior_results=False): """get a dataframe the prior and posterior uncertainty reduction as a result of some parameter becoming perfectly known Parameters ---------- parlist_dict : dict a nested dictionary-list of groups of parameters that are to be treated as perfectly known. key values become row labels in returned dataframe. If None, each adjustable parameter is sequentially treated as known and the returned dataframe has row labels for each adjustable parameter include_prior_results : bool flag to return a multi-indexed dataframe with both conditional prior and posterior forecast uncertainty estimates. Default is False Returns ------- pandas.DataFrame : pandas.DataFrame a dataframe that summarizes the parameter contribution analysis. The dataframe has index (row labels) of the keys in parlist_dict and a column labels of forecast names. The values in the dataframe are the posterior variance of the forecast conditional on perfect knowledge of the parameters in the values of parlist_dict. Varies depending on `include_prior_results`. 
Example ------- ``>>>import pyemu`` ``>>>sc = pyemu.Schur(jco="pest.jcb")`` ``>>>df = sc.get_par_contribution()`` """ self.log("calculating contribution from parameters") if parlist_dict is None: parlist_dict = {}#dict(zip(self.pst.adj_par_names,self.pst.adj_par_names)) # make sure all of the adjustable pars are in the jco for pname in self.pst.adj_par_names: if pname in self.jco.col_names: parlist_dict[pname] = pname else: if type(parlist_dict) == list: parlist_dict = dict(zip(parlist_dict,parlist_dict)) results = {} names = ["base"] for forecast in self.prior_forecast.keys(): pr = self.prior_forecast[forecast] pt = self.posterior_forecast[forecast] #reduce = 100.0 * ((pr - pt) / pr) results[(forecast,"prior")] = [pr] results[(forecast,"post")] = [pt] #results[(forecast,"percent_reduce")] = [reduce] for case_name,par_list in parlist_dict.items(): if len(par_list) == 0: continue names.append(case_name) self.log("calculating contribution from: " + str(par_list)) case_prior,case_post = self.__contribution_from_parameters(par_list) self.log("calculating contribution from: " + str(par_list)) for forecast in case_prior.keys(): pr = case_prior[forecast] pt = case_post[forecast] #reduce = 100.0 * ((pr - pt) / pr) results[(forecast, "prior")].append(pr) results[(forecast, "post")].append(pt) #results[(forecast, "percent_reduce")].append(reduce) df = pd.DataFrame(results,index=names) #base = df.loc["base",df.columns.get_level_values(1)=="post"] #df = 1.0 - (df.loc[:,df.columns.get_level_values(1)=="post"] / base) self.log("calculating contribution from parameters") if include_prior_results: return df else: df = df.xs("post", level=1, drop_level=True, axis=1) return df
[ "def", "get_par_contribution", "(", "self", ",", "parlist_dict", "=", "None", ",", "include_prior_results", "=", "False", ")", ":", "self", ".", "log", "(", "\"calculating contribution from parameters\"", ")", "if", "parlist_dict", "is", "None", ":", "parlist_dict",...
43.725
21.4125
def save_object(self, obj): """ Save object to disk as JSON. Generally shouldn't be called directly. """ obj.pre_save(self.jurisdiction.jurisdiction_id) filename = '{0}_{1}.json'.format(obj._type, obj._id).replace('/', '-') self.info('save %s %s as %s', obj._type, obj, filename) self.debug(json.dumps(OrderedDict(sorted(obj.as_dict().items())), cls=utils.JSONEncoderPlus, indent=4, separators=(',', ': '))) self.output_names[obj._type].add(filename) with open(os.path.join(self.datadir, filename), 'w') as f: json.dump(obj.as_dict(), f, cls=utils.JSONEncoderPlus) # validate after writing, allows for inspection on failure try: obj.validate() except ValueError as ve: if self.strict_validation: raise ve else: self.warning(ve) # after saving and validating, save subordinate objects for obj in obj._related: self.save_object(obj)
[ "def", "save_object", "(", "self", ",", "obj", ")", ":", "obj", ".", "pre_save", "(", "self", ".", "jurisdiction", ".", "jurisdiction_id", ")", "filename", "=", "'{0}_{1}.json'", ".", "format", "(", "obj", ".", "_type", ",", "obj", ".", "_id", ")", "."...
34.096774
22.225806
def render_mail_template(subject_template, body_template, context): """ Renders both the subject and body templates in the given context. Returns a tuple (subject, body) of the result. """ try: subject = strip_spaces(render_to_string(subject_template, context)) body = render_to_string(body_template, context) finally: pass return subject, body
[ "def", "render_mail_template", "(", "subject_template", ",", "body_template", ",", "context", ")", ":", "try", ":", "subject", "=", "strip_spaces", "(", "render_to_string", "(", "subject_template", ",", "context", ")", ")", "body", "=", "render_to_string", "(", ...
32.166667
21.666667
def _class_type(klass, ancestors=None): """return a ClassDef node type to differ metaclass and exception from 'regular' classes """ # XXX we have to store ancestors in case we have an ancestor loop if klass._type is not None: return klass._type if _is_metaclass(klass): klass._type = "metaclass" elif klass.name.endswith("Exception"): klass._type = "exception" else: if ancestors is None: ancestors = set() klass_name = klass.qname() if klass_name in ancestors: # XXX we are in loop ancestors, and have found no type klass._type = "class" return "class" ancestors.add(klass_name) for base in klass.ancestors(recurs=False): name = _class_type(base, ancestors) if name != "class": if name == "metaclass" and not _is_metaclass(klass): # don't propagate it if the current class # can't be a metaclass continue klass._type = base.type break if klass._type is None: klass._type = "class" return klass._type
[ "def", "_class_type", "(", "klass", ",", "ancestors", "=", "None", ")", ":", "# XXX we have to store ancestors in case we have an ancestor loop", "if", "klass", ".", "_type", "is", "not", "None", ":", "return", "klass", ".", "_type", "if", "_is_metaclass", "(", "k...
36.28125
11.375
def configure_environment(self, last_trade, benchmark, timezone): ''' Prepare benchmark loader and trading context ''' if last_trade.tzinfo is None: last_trade = pytz.utc.localize(last_trade) # Setup the trading calendar from market informations self.benchmark = benchmark self.context = TradingEnvironment( bm_symbol=benchmark, exchange_tz=timezone, load=self._get_benchmark_handler(last_trade))
[ "def", "configure_environment", "(", "self", ",", "last_trade", ",", "benchmark", ",", "timezone", ")", ":", "if", "last_trade", ".", "tzinfo", "is", "None", ":", "last_trade", "=", "pytz", ".", "utc", ".", "localize", "(", "last_trade", ")", "# Setup the tr...
39.583333
16.916667
def xpathNextAncestorOrSelf(self, ctxt): """Traversal function for the "ancestor-or-self" direction he ancestor-or-self axis contains the context node and ancestors of the context node in reverse document order; thus the context node is the first node on the axis, and the context node's parent the second; parent here is defined the same as with the parent axis. """ if ctxt is None: ctxt__o = None else: ctxt__o = ctxt._o ret = libxml2mod.xmlXPathNextAncestorOrSelf(ctxt__o, self._o) if ret is None:raise xpathError('xmlXPathNextAncestorOrSelf() failed') __tmp = xmlNode(_obj=ret) return __tmp
[ "def", "xpathNextAncestorOrSelf", "(", "self", ",", "ctxt", ")", ":", "if", "ctxt", "is", "None", ":", "ctxt__o", "=", "None", "else", ":", "ctxt__o", "=", "ctxt", ".", "_o", "ret", "=", "libxml2mod", ".", "xmlXPathNextAncestorOrSelf", "(", "ctxt__o", ",",...
53.076923
15.307692
def GET_close_server(self) -> None: """Stop and close the *HydPy* server.""" def _close_server(): self.server.shutdown() self.server.server_close() shutter = threading.Thread(target=_close_server) shutter.deamon = True shutter.start()
[ "def", "GET_close_server", "(", "self", ")", "->", "None", ":", "def", "_close_server", "(", ")", ":", "self", ".", "server", ".", "shutdown", "(", ")", "self", ".", "server", ".", "server_close", "(", ")", "shutter", "=", "threading", ".", "Thread", "...
36.375
8.625
def train(self, cloud=None, batch=False, api_key=None, version=None, **kwargs): """ This is the basic training endpoint. Given an existing dataset this endpoint will train a model. Inputs api_key (optional) - String: Your API key, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. cloud (optional) - String: Your private cloud domain, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. """ url_params = {"batch": batch, "api_key": api_key, "version": version, 'method': "train"} return self._api_handler(self.keywords['collection'], cloud=cloud, api="custom", url_params=url_params, **kwargs)
[ "def", "train", "(", "self", ",", "cloud", "=", "None", ",", "batch", "=", "False", ",", "api_key", "=", "None", ",", "version", "=", "None", ",", "*", "*", "kwargs", ")", ":", "url_params", "=", "{", "\"batch\"", ":", "batch", ",", "\"api_key\"", ...
65.785714
39.357143
def equation_of_time(day): """Compute the equation of time for the given date. Uses formula described at https://en.wikipedia.org/wiki/Equation_of_time#Alternative_calculation :param day: The datetime.date to compute the equation of time for :returns: The angle, in radians, of the Equation of Time """ day_of_year = day.toordinal() - date(day.year, 1, 1).toordinal() # pylint: disable=invalid-name # # Distance Earth moves from solstice to January 1 (so about 10 days) # A = EARTH_ORIBITAL_VELOCITY * (day_of_year + 10) # # Distance Earth moves from solstice to day_of_year # 2 is the number of days from Jan 1 to periheleon # This is the result of a lot of constants collapsing # B = A + 1.914 * sin(radians(EARTH_ORIBITAL_VELOCITY * (day_of_year - 2))) # # Compute "the difference between the angles moved at mean speed, and at # the corrected speed projected onto the equatorial plane, and [divide] by # 180 to get the difference in 'half turns'" # movement_on_equatorial_plane = degrees( atan2( tan(radians(B)), cos(EARTH_AXIS_TILT) ) ) eot_half_turns = (A - movement_on_equatorial_plane) / 180 result = 720 * (eot_half_turns - int(eot_half_turns + 0.5)) return radians(result)
[ "def", "equation_of_time", "(", "day", ")", ":", "day_of_year", "=", "day", ".", "toordinal", "(", ")", "-", "date", "(", "day", ".", "year", ",", "1", ",", "1", ")", ".", "toordinal", "(", ")", "# pylint: disable=invalid-name", "#", "# Distance Earth move...
31.756098
25.707317
def fetch_samples(proj, selector_attribute=None, selector_include=None, selector_exclude=None): """ Collect samples of particular protocol(s). Protocols can't be both positively selected for and negatively selected against. That is, it makes no sense and is not allowed to specify both selector_include and selector_exclude protocols. On the other hand, if neither is provided, all of the Project's Samples are returned. If selector_include is specified, Samples without a protocol will be excluded, but if selector_exclude is specified, protocol-less Samples will be included. :param Project proj: the Project with Samples to fetch :param Project str: the sample selector_attribute to select for :param Iterable[str] | str selector_include: protocol(s) of interest; if specified, a Sample must :param Iterable[str] | str selector_exclude: protocol(s) to include :return list[Sample]: Collection of this Project's samples with protocol that either matches one of those in selector_include, or either lacks a protocol or does not match one of those in selector_exclude :raise TypeError: if both selector_include and selector_exclude protocols are specified; TypeError since it's basically providing two arguments when only one is accepted, so remain consistent with vanilla Python2 """ if selector_attribute is None or (not selector_include and not selector_exclude): # Simple; keep all samples. In this case, this function simply # offers a list rather than an iterator. return list(proj.samples) # At least one of the samples has to have the specified attribute if proj.samples and not any([hasattr(i, selector_attribute) for i in proj.samples]): raise AttributeError("The Project samples do not have the attribute '{attr}'" .format(attr=selector_attribute)) # Intersection between selector_include and selector_exclude is nonsense user error. 
if selector_include and selector_exclude: raise TypeError("Specify only selector_include or selector_exclude parameter, " "not both.") # Ensure that we're working with sets. def make_set(items): if isinstance(items, str): items = [items] return items # Use the attr check here rather than exception block in case the # hypothetical AttributeError would occur; we want such # an exception to arise, not to catch it as if the Sample lacks "protocol" if not selector_include: # Loose; keep all samples not in the selector_exclude. def keep(s): return not hasattr(s, selector_attribute) or \ getattr(s, selector_attribute) not in make_set(selector_exclude) else: # Strict; keep only samples in the selector_include. def keep(s): return hasattr(s, selector_attribute) and \ getattr(s, selector_attribute) in make_set(selector_include) return list(filter(keep, proj.samples))
[ "def", "fetch_samples", "(", "proj", ",", "selector_attribute", "=", "None", ",", "selector_include", "=", "None", ",", "selector_exclude", "=", "None", ")", ":", "if", "selector_attribute", "is", "None", "or", "(", "not", "selector_include", "and", "not", "se...
51.389831
27.186441
def CheckLibWithHeader(context, libs, header, language, call = None, autoadd = 1): # ToDo: accept path for library. Support system header files. """ Another (more sophisticated) test for a library. Checks, if library and header is available for language (may be 'C' or 'CXX'). Call maybe be a valid expression _with_ a trailing ';'. As in CheckLib, we support library=None, to test if the call compiles without extra link flags. """ prog_prefix, dummy = \ createIncludesFromHeaders(header, 0) if libs == []: libs = [None] if not SCons.Util.is_List(libs): libs = [libs] res = SCons.Conftest.CheckLib(context, libs, None, prog_prefix, call = call, language = language, autoadd = autoadd) context.did_show_result = 1 return not res
[ "def", "CheckLibWithHeader", "(", "context", ",", "libs", ",", "header", ",", "language", ",", "call", "=", "None", ",", "autoadd", "=", "1", ")", ":", "# ToDo: accept path for library. Support system header files.", "prog_prefix", ",", "dummy", "=", "createIncludes...
37.863636
19.045455
def sync_accounts(self, accounts_data, clear = False, password=None, cb = None): """ Load all of the accounts from the account section of the config into the database. :param accounts_data: :param password: :return: """ # Map common values into the accounts records all_accounts = self.accounts kmap = Account.prop_map() for account_id, values in accounts_data.items(): if not isinstance(values, dict): continue d = {} a = self.library.find_or_new_account(account_id) a.secret_password = password or self.password for k, v in values.items(): if k in ('id',): continue try: if kmap[k] == 'secret' and v: a.encrypt_secret(v) else: setattr(a, kmap[k], v) except KeyError: d[k] = v a.data = d if values.get('service') == 's3': a.url = 's3://{}'.format(a.account_id) if cb: cb('Loaded account: {}'.format(a.account_id)) self.database.session.commit()
[ "def", "sync_accounts", "(", "self", ",", "accounts_data", ",", "clear", "=", "False", ",", "password", "=", "None", ",", "cb", "=", "None", ")", ":", "# Map common values into the accounts records", "all_accounts", "=", "self", ".", "accounts", "kmap", "=", "...
26.804348
20.673913
def seqingroups(groups,seq): 'helper for contigsub. takes the list of lists returned by groupelts and an array to check.\ returns (groupindex,indexingroup,matchlen) of longest match or None if no match' if not (groups and seq): return None bestmatch=None,None,0 if any(len(g)<2 for g in groups): raise ValueError('some subgroups have length < 2') for i,g in filter(lambda x:x[1][0],enumerate(groups)): # i.e. we're only interested in groups with common elements # begin starts at 0 so begin+1 starts at 1. (first elt of each group is the bool indicator) begin=0 while 1: try: begin=g.index(seq[0],begin+1) except ValueError: break jmax=min(len(g)-begin,len(seq)) for j in range(jmax): if g[begin+j]!=seq[j]: break else: j+=1 # so matchlen works below matchlen=min(j,jmax) if matchlen<bestmatch[2]: continue bestmatch=[i,begin,matchlen] # note: begin is an offset including the initial bool return bestmatch if bestmatch[2] else None
[ "def", "seqingroups", "(", "groups", ",", "seq", ")", ":", "if", "not", "(", "groups", "and", "seq", ")", ":", "return", "None", "bestmatch", "=", "None", ",", "None", ",", "0", "if", "any", "(", "len", "(", "g", ")", "<", "2", "for", "g", "in"...
50.7
22.9
def error(self, msg, *args, **kwargs) -> Task: # type: ignore """ Log msg with severity 'ERROR'. To pass exception information, use the keyword argument exc_info with a true value, e.g. await logger.error("Houston, we have a major problem", exc_info=1) """ return self._make_log_task(logging.ERROR, msg, args, **kwargs)
[ "def", "error", "(", "self", ",", "msg", ",", "*", "args", ",", "*", "*", "kwargs", ")", "->", "Task", ":", "# type: ignore", "return", "self", ".", "_make_log_task", "(", "logging", ".", "ERROR", ",", "msg", ",", "args", ",", "*", "*", "kwargs", "...
36.9
21.9
def table_row(self, content): """Rendering a table row. Like ``<tr>``. :param content: content of current table row. """ contents = content.splitlines() if not contents: return '' clist = ['* ' + contents[0]] if len(contents) > 1: for c in contents[1:]: clist.append(' ' + c) return '\n'.join(clist) + '\n'
[ "def", "table_row", "(", "self", ",", "content", ")", ":", "contents", "=", "content", ".", "splitlines", "(", ")", "if", "not", "contents", ":", "return", "''", "clist", "=", "[", "'* '", "+", "contents", "[", "0", "]", "]", "if", "len", "(", "con...
30.769231
9.615385
def list_related(self, request, pk=None, field_name=None): """Fetch related object(s), as if sideloaded (used to support link objects). This method gets mapped to `/<resource>/<pk>/<field_name>/` by DynamicRouter for all DynamicRelationField fields. Generally, this method probably shouldn't be overridden. An alternative implementation would be to generate reverse queries. For an exploration of that approach, see: https://gist.github.com/ryochiji/54687d675978c7d96503 """ # Explicitly disable support filtering. Applying filters to this # endpoint would require us to pass through sideload filters, which # can have unintended consequences when applied asynchronously. if self.get_request_feature(self.FILTER): raise ValidationError( 'Filtering is not enabled on relation endpoints.' ) # Prefix include/exclude filters with field_name so it's scoped to # the parent object. field_prefix = field_name + '.' self._prefix_inex_params(request, self.INCLUDE, field_prefix) self._prefix_inex_params(request, self.EXCLUDE, field_prefix) # Filter for parent object, include related field. self.request.query_params.add('filter{pk}', pk) self.request.query_params.add(self.INCLUDE, field_prefix) # Get serializer and field. serializer = self.get_serializer() field = serializer.fields.get(field_name) if field is None: raise ValidationError('Unknown field: "%s".' % field_name) # Query for root object, with related field prefetched queryset = self.get_queryset() queryset = self.filter_queryset(queryset) obj = queryset.first() if not obj: return Response("Not found", status=404) # Serialize the related data. Use the field's serializer to ensure # it's configured identically to the sideload case. serializer = field.get_serializer(envelope=True) try: # TODO(ryo): Probably should use field.get_attribute() but that # seems to break a bunch of things. Investigate later. 
serializer.instance = getattr(obj, field.source) except ObjectDoesNotExist: # See: # http://jsonapi.org/format/#fetching-relationships-responses-404 # This is a case where the "link URL exists but the relationship # is empty" and therefore must return a 200. return Response({}, status=200) return Response(serializer.data)
[ "def", "list_related", "(", "self", ",", "request", ",", "pk", "=", "None", ",", "field_name", "=", "None", ")", ":", "# Explicitly disable support filtering. Applying filters to this", "# endpoint would require us to pass through sideload filters, which", "# can have unintended ...
43.416667
22.516667
async def close(self): """ Closes connection and resets pool """ if self._pool is not None and not isinstance(self.connection, aioredis.Redis): self._pool.close() await self._pool.wait_closed() self._pool = None
[ "async", "def", "close", "(", "self", ")", ":", "if", "self", ".", "_pool", "is", "not", "None", "and", "not", "isinstance", "(", "self", ".", "connection", ",", "aioredis", ".", "Redis", ")", ":", "self", ".", "_pool", ".", "close", "(", ")", "awa...
33.5
11.5
def safe_get(self, section, key): """ Attempt to get a configuration value from a certain section in a ``cfg`` object but returning None if not found. Avoids the need to be doing try/except {ConfigParser Exceptions} every time. """ try: #Use full parent function so we can replace it in the class # if desired return configparser.RawConfigParser.get(self, section, key) except (configparser.NoSectionError, configparser.NoOptionError): return None
[ "def", "safe_get", "(", "self", ",", "section", ",", "key", ")", ":", "try", ":", "#Use full parent function so we can replace it in the class", "# if desired", "return", "configparser", ".", "RawConfigParser", ".", "get", "(", "self", ",", "section", ",", "key", ...
42.692308
17.615385
def create_asset_class(self, item: AssetClass): """ Inserts the record """ session = self.open_session() session.add(item) session.commit()
[ "def", "create_asset_class", "(", "self", ",", "item", ":", "AssetClass", ")", ":", "session", "=", "self", ".", "open_session", "(", ")", "session", ".", "add", "(", "item", ")", "session", ".", "commit", "(", ")" ]
33.4
8.2
def create( self, name, description="", whitelisted_container_task_types=None, whitelisted_executable_task_types=None, ): """Create a task whitelist. Args: name (str): The name of the task whitelist. description (str, optional): A description of the task whitelist. whitelisted_container_task_types (list, optional): A list of whitelisted container task type IDs. whitelisted_executable_task_types (list, optional): A list of whitelisted executable task type IDs. Returns: :class:`saltant.models.task_whitelist.TaskWhitelist`: A task whitelist model instance representing the task whitelist just created. """ # Translate whitelists None to [] if necessary if whitelisted_container_task_types is None: whitelisted_container_task_types = [] if whitelisted_executable_task_types is None: whitelisted_executable_task_types = [] # Create the object request_url = self._client.base_api_url + self.list_url data_to_post = { "name": name, "description": description, "whitelisted_container_task_types": whitelisted_container_task_types, "whitelisted_executable_task_types": whitelisted_executable_task_types, } response = self._client.session.post(request_url, data=data_to_post) # Validate that the request was successful self.validate_request_success( response_text=response.text, request_url=request_url, status_code=response.status_code, expected_status_code=HTTP_201_CREATED, ) # Return a model instance representing the task instance return self.response_data_to_model_instance(response.json())
[ "def", "create", "(", "self", ",", "name", ",", "description", "=", "\"\"", ",", "whitelisted_container_task_types", "=", "None", ",", "whitelisted_executable_task_types", "=", "None", ",", ")", ":", "# Translate whitelists None to [] if necessary", "if", "whitelisted_c...
37.48
21.32
def plot_correlation_heatmap(self): """ Return HTML for correlation heatmap """ data = None corr_type = None correlation_type = getattr(config, 'rna_seqc' ,{}).get('default_correlation', 'spearman') if self.rna_seqc_spearman is not None and correlation_type != 'pearson': data = self.rna_seqc_spearman corr_type = 'Spearman' elif self.rna_seqc_pearson is not None: data = self.rna_seqc_pearson corr_type = 'Pearson' if data is not None: pconfig = { 'id': 'rna_seqc_correlation_heatmap', 'title': 'RNA-SeQC: {} Sample Correlation'.format(corr_type) } self.add_section ( name = '{} Correlation'.format(corr_type), anchor = 'rseqc-rna_seqc_correlation', plot = heatmap.plot(data[1], data[0], data[0], pconfig) )
[ "def", "plot_correlation_heatmap", "(", "self", ")", ":", "data", "=", "None", "corr_type", "=", "None", "correlation_type", "=", "getattr", "(", "config", ",", "'rna_seqc'", ",", "{", "}", ")", ".", "get", "(", "'default_correlation'", ",", "'spearman'", ")...
43.809524
17.380952
def get_system_offset(): """Get system's timezone offset using built-in library time. For the Timezone constants (altzone, daylight, timezone, and tzname), the value is determined by the timezone rules in effect at module load time or the last time tzset() is called and may be incorrect for times in the past. To keep compatibility with Windows, we're always importing time module here. """ import time if time.daylight and time.localtime().tm_isdst > 0: return -time.altzone else: return -time.timezone
[ "def", "get_system_offset", "(", ")", ":", "import", "time", "if", "time", ".", "daylight", "and", "time", ".", "localtime", "(", ")", ".", "tm_isdst", ">", "0", ":", "return", "-", "time", ".", "altzone", "else", ":", "return", "-", "time", ".", "ti...
38.928571
24.571429
def Psat(self, T, polish=False): r'''Generic method to calculate vapor pressure for a specified `T`. From Tc to 0.32Tc, uses a 10th order polynomial of the following form: .. math:: \ln\frac{P_r}{T_r} = \sum_{k=0}^{10} C_k\left(\frac{\alpha}{T_r} -1\right)^{k} If `polish` is True, SciPy's `newton` solver is launched with the calculated vapor pressure as an initial guess in an attempt to get more accuracy. This may not converge however. Results above the critical temperature are meaningless. A first-order polynomial is used to extrapolate under 0.32 Tc; however, there is normally not a volume solution to the EOS which can produce that low of a pressure. Parameters ---------- T : float Temperature, [K] polish : bool, optional Whether to attempt to use a numerical solver to make the solution more precise or not Returns ------- Psat : float Vapor pressure, [Pa] Notes ----- EOSs sharing the same `b`, `delta`, and `epsilon` have the same coefficient sets. All coefficients were derived with numpy's polyfit. The intersection between the polynomials is continuous, but there is a step change in its derivative. Form for the regression is inspired from [1]_. References ---------- .. [1] Soave, G. "Direct Calculation of Pure-Compound Vapour Pressures through Cubic Equations of State." Fluid Phase Equilibria 31, no. 2 (January 1, 1986): 203-7. doi:10.1016/0378-3812(86)90013-0. ''' alpha = self.a_alpha_and_derivatives(T, full=False)/self.a Tr = T/self.Tc x = alpha/Tr - 1. c = self.Psat_coeffs_limiting if Tr < 0.32 else self.Psat_coeffs y = horner(c, x) try: Psat = exp(y)*Tr*self.Pc except OverflowError: # coefficients sometimes overflow before T is lowered to 0.32Tr polish = False Psat = 0 if polish: def to_solve(P): # For use by newton. 
Only supports initialization with Tc, Pc and omega # ~200x slower and not guaranteed to converge e = self.__class__(Tc=self.Tc, Pc=self.Pc, omega=self.omega, T=T, P=P) err = e.fugacity_l - e.fugacity_g return err Psat = newton(to_solve, Psat) return Psat
[ "def", "Psat", "(", "self", ",", "T", ",", "polish", "=", "False", ")", ":", "alpha", "=", "self", ".", "a_alpha_and_derivatives", "(", "T", ",", "full", "=", "False", ")", "/", "self", ".", "a", "Tr", "=", "T", "/", "self", ".", "Tc", "x", "="...
37.884058
24.492754
def render_field(dictionary, field, prepend=None, append=None, quotes=False, **opts): ''' Render a field found under the ``field`` level of the hierarchy in the ``dictionary`` object. This is useful to render a field in a Jinja template without worrying that the hierarchy might not exist. For example if we do the following in Jinja: ``{{ interfaces.interface.Ethernet5.config.description }}`` for the following object: ``{'interfaces': {'interface': {'Ethernet1': {'config': {'enabled': True}}}}}`` it would error, as the ``Ethernet5`` key does not exist. With this helper, we can skip this and avoid existence checks. This must be however used with care. dictionary The dictionary to traverse. field The key name or part to traverse in the ``dictionary``. prepend: ``None`` The text to prepend in front of the text. Usually, we need to have the name of the field too when generating the configuration. append: ``None`` Text to append at the end. quotes: ``False`` Whether should wrap the text around quotes. CLI Example: .. code-block:: bash salt '*' napalm_formula.render_field "{'enabled': True}" enabled # This would return the value of the ``enabled`` leaf key salt '*' napalm_formula.render_field "{'enabled': True}" description # This would not error Jinja usage example: .. code-block:: jinja {%- set config = {'enabled': True, 'description': 'Interface description'} %} {{ salt.napalm_formula.render_field(config, 'description', quotes=True) }} The example above would be rendered on Arista / Cisco as: .. code-block:: text description "Interface description" While on Junos (the semicolon is important to be added, otherwise the configuration won't be accepted by Junos): .. 
code-block:: text description "Interface description"; ''' value = traverse(dictionary, field) if value is None: return '' if prepend is None: prepend = field.replace('_', '-') if append is None: if __grains__['os'] in ('junos',): append = ';' else: append = '' if quotes: value = '"{value}"'.format(value=value) return '{prepend} {value}{append}'.format(prepend=prepend, value=value, append=append)
[ "def", "render_field", "(", "dictionary", ",", "field", ",", "prepend", "=", "None", ",", "append", "=", "None", ",", "quotes", "=", "False", ",", "*", "*", "opts", ")", ":", "value", "=", "traverse", "(", "dictionary", ",", "field", ")", "if", "valu...
32.141026
24.705128
def get_extension_reports(self, publisher_name, extension_name, days=None, count=None, after_date=None): """GetExtensionReports. [Preview API] Returns extension reports :param str publisher_name: Name of the publisher who published the extension :param str extension_name: Name of the extension :param int days: Last n days report. If afterDate and days are specified, days will take priority :param int count: Number of events to be returned :param datetime after_date: Use if you want to fetch events newer than the specified date :rtype: object """ route_values = {} if publisher_name is not None: route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str') if extension_name is not None: route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str') query_parameters = {} if days is not None: query_parameters['days'] = self._serialize.query('days', days, 'int') if count is not None: query_parameters['count'] = self._serialize.query('count', count, 'int') if after_date is not None: query_parameters['afterDate'] = self._serialize.query('after_date', after_date, 'iso-8601') response = self._send(http_method='GET', location_id='79e0c74f-157f-437e-845f-74fbb4121d4c', version='5.1-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('object', response)
[ "def", "get_extension_reports", "(", "self", ",", "publisher_name", ",", "extension_name", ",", "days", "=", "None", ",", "count", "=", "None", ",", "after_date", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "publisher_name", "is", "not", "N...
59.5
26.357143
def _should_proxy(self, attr): """ Determines whether `attr` should be looked up on the proxied object, or the proxy itself. """ if attr in type(self).__notproxied__: return False if _oga(self, "__notproxied__") is True: return False return True
[ "def", "_should_proxy", "(", "self", ",", "attr", ")", ":", "if", "attr", "in", "type", "(", "self", ")", ".", "__notproxied__", ":", "return", "False", "if", "_oga", "(", "self", ",", "\"__notproxied__\"", ")", "is", "True", ":", "return", "False", "r...
28.727273
15.454545
def create(self, instance, parameters, existing=True): """Create an instance Args: instance (AtlasServiceInstance.Instance): Existing or New instance parameters (dict): Parameters for the instance Keyword Arguments: existing (bool): True (use an existing cluster), False (create a new cluster) Returns: ProvisionedServiceSpec: Status """ return self.service_instance.create(instance, parameters, existing)
[ "def", "create", "(", "self", ",", "instance", ",", "parameters", ",", "existing", "=", "True", ")", ":", "return", "self", ".", "service_instance", ".", "create", "(", "instance", ",", "parameters", ",", "existing", ")" ]
37.071429
22.357143
def _get_object_as_soft(self): """Get object as SOFT formatted string.""" soft = [] if self.database is not None: soft.append(self.database._get_object_as_soft()) soft += ["^%s = %s" % (self.geotype, self.name), self._get_metadata_as_string()] for gsm in itervalues(self.gsms): soft.append(gsm._get_object_as_soft()) for gpl in itervalues(self.gpls): soft.append(gpl._get_object_as_soft()) return "\n".join(soft)
[ "def", "_get_object_as_soft", "(", "self", ")", ":", "soft", "=", "[", "]", "if", "self", ".", "database", "is", "not", "None", ":", "soft", ".", "append", "(", "self", ".", "database", ".", "_get_object_as_soft", "(", ")", ")", "soft", "+=", "[", "\...
39.230769
11.692308
def update_from_response(self, response): """ Update the state of the Table object based on the response data received from Amazon DynamoDB. """ if 'Table' in response: self._dict.update(response['Table']) elif 'TableDescription' in response: self._dict.update(response['TableDescription']) if 'KeySchema' in self._dict: self._schema = Schema(self._dict['KeySchema'])
[ "def", "update_from_response", "(", "self", ",", "response", ")", ":", "if", "'Table'", "in", "response", ":", "self", ".", "_dict", ".", "update", "(", "response", "[", "'Table'", "]", ")", "elif", "'TableDescription'", "in", "response", ":", "self", ".",...
40.818182
8.272727
def represent_pixel_location(self): """ Returns a NumPy array that represents the 2D pixel location, which is defined by PFNC, of the original image data. You may use the returned NumPy array for a calculation to map the original image to another format. :return: A NumPy array that represents the 2D pixel location. """ if self.data is None: return None # return self._data.reshape( self.height + self.y_padding, int(self.width * self._num_components_per_pixel + self.x_padding) )
[ "def", "represent_pixel_location", "(", "self", ")", ":", "if", "self", ".", "data", "is", "None", ":", "return", "None", "#", "return", "self", ".", "_data", ".", "reshape", "(", "self", ".", "height", "+", "self", ".", "y_padding", ",", "int", "(", ...
32.833333
20.611111
def create_authorizer(self, restapi, uri, authorizer): """ Create Authorizer for API gateway """ authorizer_type = authorizer.get("type", "TOKEN").upper() identity_validation_expression = authorizer.get('validation_expression', None) authorizer_resource = troposphere.apigateway.Authorizer("Authorizer") authorizer_resource.RestApiId = troposphere.Ref(restapi) authorizer_resource.Name = authorizer.get("name", "ZappaAuthorizer") authorizer_resource.Type = authorizer_type authorizer_resource.AuthorizerUri = uri authorizer_resource.IdentitySource = "method.request.header.%s" % authorizer.get('token_header', 'Authorization') if identity_validation_expression: authorizer_resource.IdentityValidationExpression = identity_validation_expression if authorizer_type == 'TOKEN': if not self.credentials_arn: self.get_credentials_arn() authorizer_resource.AuthorizerResultTtlInSeconds = authorizer.get('result_ttl', 300) authorizer_resource.AuthorizerCredentials = self.credentials_arn if authorizer_type == 'COGNITO_USER_POOLS': authorizer_resource.ProviderARNs = authorizer.get('provider_arns') self.cf_api_resources.append(authorizer_resource.title) self.cf_template.add_resource(authorizer_resource) return authorizer_resource
[ "def", "create_authorizer", "(", "self", ",", "restapi", ",", "uri", ",", "authorizer", ")", ":", "authorizer_type", "=", "authorizer", ".", "get", "(", "\"type\"", ",", "\"TOKEN\"", ")", ".", "upper", "(", ")", "identity_validation_expression", "=", "authoriz...
50.5
24.571429
def print_nodes(nodes, detailed=False): """Prints all the given nodes""" found = 0 for node in nodes: found += 1 print_node(node, detailed=detailed) print("\nFound {0} node{1}".format(found, "s" if found != 1 else ""))
[ "def", "print_nodes", "(", "nodes", ",", "detailed", "=", "False", ")", ":", "found", "=", "0", "for", "node", "in", "nodes", ":", "found", "+=", "1", "print_node", "(", "node", ",", "detailed", "=", "detailed", ")", "print", "(", "\"\\nFound {0} node{1}...
34.857143
14.857143
def _transformBy(self, matrix, **kwargs): """ Subclasses may override this method. """ for contour in self.contours: contour.transformBy(matrix) for component in self.components: component.transformBy(matrix) for anchor in self.anchors: anchor.transformBy(matrix) for guideline in self.guidelines: guideline.transformBy(matrix)
[ "def", "_transformBy", "(", "self", ",", "matrix", ",", "*", "*", "kwargs", ")", ":", "for", "contour", "in", "self", ".", "contours", ":", "contour", ".", "transformBy", "(", "matrix", ")", "for", "component", "in", "self", ".", "components", ":", "co...
35
1.666667
def get_structures(self, chemsys_formula_id, final=True): """ Get a list of Structures corresponding to a chemical system, formula, or materials_id. Args: chemsys_formula_id (str): A chemical system (e.g., Li-Fe-O), or formula (e.g., Fe2O3) or materials_id (e.g., mp-1234). final (bool): Whether to get the final structure, or the initial (pre-relaxation) structure. Defaults to True. Returns: List of Structure objects. """ prop = "final_structure" if final else "initial_structure" data = self.get_data(chemsys_formula_id, prop=prop) return [d[prop] for d in data]
[ "def", "get_structures", "(", "self", ",", "chemsys_formula_id", ",", "final", "=", "True", ")", ":", "prop", "=", "\"final_structure\"", "if", "final", "else", "\"initial_structure\"", "data", "=", "self", ".", "get_data", "(", "chemsys_formula_id", ",", "prop"...
40.705882
21.882353
def link_file(self, path, prefixed_path, source_storage): """ Attempt to link ``path`` """ # Skip this file if it was already copied earlier if prefixed_path in self.symlinked_files: return self.log("Skipping '%s' (already linked earlier)" % path) # Delete the target file if needed or break if not self.delete_file(path, prefixed_path, source_storage): return # The full path of the source file source_path = source_storage.path(path) # Finally link the file if self.dry_run: self.log("Pretending to link '%s'" % source_path, level=1) else: self.log("Linking '%s'" % source_path, level=1) full_path = self.storage.path(prefixed_path) try: os.makedirs(os.path.dirname(full_path)) except OSError: pass try: if os.path.lexists(full_path): os.unlink(full_path) os.symlink(source_path, full_path) except AttributeError: import platform raise CommandError("Symlinking is not supported by Python %s." % platform.python_version()) except NotImplementedError: import platform raise CommandError("Symlinking is not supported in this " "platform (%s)." % platform.platform()) except OSError as e: raise CommandError(e) if prefixed_path not in self.symlinked_files: self.symlinked_files.append(prefixed_path)
[ "def", "link_file", "(", "self", ",", "path", ",", "prefixed_path", ",", "source_storage", ")", ":", "# Skip this file if it was already copied earlier", "if", "prefixed_path", "in", "self", ".", "symlinked_files", ":", "return", "self", ".", "log", "(", "\"Skipping...
43.210526
15.210526
def open_file_dialog(windowTitle, wildcard, defaultDir=os.getcwd(), style=None, parent=None): """ Opens a wx widget file select dialog. Wild card specifies which kinds of files are allowed. Style - specifies style of dialog (read wx documentation for information) """ if parent == None: app = wx.App(None) if style == None: style = wx.OPEN | wx.CHANGE_DIR dialog = wx.FileDialog(parent, windowTitle, defaultDir=defaultDir, wildcard=wildcard, style=style) if dialog.ShowModal() == wx.ID_OK: path = dialog.GetPath() else: path = None dialog.Destroy() return path
[ "def", "open_file_dialog", "(", "windowTitle", ",", "wildcard", ",", "defaultDir", "=", "os", ".", "getcwd", "(", ")", ",", "style", "=", "None", ",", "parent", "=", "None", ")", ":", "if", "parent", "==", "None", ":", "app", "=", "wx", ".", "App", ...
31.190476
23.095238
def item(self, index: int) -> Optional[Node]: """Return item with the index. If the index is negative number or out of the list, return None. """ if not isinstance(index, int): raise TypeError( 'Indeces must be integer, not {}'.format(type(index))) return self.__nodes[index] if 0 <= index < self.length else None
[ "def", "item", "(", "self", ",", "index", ":", "int", ")", "->", "Optional", "[", "Node", "]", ":", "if", "not", "isinstance", "(", "index", ",", "int", ")", ":", "raise", "TypeError", "(", "'Indeces must be integer, not {}'", ".", "format", "(", "type",...
41.555556
17
def _get_equivalent_distances_east(wid, lng, mag, repi, focal_depth=10., ab06=False): """ Computes equivalent values of Joyner-Boore and closest distance to the rupture given epoicentral distance. The procedure is described in Atkinson (2012) - Appendix A (page 32). :param float wid: Width of rectangular rupture :param float lng: Length of rectangular rupture :param float mag: Magnitude :param repi: A :class:`numpy.ndarray` instance containing repi values :param float focal_depth: Focal depth :param boolean ab06: When true a minimum ztor value is set to force near-source saturation """ dtop = focal_depth - 0.5*wid # this computes a minimum ztor value - used for AB2006 if ab06: ztor_ab06 = 21-2.5*mag dtop = np.max([ztor_ab06, dtop]) ztor = max(0, dtop) # find the average distance to the fault projection dsurf = np.max([repi-0.3*lng, 0.1*np.ones_like(repi)], axis=0) # rrup rrup = (dsurf**2+ztor**2)**0.5 # return rjb and rrup return dsurf, rrup
[ "def", "_get_equivalent_distances_east", "(", "wid", ",", "lng", ",", "mag", ",", "repi", ",", "focal_depth", "=", "10.", ",", "ab06", "=", "False", ")", ":", "dtop", "=", "focal_depth", "-", "0.5", "*", "wid", "# this computes a minimum ztor value - used for AB...
34.625
17.375
def _compute_term2(self, C, mag, r): """ This computes the term f2 equation 8 Drouet & Cotton (2015) """ return (C['c4'] + C['c5'] * mag) * \ np.log(np.sqrt(r**2 + C['c6']**2)) + C['c7'] * r
[ "def", "_compute_term2", "(", "self", ",", "C", ",", "mag", ",", "r", ")", ":", "return", "(", "C", "[", "'c4'", "]", "+", "C", "[", "'c5'", "]", "*", "mag", ")", "*", "np", ".", "log", "(", "np", ".", "sqrt", "(", "r", "**", "2", "+", "C...
38.166667
9.166667
def to_netflux(flux): r"""Compute the netflux. f_ij^{+}=max{0, f_ij-f_ji} for all pairs i,j Parameters ---------- flux : (M, M) scipy.sparse matrix Matrix of flux values between pairs of states. Returns ------- netflux : (M, M) scipy.sparse matrix Matrix of netflux values between pairs of states. """ netflux = flux - flux.T """Set negative entries to zero""" netflux = remove_negative_entries(netflux) return netflux
[ "def", "to_netflux", "(", "flux", ")", ":", "netflux", "=", "flux", "-", "flux", ".", "T", "\"\"\"Set negative entries to zero\"\"\"", "netflux", "=", "remove_negative_entries", "(", "netflux", ")", "return", "netflux" ]
21.545455
19.681818
def _dfromtimestamp(timestamp): """Custom date timestamp constructor. ditto """ try: return datetime.date.fromtimestamp(timestamp) except OSError: timestamp -= time.timezone d = datetime.date(1970, 1, 1) + datetime.timedelta(seconds=timestamp) if _isdst(d): timestamp += 3600 d = datetime.date(1970, 1, 1) + datetime.timedelta(seconds=timestamp) return d
[ "def", "_dfromtimestamp", "(", "timestamp", ")", ":", "try", ":", "return", "datetime", ".", "date", ".", "fromtimestamp", "(", "timestamp", ")", "except", "OSError", ":", "timestamp", "-=", "time", ".", "timezone", "d", "=", "datetime", ".", "date", "(", ...
35.25
17.75
def combine_calls(*args): """Combine multiple callsets into a final set of merged calls. """ if len(args) == 3: is_cwl = False batch_id, samples, data = args caller_names, vrn_files = _organize_variants(samples, batch_id) else: is_cwl = True samples = [utils.to_single_data(x) for x in args] samples = [cwlutils.unpack_tarballs(x, x) for x in samples] data = samples[0] batch_id = data["batch_id"] caller_names = data["variants"]["variantcallers"] vrn_files = data["variants"]["calls"] logger.info("Ensemble consensus calls for {0}: {1}".format( batch_id, ",".join(caller_names))) edata = copy.deepcopy(data) base_dir = utils.safe_makedir(os.path.join(edata["dirs"]["work"], "ensemble", batch_id)) if any([vcfutils.vcf_has_variants(f) for f in vrn_files]): # Decompose multiallelic variants and normalize passonly = not tz.get_in(["config", "algorithm", "ensemble", "use_filtered"], edata, False) vrn_files = [normalize.normalize(f, data, passonly=passonly, rerun_effects=False, remove_oldeffects=True, nonrefonly=True, work_dir=utils.safe_makedir(os.path.join(base_dir, c))) for c, f in zip(caller_names, vrn_files)] if "classifiers" not in (dd.get_ensemble(edata) or {}): callinfo = _run_ensemble_intersection(batch_id, vrn_files, caller_names, base_dir, edata) else: config_file = _write_config_file(batch_id, caller_names, base_dir, edata) callinfo = _run_ensemble(batch_id, vrn_files, config_file, base_dir, dd.get_ref_file(edata), edata) callinfo["vrn_file"] = vcfutils.bgzip_and_index(callinfo["vrn_file"], data["config"]) # After decomposing multiallelic variants and normalizing, re-evaluate effects ann_ma_file, _ = effects.add_to_vcf(callinfo["vrn_file"], data) if ann_ma_file: callinfo["vrn_file"] = ann_ma_file edata["config"]["algorithm"]["variantcaller"] = "ensemble" edata["vrn_file"] = callinfo["vrn_file"] edata["ensemble_bed"] = callinfo["bed_file"] callinfo["validate"] = validate.compare_to_rm(edata)[0][0].get("validate") else: out_vcf_file = os.path.join(base_dir, 
"{0}-ensemble.vcf".format(batch_id)) vcfutils.write_empty_vcf(out_vcf_file, samples=[dd.get_sample_name(d) for d in samples]) callinfo = {"variantcaller": "ensemble", "vrn_file": vcfutils.bgzip_and_index(out_vcf_file, data["config"]), "bed_file": None} if is_cwl: callinfo["batch_samples"] = data["batch_samples"] callinfo["batch_id"] = batch_id return [{"ensemble": callinfo}] else: return [[batch_id, callinfo]]
[ "def", "combine_calls", "(", "*", "args", ")", ":", "if", "len", "(", "args", ")", "==", "3", ":", "is_cwl", "=", "False", "batch_id", ",", "samples", ",", "data", "=", "args", "caller_names", ",", "vrn_files", "=", "_organize_variants", "(", "samples", ...
53.203704
24.888889
def handle_hooks(self, hooks, hook_type, *args): ''' Processes hooks of the specified type. :param hook_type: The type of hook, including ``before``, ``after``, ``on_error``, and ``on_route``. :param \*args: Arguments to pass to the hooks. ''' if hook_type not in ['before', 'on_route']: hooks = reversed(hooks) for hook in hooks: result = getattr(hook, hook_type)(*args) # on_error hooks can choose to return a Response, which will # be used instead of the standard error pages. if hook_type == 'on_error' and isinstance(result, WebObResponse): return result
[ "def", "handle_hooks", "(", "self", ",", "hooks", ",", "hook_type", ",", "*", "args", ")", ":", "if", "hook_type", "not", "in", "[", "'before'", ",", "'on_route'", "]", ":", "hooks", "=", "reversed", "(", "hooks", ")", "for", "hook", "in", "hooks", "...
41.352941
21.117647
def _handle_amqp_frame(self, data_in): """Unmarshal a single AMQP frame and return the result. :param data_in: socket data :return: data_in, channel_id, frame """ if not data_in: return data_in, None, None try: byte_count, channel_id, frame_in = pamqp_frame.unmarshal(data_in) return data_in[byte_count:], channel_id, frame_in except pamqp_exception.UnmarshalingException: pass except specification.AMQPFrameError as why: LOGGER.error('AMQPFrameError: %r', why, exc_info=True) except ValueError as why: LOGGER.error(why, exc_info=True) self.exceptions.append(AMQPConnectionError(why)) return data_in, None, None
[ "def", "_handle_amqp_frame", "(", "self", ",", "data_in", ")", ":", "if", "not", "data_in", ":", "return", "data_in", ",", "None", ",", "None", "try", ":", "byte_count", ",", "channel_id", ",", "frame_in", "=", "pamqp_frame", ".", "unmarshal", "(", "data_i...
37.9
15.3
def validate(self): """ validate: Makes sure content node is valid Args: None Returns: boolean indicating if content node is valid """ assert isinstance(self.author, str) , "Assumption Failed: Author is not a string" assert isinstance(self.aggregator, str) , "Assumption Failed: Aggregator is not a string" assert isinstance(self.provider, str) , "Assumption Failed: Provider is not a string" assert isinstance(self.files, list), "Assumption Failed: Files is not a list" assert isinstance(self.questions, list), "Assumption Failed: Questions is not a list" assert isinstance(self.extra_fields, dict), "Assumption Failed: Extra fields is not a dict" return super(TreeNode, self).validate()
[ "def", "validate", "(", "self", ")", ":", "assert", "isinstance", "(", "self", ".", "author", ",", "str", ")", ",", "\"Assumption Failed: Author is not a string\"", "assert", "isinstance", "(", "self", ".", "aggregator", ",", "str", ")", ",", "\"Assumption Faile...
64.416667
32.166667
def loadAnns(self, ids=[]): """ Load anns with the specified ids. :param ids (int array) : integer ids specifying anns :return: anns (object array) : loaded ann objects """ if type(ids) == list: return [self.anns[id] for id in ids] elif type(ids) == int: return [self.anns[ids]]
[ "def", "loadAnns", "(", "self", ",", "ids", "=", "[", "]", ")", ":", "if", "type", "(", "ids", ")", "==", "list", ":", "return", "[", "self", ".", "anns", "[", "id", "]", "for", "id", "in", "ids", "]", "elif", "type", "(", "ids", ")", "==", ...
35.5
9.1
def forecast_names(self): """get the forecast names from the pestpp options (if any). Returns None if no forecasts are named Returns ------- forecast_names : list a list of forecast names. """ if "forecasts" in self.pestpp_options.keys(): return self.pestpp_options["forecasts"].lower().split(',') elif "predictions" in self.pestpp_options.keys(): return self.pestpp_options["predictions"].lower().split(',') else: return None
[ "def", "forecast_names", "(", "self", ")", ":", "if", "\"forecasts\"", "in", "self", ".", "pestpp_options", ".", "keys", "(", ")", ":", "return", "self", ".", "pestpp_options", "[", "\"forecasts\"", "]", ".", "lower", "(", ")", ".", "split", "(", "','", ...
33.3125
18.8125
def autosize_fieldname(idfobject): """return autsizeable field names in idfobject""" # undocumented stuff in this code return [fname for (fname, dct) in zip(idfobject.objls, idfobject['objidd']) if 'autosizable' in dct]
[ "def", "autosize_fieldname", "(", "idfobject", ")", ":", "# undocumented stuff in this code", "return", "[", "fname", "for", "(", "fname", ",", "dct", ")", "in", "zip", "(", "idfobject", ".", "objls", ",", "idfobject", "[", "'objidd'", "]", ")", "if", "'auto...
46.666667
8.833333
def from_db_value(self, value, expression, connection, context): """ Convert a string from the database into an Enum value """ if value is None: return value return self.enum[value]
[ "def", "from_db_value", "(", "self", ",", "value", ",", "expression", ",", "connection", ",", "context", ")", ":", "if", "value", "is", "None", ":", "return", "value", "return", "self", ".", "enum", "[", "value", "]" ]
32.428571
12.142857
def get_pos(vcf_line): """ Very lightweight parsing of a vcf line to get position. Returns a dict containing: 'chrom': index of chromosome (int), indicates sort order 'pos': position on chromosome (int) """ if not vcf_line: return None vcf_data = vcf_line.strip().split('\t') return_data = dict() return_data['chrom'] = CHROM_INDEX[vcf_data[0]] return_data['pos'] = int(vcf_data[1]) return return_data
[ "def", "get_pos", "(", "vcf_line", ")", ":", "if", "not", "vcf_line", ":", "return", "None", "vcf_data", "=", "vcf_line", ".", "strip", "(", ")", ".", "split", "(", "'\\t'", ")", "return_data", "=", "dict", "(", ")", "return_data", "[", "'chrom'", "]",...
33.066667
13.333333
def user_info(self, kv): """Sets user_info dict entry through a tuple.""" key, value = kv self.__user_info[key] = value
[ "def", "user_info", "(", "self", ",", "kv", ")", ":", "key", ",", "value", "=", "kv", "self", ".", "__user_info", "[", "key", "]", "=", "value" ]
28
15.2
def _new_chart_graphicFrame(self, rId, x, y, cx, cy): """ Return a newly created `p:graphicFrame` element having the specified position and size and containing the chart identified by *rId*. """ id_, name = self.shape_id, self.name return CT_GraphicalObjectFrame.new_chart_graphicFrame( id_, name, rId, x, y, cx, cy )
[ "def", "_new_chart_graphicFrame", "(", "self", ",", "rId", ",", "x", ",", "y", ",", "cx", ",", "cy", ")", ":", "id_", ",", "name", "=", "self", ".", "shape_id", ",", "self", ".", "name", "return", "CT_GraphicalObjectFrame", ".", "new_chart_graphicFrame", ...
41.888889
15.222222
def end_of_history(self, current): # (M->) u'''Move to the end of the input history, i.e., the line currently being entered.''' self.history_cursor = len(self.history) current.set_line(self.history[-1].get_line_text())
[ "def", "end_of_history", "(", "self", ",", "current", ")", ":", "# (M->)\r", "self", ".", "history_cursor", "=", "len", "(", "self", ".", "history", ")", "current", ".", "set_line", "(", "self", ".", "history", "[", "-", "1", "]", ".", "get_line_text", ...
50
15.6
def virtual_network_delete(name, resource_group, **kwargs): ''' .. versionadded:: 2019.2.0 Delete a virtual network. :param name: The name of the virtual network to delete. :param resource_group: The resource group name assigned to the virtual network CLI Example: .. code-block:: bash salt-call azurearm_network.virtual_network_delete testnet testgroup ''' result = False netconn = __utils__['azurearm.get_client']('network', **kwargs) try: vnet = netconn.virtual_networks.delete( virtual_network_name=name, resource_group_name=resource_group ) vnet.wait() result = True except CloudError as exc: __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs) return result
[ "def", "virtual_network_delete", "(", "name", ",", "resource_group", ",", "*", "*", "kwargs", ")", ":", "result", "=", "False", "netconn", "=", "__utils__", "[", "'azurearm.get_client'", "]", "(", "'network'", ",", "*", "*", "kwargs", ")", "try", ":", "vne...
25.483871
25.806452
def get_avatar_upload_to(self, filename): """ Returns the path to upload the associated avatar to. """ dummy, ext = os.path.splitext(filename) return os.path.join( machina_settings.PROFILE_AVATAR_UPLOAD_TO, '{id}{ext}'.format(id=str(uuid.uuid4()).replace('-', ''), ext=ext), )
[ "def", "get_avatar_upload_to", "(", "self", ",", "filename", ")", ":", "dummy", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "return", "os", ".", "path", ".", "join", "(", "machina_settings", ".", "PROFILE_AVATAR_UPLOAD_TO", "...
46.571429
14.857143
def sort_annotations(annotations: List[Tuple[int, int, str]] ) -> List[Tuple[int, int, str]]: """ Sorts the annotations by their start_time. """ return sorted(annotations, key=lambda x: x[0])
[ "def", "sort_annotations", "(", "annotations", ":", "List", "[", "Tuple", "[", "int", ",", "int", ",", "str", "]", "]", ")", "->", "List", "[", "Tuple", "[", "int", ",", "int", ",", "str", "]", "]", ":", "return", "sorted", "(", "annotations", ",",...
54.25
10.75
def _send_guess(self,value): """ Send the argument as a string in a way that should (probably, maybe!) be processed properly by C++ calls like atoi, atof, etc. This method is NOT RECOMMENDED, particularly for floats, because values are often mangled silently. Instead, specify a format (e.g. "f") and use the CmdMessenger::readBinArg<CAST> method (e.g. c.readBinArg<float>();) to read the values on the arduino side. """ if type(value) != str and type(value) != bytes and self.give_warnings: w = "Warning: Sending {} as a string. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value) warnings.warn(w,Warning) if type(value) == float: return "{:.10e}".format(value).encode("ascii") elif type(value) == bool: return "{}".format(int(value)).encode("ascii") else: return self._send_string(value)
[ "def", "_send_guess", "(", "self", ",", "value", ")", ":", "if", "type", "(", "value", ")", "!=", "str", "and", "type", "(", "value", ")", "!=", "bytes", "and", "self", ".", "give_warnings", ":", "w", "=", "\"Warning: Sending {} as a string. This can give wi...
49.4
26.1
def timescale_sensitivity(T, k): """ calculate the sensitivity matrix for timescale k given transition matrix T. Parameters ---------- T : numpy.ndarray shape = (n, n) Transition matrix k : int timescale index for timescales of descending order (k = 0 for the infinite one) Returns ------- x : ndarray, shape=(n, n) Sensitivity matrix for entry index around transition matrix T. Reversibility is not assumed. """ eValues, rightEigenvectors = numpy.linalg.eig(T) leftEigenvectors = numpy.linalg.inv(rightEigenvectors) perm = numpy.argsort(eValues)[::-1] eValues = eValues[perm] rightEigenvectors = rightEigenvectors[:, perm] leftEigenvectors = leftEigenvectors[perm] eVal = eValues[k] sensitivity = numpy.outer(leftEigenvectors[k], rightEigenvectors[:, k]) if eVal < 1.0: factor = 1.0 / (numpy.log(eVal) ** 2) / eVal else: factor = 0.0 sensitivity *= factor return sensitivity
[ "def", "timescale_sensitivity", "(", "T", ",", "k", ")", ":", "eValues", ",", "rightEigenvectors", "=", "numpy", ".", "linalg", ".", "eig", "(", "T", ")", "leftEigenvectors", "=", "numpy", ".", "linalg", ".", "inv", "(", "rightEigenvectors", ")", "perm", ...
26.378378
24.702703
def get_b64_image_prediction(self, model_id, b64_encoded_string, token=None, url=API_GET_PREDICTION_IMAGE_URL): """ Gets a prediction from a supplied image enconded as a b64 string, useful when uploading images to a server backed by this library. :param model_id: string, once you train a model you'll be given a model id to use. :param b64_encoded_string: string, a b64 enconded string representation of an image. returns: requests object """ auth = 'Bearer ' + self.check_for_token(token) h = {'Authorization': auth, 'Cache-Control':'no-cache'} the_url = url encoded_string = b64_encoded_string m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id}) h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type} r = requests.post(the_url, headers=h, data=m) return r
[ "def", "get_b64_image_prediction", "(", "self", ",", "model_id", ",", "b64_encoded_string", ",", "token", "=", "None", ",", "url", "=", "API_GET_PREDICTION_IMAGE_URL", ")", ":", "auth", "=", "'Bearer '", "+", "self", ".", "check_for_token", "(", "token", ")", ...
52.888889
29.333333
def translation_table(language, filepath='supported_translations.json'): ''' Opens up file located under the etc directory containing language codes and prints them out. :param file: Path to location of json file :type file: str :return: language codes :rtype: dict ''' fullpath = abspath(join(dirname(__file__), 'etc', filepath)) if not isfile(fullpath): raise IOError('File does not exist at {0}'.format(fullpath)) with open(fullpath, 'rt') as fp: raw_data = json.load(fp).get(language, None) assert(raw_data is not None) return dict((code['language'], code['name']) for code in raw_data)
[ "def", "translation_table", "(", "language", ",", "filepath", "=", "'supported_translations.json'", ")", ":", "fullpath", "=", "abspath", "(", "join", "(", "dirname", "(", "__file__", ")", ",", "'etc'", ",", "filepath", ")", ")", "if", "not", "isfile", "(", ...
30.809524
24.52381
def pack_column_flat(self, value, components=None, offset=False): """ TODO: add documentation """ if components: if isinstance(components, str): components = [components] elif isinstance(components, list): components = components else: raise TypeError("components should be list or string, not {}".format(type(components))) elif isinstance(value, dict): components = value.keys() elif isinstance(value, list): components = self._dict.keys() value = {c: v for c,v in zip(components, value)} if offset: values = [] offsetN = 0 for c in components: values.append(value[c]+offsetN) offsetN += len(self[c]['vertices']) else: values = [value[c] for c in components] if len(value[components[0]].shape) > 1: return np.vstack(values) else: return np.hstack(values)
[ "def", "pack_column_flat", "(", "self", ",", "value", ",", "components", "=", "None", ",", "offset", "=", "False", ")", ":", "if", "components", ":", "if", "isinstance", "(", "components", ",", "str", ")", ":", "components", "=", "[", "components", "]", ...
33.290323
14.903226
def load_related(self, related, *related_fields): '''It returns a new :class:`Query` that automatically follows the foreign-key relationship ``related``. :parameter related: A field name corresponding to a :class:`ForeignKey` in :attr:`Query.model`. :parameter related_fields: optional :class:`Field` names for the ``related`` model to load. If not provided, all fields will be loaded. This function is :ref:`performance boost <performance-loadrelated>` when accessing the related fields of all (most) objects in your query. If Your model contains more than one foreign key, you can use this function in a generative way:: qs = myquery.load_related('rel1').load_related('rel2','field1','field2') :rtype: a new :class:`Query`.''' field = self._get_related_field(related) if not field: raise FieldError('"%s" is not a related field for "%s"' % (related, self._meta)) q = self._clone() return q._add_to_load_related(field, *related_fields)
[ "def", "load_related", "(", "self", ",", "related", ",", "*", "related_fields", ")", ":", "field", "=", "self", ".", "_get_related_field", "(", "related", ")", "if", "not", "field", ":", "raise", "FieldError", "(", "'\"%s\" is not a related field for \"%s\"'", "...
43.083333
25.083333
def _from_dict(cls, _dict): """Initialize a DialogNode object from a json dictionary.""" args = {} if 'dialog_node' in _dict: args['dialog_node'] = _dict.get('dialog_node') else: raise ValueError( 'Required property \'dialog_node\' not present in DialogNode JSON' ) if 'description' in _dict: args['description'] = _dict.get('description') if 'conditions' in _dict: args['conditions'] = _dict.get('conditions') if 'parent' in _dict: args['parent'] = _dict.get('parent') if 'previous_sibling' in _dict: args['previous_sibling'] = _dict.get('previous_sibling') if 'output' in _dict: args['output'] = DialogNodeOutput._from_dict(_dict.get('output')) if 'context' in _dict: args['context'] = _dict.get('context') if 'metadata' in _dict: args['metadata'] = _dict.get('metadata') if 'next_step' in _dict: args['next_step'] = DialogNodeNextStep._from_dict( _dict.get('next_step')) if 'title' in _dict: args['title'] = _dict.get('title') if 'type' in _dict or 'node_type' in _dict: args['node_type'] = _dict.get('type') or _dict.get('node_type') if 'event_name' in _dict: args['event_name'] = _dict.get('event_name') if 'variable' in _dict: args['variable'] = _dict.get('variable') if 'actions' in _dict: args['actions'] = [ DialogNodeAction._from_dict(x) for x in (_dict.get('actions')) ] if 'digress_in' in _dict: args['digress_in'] = _dict.get('digress_in') if 'digress_out' in _dict: args['digress_out'] = _dict.get('digress_out') if 'digress_out_slots' in _dict: args['digress_out_slots'] = _dict.get('digress_out_slots') if 'user_label' in _dict: args['user_label'] = _dict.get('user_label') if 'disabled' in _dict: args['disabled'] = _dict.get('disabled') if 'created' in _dict: args['created'] = string_to_datetime(_dict.get('created')) if 'updated' in _dict: args['updated'] = string_to_datetime(_dict.get('updated')) return cls(**args)
[ "def", "_from_dict", "(", "cls", ",", "_dict", ")", ":", "args", "=", "{", "}", "if", "'dialog_node'", "in", "_dict", ":", "args", "[", "'dialog_node'", "]", "=", "_dict", ".", "get", "(", "'dialog_node'", ")", "else", ":", "raise", "ValueError", "(", ...
43.792453
14.886792
def modified_data_decorator(function): """ Decorator to initialise the modified_data if necessary. To be used in list functions to modify the list """ @wraps(function) def func(self, *args, **kwargs): """Decorator function""" if not self.get_read_only() or not self.is_locked(): self.initialise_modified_data() return function(self, *args, **kwargs) return lambda: None return func
[ "def", "modified_data_decorator", "(", "function", ")", ":", "@", "wraps", "(", "function", ")", "def", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Decorator function\"\"\"", "if", "not", "self", ".", "get_read_only", "...
29.666667
16.2
def createConnection(self): """Return a CardConnection to the Card object.""" readerobj = None if isinstance(self.reader, Reader): readerobj = self.reader elif type(self.reader) == str: for reader in readers(): if self.reader == str(reader): readerobj = reader if readerobj: return readerobj.createConnection() else: # raise CardConnectionException( # 'not a valid reader: ' + str(self.reader)) return None
[ "def", "createConnection", "(", "self", ")", ":", "readerobj", "=", "None", "if", "isinstance", "(", "self", ".", "reader", ",", "Reader", ")", ":", "readerobj", "=", "self", ".", "reader", "elif", "type", "(", "self", ".", "reader", ")", "==", "str", ...
34.25
11.3125
def email_url_config(cls, url, backend=None): """Parses an email URL.""" config = {} url = urlparse(url) if not isinstance(url, cls.URL_CLASS) else url # Remove query strings path = url.path[1:] path = unquote_plus(path.split('?', 2)[0]) # Update with environment configuration config.update({ 'EMAIL_FILE_PATH': path, 'EMAIL_HOST_USER': _cast_urlstr(url.username), 'EMAIL_HOST_PASSWORD': _cast_urlstr(url.password), 'EMAIL_HOST': url.hostname, 'EMAIL_PORT': _cast_int(url.port), }) if backend: config['EMAIL_BACKEND'] = backend elif url.scheme not in cls.EMAIL_SCHEMES: raise ImproperlyConfigured('Invalid email schema %s' % url.scheme) elif url.scheme in cls.EMAIL_SCHEMES: config['EMAIL_BACKEND'] = cls.EMAIL_SCHEMES[url.scheme] if url.scheme in ('smtps', 'smtp+tls'): config['EMAIL_USE_TLS'] = True elif url.scheme == 'smtp+ssl': config['EMAIL_USE_SSL'] = True if url.query: config_options = {} for k, v in parse_qs(url.query).items(): opt = {k.upper(): _cast_int(v[0])} if k.upper() in cls._EMAIL_BASE_OPTIONS: config.update(opt) else: config_options.update(opt) config['OPTIONS'] = config_options return config
[ "def", "email_url_config", "(", "cls", ",", "url", ",", "backend", "=", "None", ")", ":", "config", "=", "{", "}", "url", "=", "urlparse", "(", "url", ")", "if", "not", "isinstance", "(", "url", ",", "cls", ".", "URL_CLASS", ")", "else", "url", "# ...
33.813953
17.534884
def get_compiler(self, using=None, connection=None): """ Overrides the Query method get_compiler in order to return an instance of the above custom compiler. """ # Copy the body of this method from Django except the final # return statement. We will ignore code coverage for this. if using is None and connection is None: # pragma: no cover raise ValueError("Need either using or connection") if using: connection = connections[using] # Check that the compiler will be able to execute the query for alias, aggregate in self.annotation_select.items(): connection.ops.check_expression_support(aggregate) # Instantiate the custom compiler. return { CTEUpdateQuery: CTEUpdateQueryCompiler, CTEInsertQuery: CTEInsertQueryCompiler, CTEDeleteQuery: CTEDeleteQueryCompiler, CTEAggregateQuery: CTEAggregateQueryCompiler, }.get(self.__class__, CTEQueryCompiler)(self, connection, using)
[ "def", "get_compiler", "(", "self", ",", "using", "=", "None", ",", "connection", "=", "None", ")", ":", "# Copy the body of this method from Django except the final", "# return statement. We will ignore code coverage for this.", "if", "using", "is", "None", "and", "connect...
52.1
16.75
def update_prompt(self, name, new_template=None): """This is called when a prompt template is updated. It processes abbreviations used in the prompt template (like \#) and calculates how many invisible characters (ANSI colour escapes) the resulting prompt contains. It is also called for each prompt on changing the colour scheme. In both cases, traitlets should take care of calling this automatically. """ if new_template is not None: self.templates[name] = multiple_replace(prompt_abbreviations, new_template) # We count invisible characters (colour escapes) on the last line of the # prompt, to calculate the width for lining up subsequent prompts. invis_chars = _lenlastline(self._render(name, color=True)) - \ _lenlastline(self._render(name, color=False)) self.invisible_chars[name] = invis_chars
[ "def", "update_prompt", "(", "self", ",", "name", ",", "new_template", "=", "None", ")", ":", "if", "new_template", "is", "not", "None", ":", "self", ".", "templates", "[", "name", "]", "=", "multiple_replace", "(", "prompt_abbreviations", ",", "new_template...
58
25.125
def skip(self, num_bytes): """Jump the ahead the specified bytes in the buffer.""" if num_bytes is None: self._offset = len(self._data) else: self._offset += num_bytes
[ "def", "skip", "(", "self", ",", "num_bytes", ")", ":", "if", "num_bytes", "is", "None", ":", "self", ".", "_offset", "=", "len", "(", "self", ".", "_data", ")", "else", ":", "self", ".", "_offset", "+=", "num_bytes" ]
35
9.5
def __get_rev(self, key, version, **kwa): '''Obtain particular version of the doc at key.''' if '_doc' in kwa: doc = kwa['_doc'] else: if type(version) is int: if version == 0: order = pymongo.ASCENDING elif version == -1: order = pymongo.DESCENDING doc = self._collection.find_one({'k': key}, sort=[['d', order]]) elif type(version) is datetime: ver = self.__round_time(version) doc = self._collection.find_one({'k': key, 'd': ver}) if doc is None: raise KeyError('Supplied key `{0}` or version `{1}` does not exist' .format(key, str(version))) coded_val = doc['v'] return pickle.loads(coded_val)
[ "def", "__get_rev", "(", "self", ",", "key", ",", "version", ",", "*", "*", "kwa", ")", ":", "if", "'_doc'", "in", "kwa", ":", "doc", "=", "kwa", "[", "'_doc'", "]", "else", ":", "if", "type", "(", "version", ")", "is", "int", ":", "if", "versi...
33.190476
16.428571
def apply_translation(self, offset): """ Apply a transformation matrix to the current path in- place Parameters ----------- offset : float or (3,) float Translation to be applied to mesh """ # work on 2D and 3D paths dimension = self.vertices.shape[1] # make sure offset is correct length and type offset = np.array( offset, dtype=np.float64).reshape(dimension) # create a homogenous transform matrix = np.eye(dimension + 1) # apply the offset matrix[:dimension, dimension] = offset self.apply_transform(matrix)
[ "def", "apply_translation", "(", "self", ",", "offset", ")", ":", "# work on 2D and 3D paths", "dimension", "=", "self", ".", "vertices", ".", "shape", "[", "1", "]", "# make sure offset is correct length and type", "offset", "=", "np", ".", "array", "(", "offset"...
31.8
12
def normalize_feature_objects(feature_objs): """Takes an iterable of GeoJSON-like Feature mappings or an iterable of objects with a geo interface and normalizes it to the former.""" for obj in feature_objs: if hasattr(obj, "__geo_interface__") and \ 'type' in obj.__geo_interface__.keys() and \ obj.__geo_interface__['type'] == 'Feature': yield obj.__geo_interface__ elif isinstance(obj, dict) and 'type' in obj and \ obj['type'] == 'Feature': yield obj else: raise ValueError("Did not recognize object {0}" "as GeoJSON Feature".format(obj))
[ "def", "normalize_feature_objects", "(", "feature_objs", ")", ":", "for", "obj", "in", "feature_objs", ":", "if", "hasattr", "(", "obj", ",", "\"__geo_interface__\"", ")", "and", "'type'", "in", "obj", ".", "__geo_interface__", ".", "keys", "(", ")", "and", ...
44.666667
11.533333
def pretty_duration(seconds): """ Returns a user-friendly representation of the provided duration in seconds. For example: 62.8 => "1m2.8s", or 129837.8 => "2d12h4m57.8s" """ if seconds is None: return '' ret = '' if seconds >= 86400: ret += '{:.0f}d'.format(int(seconds / 86400)) seconds = seconds % 86400 if seconds >= 3600: ret += '{:.0f}h'.format(int(seconds / 3600)) seconds = seconds % 3600 if seconds >= 60: ret += '{:.0f}m'.format(int(seconds / 60)) seconds = seconds % 60 if seconds > 0: ret += '{:.1f}s'.format(seconds) return ret
[ "def", "pretty_duration", "(", "seconds", ")", ":", "if", "seconds", "is", "None", ":", "return", "''", "ret", "=", "''", "if", "seconds", ">=", "86400", ":", "ret", "+=", "'{:.0f}d'", ".", "format", "(", "int", "(", "seconds", "/", "86400", ")", ")"...
32.947368
13.789474
def _evaluate(self,R,z,phi=0.,t=0.): """ NAME: _evaluate PURPOSE: evaluate the potential at R,phi,t INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t - time OUTPUT: Phi(R,z,phi,t) HISTORY: 2010-11-24 - Started - Bovy (NYU) """ #Calculate relevant time if t < self._tform: smooth= 0. elif t < self._tsteady: deltat= t-self._tform xi= 2.*deltat/(self._tsteady-self._tform)-1. smooth= (3./16.*xi**5.-5./8*xi**3.+15./16.*xi+.5) else: #bar is fully on smooth= 1. r2= R**2.+z**2. r= numpy.sqrt(r2) if r <= self._rb: return self._af*smooth*numpy.cos(2.*(phi-self._omegab*t-self._barphi))\ *((r/self._rb)**3.-2.)*R**2./r2 else: return -self._af*smooth*numpy.cos(2.*(phi-self._omegab*t- self._barphi))\ *(self._rb/r)**3.\ *R**2./r2
[ "def", "_evaluate", "(", "self", ",", "R", ",", "z", ",", "phi", "=", "0.", ",", "t", "=", "0.", ")", ":", "#Calculate relevant time", "if", "t", "<", "self", ".", "_tform", ":", "smooth", "=", "0.", "elif", "t", "<", "self", ".", "_tsteady", ":"...
33.628571
16.371429
async def kick(self, user_id: base.Integer, until_date: typing.Union[base.Integer, None] = None): """ Use this method to kick a user from a group, a supergroup or a channel. In the case of supergroups and channels, the user will not be able to return to the group on their own using invite links, etc., unless unbanned first. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. Note: In regular groups (non-supergroups), this method will only work if the ‘All Members Are Admins’ setting is off in the target group. Otherwise members may only be removed by the group's creator or by the member that added them. Source: https://core.telegram.org/bots/api#kickchatmember :param user_id: Unique identifier of the target user :type user_id: :obj:`base.Integer` :param until_date: Date when the user will be unbanned, unix time. :type until_date: :obj:`typing.Union[base.Integer, None]` :return: Returns True on success. :rtype: :obj:`base.Boolean` """ return await self.bot.kick_chat_member(self.id, user_id=user_id, until_date=until_date)
[ "async", "def", "kick", "(", "self", ",", "user_id", ":", "base", ".", "Integer", ",", "until_date", ":", "typing", ".", "Union", "[", "base", ".", "Integer", ",", "None", "]", "=", "None", ")", ":", "return", "await", "self", ".", "bot", ".", "kic...
53.304348
30.608696
def publish(message, exchange=None):
    """
    Publish a message to an exchange.

    This is a synchronous call, meaning that when this function returns, an
    acknowledgment has been received from the message broker and you can be
    certain the message was published successfully.

    There are some cases where an error occurs despite your message being
    successfully published. For example, if a network partition occurs after
    the message is received by the broker. Therefore, you may publish duplicate
    messages. For complete details, see the :ref:`publishing` documentation.

    >>> from fedora_messaging import api
    >>> message = api.Message(body={'Hello': 'world'}, topic='Hi')
    >>> api.publish(message)

    If an attempt to publish fails because the broker rejects the message, it
    is not retried. Connection attempts to the broker can be configured using
    the "connection_attempts" and "retry_delay" options in the broker URL. See
    :class:`pika.connection.URLParameters` for details.

    Args:
        message (message.Message): The message to publish.
        exchange (str): The name of the AMQP exchange to publish to; defaults
            to :ref:`conf-publish-exchange`

    Raises:
        fedora_messaging.exceptions.PublishReturned: Raised if the broker rejects the
            message.
        fedora_messaging.exceptions.ConnectionException: Raised if a connection error
            occurred before the publish confirmation arrived.
        fedora_messaging.exceptions.ValidationError: Raised if the message
            fails validation with its JSON schema. This only depends on the
            message you are trying to send, the AMQP server is not involved.
    """
    # Fire the pre-publish hook before any broker interaction happens.
    pre_publish_signal.send(publish, message=message)

    if exchange is None:
        # Fall back to the configured default exchange.
        exchange = config.conf["publish_exchange"]

    global _session_cache
    # Lazily create one PublisherSession and reuse it across calls.
    # NOTE(review): the hasattr pattern suggests _session_cache is a
    # threading.local so each thread gets its own session -- confirm at
    # the module-level definition.
    if not hasattr(_session_cache, "session"):
        _session_cache.session = _session.PublisherSession()
    try:
        _session_cache.session.publish(message, exchange=exchange)
        # Success hook only fires after the broker confirmed the publish.
        publish_signal.send(publish, message=message)
    except exceptions.PublishException as e:
        # Failure hook carries the reason, then the error propagates.
        publish_failed_signal.send(publish, message=message, reason=e)
        raise
[ "def", "publish", "(", "message", ",", "exchange", "=", "None", ")", ":", "pre_publish_signal", ".", "send", "(", "publish", ",", "message", "=", "message", ")", "if", "exchange", "is", "None", ":", "exchange", "=", "config", ".", "conf", "[", "\"publish...
43.117647
25.941176
def add_section(self, name):
    """Return the section item called `name`, creating it when absent.

    Arguments:
        name (str): Name of section
    """
    assert isinstance(name, str)

    # Reuse an already-registered section with this name, if any.
    existing = next((s for s in self.sections if s.name == name), None)
    if existing is not None:
        return existing

    # Build a fresh section item from the shared defaults template.
    item = defaults["common"].copy()
    item.update({"name": name, "itemType": "section"})

    item = self.add_item(item)
    self.sections.append(item)

    return item
[ "def", "add_section", "(", "self", ",", "name", ")", ":", "assert", "isinstance", "(", "name", ",", "str", ")", "# Skip existing sections", "for", "section", "in", "self", ".", "sections", ":", "if", "section", ".", "name", "==", "name", ":", "return", "...
21.434783
17.130435
def print_commandless_help(self):
    """Print a colorized help screen derived from the module docstring.

    The first line of ``self.m_doc`` is shown as a title; the remainder is
    split on an ``"author :"`` marker so the body and author credit can be
    colored separately. The full raw docstring is echoed on error.
    """
    doc_help = self.m_doc.strip().split("\n")

    # NOTE(review): str.split always returns at least one element, so this
    # condition is always true and the else-branch below is unreachable;
    # `> 1` may have been intended -- confirm before changing behavior.
    if len(doc_help) > 0:
        print("\033[33m--\033[0m")
        # First doc line is the title, printed in blue.
        print("\033[34m" + doc_help[0] + "\033[0m")
        asp = "author :"
        doc_help_rest = "\n".join(doc_help[1:])

        if asp in doc_help_rest:
            # Separate the body text from the author credit.
            doc_help_rest = doc_help_rest.split("author :")

            if len(doc_help_rest) > 1:
                print("\n\033[33m" + doc_help_rest[0].strip() + "\n")
                # Re-attach the marker so the credit reads naturally.
                print("\033[37m" + asp + doc_help_rest[1] + "\033[0m")
            else:
                print(doc_help_rest)
        else:
            print(doc_help_rest)

        print("\033[33m--\033[0m")
    else:
        print("\033[31mERROR, doc should have more then one line\033[0m")
        print(self.m_doc)
[ "def", "print_commandless_help", "(", "self", ")", ":", "doc_help", "=", "self", ".", "m_doc", ".", "strip", "(", ")", ".", "split", "(", "\"\\n\"", ")", "if", "len", "(", "doc_help", ")", ">", "0", ":", "print", "(", "\"\\033[33m--\\033[0m\"", ")", "p...
33.185185
16.888889
def get_region_products(self, region):
    """Return the product information for the given region.

    Args:
        - region: region name, e.g. "nq"

    Returns:
        The matching region's product list, or None when the region
        listing fails or no region with that name exists.
    """
    regions, _ret_info = self.list_regions()
    if regions is None:
        return None
    # First region entry whose name matches, or None when absent.
    match = next((r for r in regions if r.get('name') == region), None)
    return match.get('products') if match is not None else None
[ "def", "get_region_products", "(", "self", ",", "region", ")", ":", "regions", ",", "retInfo", "=", "self", ".", "list_regions", "(", ")", "if", "regions", "is", "None", ":", "return", "None", "for", "r", "in", "regions", ":", "if", "r", ".", "get", ...
21.411765
16.529412
def sample(self, bqm, chain_strength=1.0, chain_break_fraction=True, **parameters):
    """Sample the binary quadratic model.

    Note:
        At the initial sample(..) call, it will find a suitable embedding and initialize the remaining attributes
        before sampling the bqm. All following sample(..) calls will reuse that initial embedding.

    Args:
        bqm (:obj:`dimod.BinaryQuadraticModel`):
            Binary quadratic model to be sampled from.

        chain_strength (float, optional, default=1.0):
            Magnitude of the quadratic bias (in SPIN-space) applied between variables to create chains. Note
            that the energy penalty of chain breaks is 2 * `chain_strength`.

        chain_break_fraction (bool, optional, default=True):
            If True, a ‘chain_break_fraction’ field is added to the unembedded response which report what
            fraction of the chains were broken before unembedding.

        **parameters:
            Parameters for the sampling method, specified by the child sampler.

    Returns:
        :class:`dimod.SampleSet`
    """
    # Embedding is computed once and cached; subsequent calls reuse it.
    if self.embedding is None:
        # Find embedding
        child = self.child  # Solve the problem on the child system
        __, target_edgelist, target_adjacency = child.structure
        # NOTE(review): target_adjacency is unpacked but never used here.
        # Self-loops ensure isolated (linear-only) variables get embedded.
        source_edgelist = list(bqm.quadratic) + [(v, v) for v in bqm.linear]  # Add self-loops for single variables
        embedding = minorminer.find_embedding(source_edgelist, target_edgelist)

        # Initialize properties that need embedding
        super(LazyFixedEmbeddingComposite, self)._set_graph_related_init(embedding=embedding)

    # Delegate actual sampling to the fixed-embedding parent class.
    return super(LazyFixedEmbeddingComposite, self).sample(bqm, chain_strength=chain_strength,
                                                           chain_break_fraction=chain_break_fraction,
                                                           **parameters)
[ "def", "sample", "(", "self", ",", "bqm", ",", "chain_strength", "=", "1.0", ",", "chain_break_fraction", "=", "True", ",", "*", "*", "parameters", ")", ":", "if", "self", ".", "embedding", "is", "None", ":", "# Find embedding", "child", "=", "self", "."...
54.4
36.114286
def hourly_horizontal_infrared(self):
    """A data collection containing hourly horizontal infrared intensity in W/m2.

    Combines the hourly sky cover, dry-bulb temperature and derived dew-point
    temperature into one horizontal infrared value per hour, then packages
    the result as daily data collections.
    """
    # Hourly inputs pulled from the companion condition objects.
    sky_cover = self._sky_condition.hourly_sky_cover
    db_temp = self._dry_bulb_condition.hourly_values
    dp_temp = self._humidity_condition.hourly_dew_point_values(
        self._dry_bulb_condition)

    horiz_ir = []
    # NOTE(review): xrange implies Python 2 (or a compat alias) -- confirm.
    for i in xrange(len(sky_cover)):
        # One horizontal IR value per hour from the three aligned inputs.
        horiz_ir.append(
            calc_horizontal_infrared(sky_cover[i], db_temp[i], dp_temp[i]))

    return self._get_daily_data_collections(
        energyflux.HorizontalInfraredRadiationIntensity(), 'W/m2', horiz_ir)
[ "def", "hourly_horizontal_infrared", "(", "self", ")", ":", "sky_cover", "=", "self", ".", "_sky_condition", ".", "hourly_sky_cover", "db_temp", "=", "self", ".", "_dry_bulb_condition", ".", "hourly_values", "dp_temp", "=", "self", ".", "_humidity_condition", ".", ...
43
17.533333
def format_number(x):
    """Convert a number to its string representation.

    Floats are rendered with up to 17 significant digits (the amount
    IEEE754-1985 requires to round-trip a 64-bit double) using the ``g``
    presentation type, which trims trailing zeros; this mirrors the NRRD
    C implementation. Every other value is passed through :func:`str`.

    .. note::

        Not all fractional numbers have an exact binary representation;
        e.g. ``0.1`` formats as ``0.10000000000000001``.

    Parameters
    ----------
    x : :class:`int` or :class:`float`
        Number to convert to string

    Returns
    -------
    vector : :class:`str`
        String of number :obj:`x`
    """
    if not isinstance(x, float):
        return str(x)
    # 17 significant digits guarantee a lossless double -> text round trip;
    # 'g' (unlike 'f') counts significant digits and drops padding zeros.
    return '{:.17g}'.format(x)
[ "def", "format_number", "(", "x", ")", ":", "if", "isinstance", "(", "x", ",", "float", ")", ":", "# Helps prevent loss of precision as using str() in Python 2 only prints 12 digits of precision.", "# However, IEEE754-1985 standard says that 17 significant decimal digits is required to...
40.4
33.8
def set(self, key, value):
    """
    Sets a single value in a preconfigured data file.

    Arguments:
    key -- The full dot-notated key to set the value for.
    value -- The value to set.
    """
    d = self.data.data
    # Walk (creating as needed) the parent dicts for every component
    # except the last; the final component is handled by parse_value,
    # which receives the full dotted key.
    # (Fix: the previous `latest = keys.pop()` bound an unused variable --
    # the pop was only for its side effect of dropping the last component.)
    for k in key.split('.')[:-1]:
        d = d.setdefault(k, {})
    # Load the schema so the value can be validated/coerced on the way in.
    schema = Schema().load(self.schema_file)
    self.data.internal = schema.internal
    self.parse_value(d, '', key, value, schema.get(key))
    # Persist the updated data structure back to disk.
    self.data.save(self.data_file)
[ "def", "set", "(", "self", ",", "key", ",", "value", ")", ":", "d", "=", "self", ".", "data", ".", "data", "keys", "=", "key", ".", "split", "(", "'.'", ")", "latest", "=", "keys", ".", "pop", "(", ")", "for", "k", "in", "keys", ":", "d", "...
30.666667
15.222222
def svg2paths2(svg_file_location, return_svg_attributes=True, convert_circles_to_paths=True, convert_ellipses_to_paths=True, convert_lines_to_paths=True, convert_polylines_to_paths=True, convert_polygons_to_paths=True, convert_rectangles_to_paths=True):
    """Convenience function; identical to svg2paths() except that
    return_svg_attributes=True by default.  See svg2paths() docstring for more
    info."""
    # Forward every argument unchanged to svg2paths; only the default of
    # return_svg_attributes differs from the underlying function.
    options = dict(
        svg_file_location=svg_file_location,
        return_svg_attributes=return_svg_attributes,
        convert_circles_to_paths=convert_circles_to_paths,
        convert_ellipses_to_paths=convert_ellipses_to_paths,
        convert_lines_to_paths=convert_lines_to_paths,
        convert_polylines_to_paths=convert_polylines_to_paths,
        convert_polygons_to_paths=convert_polygons_to_paths,
        convert_rectangles_to_paths=convert_rectangles_to_paths,
    )
    return svg2paths(**options)
[ "def", "svg2paths2", "(", "svg_file_location", ",", "return_svg_attributes", "=", "True", ",", "convert_circles_to_paths", "=", "True", ",", "convert_ellipses_to_paths", "=", "True", ",", "convert_lines_to_paths", "=", "True", ",", "convert_polylines_to_paths", "=", "Tr...
56
16.894737
def _serialize(self, include_run_logs=False, strict_json=False):
    """ Serialize a representation of this Job to a Python dict object.

    Args:
        include_run_logs (bool): forwarded to each task's _serialize so the
            task run logs are embedded in the result.
        strict_json (bool): when True, round-trip the result through
            StrictJSONEncoder so every value is JSON-safe.

    Returns:
        dict: job metadata, the serialized tasks and their dependency graph.
    """
    # return tasks in sorted order if graph is in a valid state
    try:
        topo_sorted = self.topological_sort()
        t = [self.tasks[task]._serialize(include_run_logs=include_run_logs,
                                         strict_json=strict_json)
             for task in topo_sorted]
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Any failure to sort (e.g. a cyclic graph)
        # falls back to arbitrary task order.
        t = [task._serialize(include_run_logs=include_run_logs,
                             strict_json=strict_json)
             for task in self.tasks.itervalues()]

    # Convert adjacency sets to lists so the structure is JSON-serializable.
    dependencies = {}
    for k, v in self.graph.iteritems():
        dependencies[k] = list(v)

    result = {'job_id': self.job_id,
              'name': self.name,
              'parent_id': self.parent.dagobah_id,
              'tasks': t,
              'dependencies': dependencies,
              'status': self.state.status,
              'cron_schedule': self.cron_schedule,
              'next_run': self.next_run,
              'notes': self.notes}

    if strict_json:
        # Round-trip to guarantee the dict only contains JSON-safe values.
        result = json.loads(json.dumps(result, cls=StrictJSONEncoder))
    return result
[ "def", "_serialize", "(", "self", ",", "include_run_logs", "=", "False", ",", "strict_json", "=", "False", ")", ":", "# return tasks in sorted order if graph is in a valid state", "try", ":", "topo_sorted", "=", "self", ".", "topological_sort", "(", ")", "t", "=", ...
40.741935
17.806452
def obj_with_unit(obj, unit):
    """
    Returns a `FloatWithUnit` instance if obj is scalar, a dictionary of
    objects with units if obj is a dict, else an instance of
    `ArrayWithFloatWithUnit`.

    Args:
        obj: A scalar number, a mapping of values, or an array-like object.
        unit: Specific units (eV, Ha, m, ang, etc.).
    """
    # Map the unit name (e.g. "eV") onto its unit type (e.g. energy).
    unit_type = _UNAME2UTYPE[unit]

    if isinstance(obj, numbers.Number):
        return FloatWithUnit(obj, unit=unit, unit_type=unit_type)
    elif isinstance(obj, collections.Mapping):
        # NOTE(review): collections.Mapping (the alias without .abc) was
        # removed in Python 3.10; on modern interpreters this must be
        # collections.abc.Mapping -- confirm the supported Python range
        # before changing.
        # Recursively attach the unit to every value of the mapping.
        return {k: obj_with_unit(v, unit) for k,v in obj.items()}
    else:
        return ArrayWithUnit(obj, unit=unit, unit_type=unit_type)
[ "def", "obj_with_unit", "(", "obj", ",", "unit", ")", ":", "unit_type", "=", "_UNAME2UTYPE", "[", "unit", "]", "if", "isinstance", "(", "obj", ",", "numbers", ".", "Number", ")", ":", "return", "FloatWithUnit", "(", "obj", ",", "unit", "=", "unit", ","...
34.588235
18.588235
def _bracket(self, qinit, f0, fun):
    """Find a bracket that does contain the minimum

    Starting from an initial step qinit (with f(0) == f0), returns three
    (q, f) pairs (a, b, c) where the middle point has a function value
    below both endpoints, or returns None when no bracket is found within
    self.qtol / self.max_iter.

    NOTE(review): `phi` is presumably the golden ratio defined at module
    level (step factors 1+phi) -- confirm.
    """
    self.num_bracket = 0
    qa = qinit
    fa = fun(qa)
    counter = 0
    if fa >= f0:
        # First trial step went uphill: shrink the step toward zero until
        # the function dips below f0 (then [0, qa, qb] is a bracket).
        while True:
            self.num_bracket += 1
            #print "  bracket shrink"
            qb, fb = qa, fa
            qa /= 1+phi
            fa = fun(qa)
            if qa < self.qtol:
                # Step shrank below tolerance without finding a descent.
                return
            if fa < f0:
                return (0, f0), (qa, fa), (qb, fb)
            counter += 1
            if self.max_iter is not None and counter > self.max_iter:
                return
    else:
        # First trial step went downhill: grow the step until the function
        # starts rising again.
        self.num_bracket += 1
        #print "  bracket grow1"
        qb, fb = qa, fa
        qa *= (1+phi)
        fa = fun(qa)
        if fa >= fb:
            return (0, f0), (qb, fb), (qa, fa)
        while True:
            self.num_bracket += 1
            #print "  bracket grow2"
            # Slide the triple outward: extrapolate the next point from the
            # last two with golden-ratio spacing.
            qc, fc = qb, fb
            qb, fb = qa, fa
            qa = qb*(1+phi) - qc
            fa = fun(qa)
            if fa >= fb:
                return (qc, fc), (qb, fb), (qa, fa)
            counter += 1
            if self.max_iter is not None and counter > self.max_iter:
                return
[ "def", "_bracket", "(", "self", ",", "qinit", ",", "f0", ",", "fun", ")", ":", "self", ".", "num_bracket", "=", "0", "qa", "=", "qinit", "fa", "=", "fun", "(", "qa", ")", "counter", "=", "0", "if", "fa", ">=", "f0", ":", "while", "True", ":", ...
33.175
12.775
def _parse_params(self, params=None): """ Parse parameters. Combine default and user-defined parameters. """ prm = self.default_params.copy() if params is not None: prm.update(params) if prm["background"]: # Absolute path, just to be sure prm["background"] = os.path.abspath(prm["background"]) prm["background"] = " --negSet {0} ".format( prm["background"]) prm["strand"] = "" if not prm["single"]: prm["strand"] = " --revcomp " return prm
[ "def", "_parse_params", "(", "self", ",", "params", "=", "None", ")", ":", "prm", "=", "self", ".", "default_params", ".", "copy", "(", ")", "if", "params", "is", "not", "None", ":", "prm", ".", "update", "(", "params", ")", "if", "prm", "[", "\"ba...
28.285714
14.666667
def _tag_most_likely(examples):
    """
    Return a list of date elements by choosing the most likely element for a token
    within examples (context-free).

    Each example string is tokenized by character class; a date element (or a
    Filler for literal text) is then assigned per token position based on how
    often each candidate element matches across all examples.
    """
    tokenized_examples = [_tokenize_by_character_class(example) for example in examples]

    # We currently need the tokenized_examples to all have the same length, so drop instances that have a length
    # that does not equal the mode of lengths within tokenized_examples
    token_lengths = [len(e) for e in tokenized_examples]
    token_lengths_mode = _mode(token_lengths)
    tokenized_examples = [example for example in tokenized_examples if len(example) == token_lengths_mode]

    # Now, we iterate through the tokens, assigning date elements based on their likelihood. In cases where
    # the assignments are unlikely for all date elements, assign filler.
    most_likely = []
    for token_index in range(0, token_lengths_mode):
        # Column of tokens at this position across all surviving examples.
        tokens = [token[token_index] for token in tokenized_examples]
        probabilities = _percent_match(DATE_ELEMENTS, tokens)
        max_prob = max(probabilities)
        if max_prob < 0.5:
            # No date element matches a majority of tokens: treat the most
            # common literal token as filler text.
            most_likely.append(Filler(_mode(tokens)))
        else:
            if probabilities.count(max_prob) == 1:
                # Unique best match.
                most_likely.append(DATE_ELEMENTS[probabilities.index(max_prob)])
            else:
                # Tie between several elements: collect all top scorers and
                # let _most_restrictive pick among them.
                choices = []
                for index, prob in enumerate(probabilities):
                    if prob == max_prob:
                        choices.append(DATE_ELEMENTS[index])
                most_likely.append(_most_restrictive(choices))

    return most_likely
[ "def", "_tag_most_likely", "(", "examples", ")", ":", "tokenized_examples", "=", "[", "_tokenize_by_character_class", "(", "example", ")", "for", "example", "in", "examples", "]", "# We currently need the tokenized_examples to all have the same length, so drop instances that have...
48.9375
26.375
def _hz_to_semitones(self, hz): """ Convert hertz into a number of semitones above or below some reference value, in this case, A440 """ return np.log(hz / self._a440) / np.log(self._a)
[ "def", "_hz_to_semitones", "(", "self", ",", "hz", ")", ":", "return", "np", ".", "log", "(", "hz", "/", "self", ".", "_a440", ")", "/", "np", ".", "log", "(", "self", ".", "_a", ")" ]
36.666667
11.666667
def cli(env, quote):
    """View a quote"""
    manager = ordering.OrderingManager(env.client)
    result = manager.get_quote_details(quote)

    # The quote's first order item carries the package info for the title.
    package = result['order']['items'][0]['package']
    title = "{} - Package: {}, Id {}".format(result.get('name'),
                                             package['keyName'],
                                             package['id'])

    table = formatting.Table([
        'Category', 'Description', 'Quantity', 'Recurring', 'One Time'
    ], title=title)
    # Left-align the text-heavy columns.
    table.align['Category'] = 'l'
    table.align['Description'] = 'l'

    # One table row per line item on the quoted order.
    items = lookup(result, 'order', 'items')
    for item in items:
        table.add_row([
            item.get('categoryCode'),
            item.get('description'),
            item.get('quantity'),
            item.get('recurringFee'),
            item.get('oneTimeFee')
        ])

    env.fout(table)
[ "def", "cli", "(", "env", ",", "quote", ")", ":", "manager", "=", "ordering", ".", "OrderingManager", "(", "env", ".", "client", ")", "result", "=", "manager", ".", "get_quote_details", "(", "quote", ")", "package", "=", "result", "[", "'order'", "]", ...
30.84
18.04