text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def _wrap_callback_parse_link_event(subscription, on_data, message): """ Wraps a user callback to parse LinkEvents from a WebSocket data message """ if message.type == message.DATA: if message.data.type == yamcs_pb2.LINK_EVENT: link_message = getattr(message.data, 'linkEvent') link_event = LinkEvent(link_message) #pylint: disable=protected-access subscription._process(link_event) if on_data: on_data(link_event)
[ "def", "_wrap_callback_parse_link_event", "(", "subscription", ",", "on_data", ",", "message", ")", ":", "if", "message", ".", "type", "==", "message", ".", "DATA", ":", "if", "message", ".", "data", ".", "type", "==", "yamcs_pb2", ".", "LINK_EVENT", ":", ...
38.923077
9.076923
def _load_cache(self): """ the method is implemented for the purpose of optimization, byte positions will not be re-read from a file that has already been used, if the content of the file has changed, and the name has been left the same, the old version of byte offsets will be loaded :return: list of byte offsets from existing file """ try: with open(self.__cache_path, 'rb') as f: return load(f) except FileNotFoundError: return except IsADirectoryError as e: raise IsADirectoryError(f'Please delete {self.__cache_path} directory') from e except (UnpicklingError, EOFError) as e: raise UnpicklingError(f'Invalid cache file {self.__cache_path}. Please delete it') from e
[ "def", "_load_cache", "(", "self", ")", ":", "try", ":", "with", "open", "(", "self", ".", "__cache_path", ",", "'rb'", ")", "as", "f", ":", "return", "load", "(", "f", ")", "except", "FileNotFoundError", ":", "return", "except", "IsADirectoryError", "as...
50.0625
24.5625
def change_db(self, db, user=None): """Change connect database.""" # Get original config and change database key config = self._config config['database'] = db if user: config['user'] = user self.database = db # Close current database connection self._disconnect() # Reconnect to the new database self._connect(config)
[ "def", "change_db", "(", "self", ",", "db", ",", "user", "=", "None", ")", ":", "# Get original config and change database key", "config", "=", "self", ".", "_config", "config", "[", "'database'", "]", "=", "db", "if", "user", ":", "config", "[", "'user'", ...
28.428571
13.714286
def get_values(item): """Extract value from regex hit.""" fracs = r'|'.join(r.UNI_FRAC) value = item.group(2) value = re.sub(ur'(?<=\d)(%s)10' % r.MULTIPLIERS, 'e', value) value = re.sub(fracs, callback, value, re.IGNORECASE) value = re.sub(' +', ' ', value) range_separator = re.findall(ur'\d+ ?(-|and|(?:- ?)?to) ?\d', value) uncer_separator = re.findall(ur'\d+ ?(\+/-|±) ?\d', value) fract_separator = re.findall(ur'\d+/\d+', value) uncertainty = None if range_separator: values = value.split(range_separator[0]) values = [float(re.sub(r'-$', '', i)) for i in values] elif uncer_separator: values = [float(i) for i in value.split(uncer_separator[0])] uncertainty = values[1] values = [values[0]] elif fract_separator: values = value.split() if len(values) > 1: values = [float(values[0]) + float(Fraction(values[1]))] else: values = [float(Fraction(values[0]))] else: values = [float(re.sub(r'-$', '', value))] logging.debug(u'\tUncertainty: %s', uncertainty) logging.debug(u'\tValues: %s', values) return uncertainty, values
[ "def", "get_values", "(", "item", ")", ":", "fracs", "=", "r'|'", ".", "join", "(", "r", ".", "UNI_FRAC", ")", "value", "=", "item", ".", "group", "(", "2", ")", "value", "=", "re", ".", "sub", "(", "ur'(?<=\\d)(%s)10'", "%", "r", ".", "MULTIPLIERS...
34.235294
19.088235
def create(dataset, target, features=None, l2_penalty=1e-2, l1_penalty=0.0, solver='auto', feature_rescaling=True, convergence_threshold = _DEFAULT_SOLVER_OPTIONS['convergence_threshold'], step_size = _DEFAULT_SOLVER_OPTIONS['step_size'], lbfgs_memory_level = _DEFAULT_SOLVER_OPTIONS['lbfgs_memory_level'], max_iterations = _DEFAULT_SOLVER_OPTIONS['max_iterations'], validation_set = "auto", verbose=True): """ Create a :class:`~turicreate.linear_regression.LinearRegression` to predict a scalar target variable as a linear function of one or more features. In addition to standard numeric and categorical types, features can also be extracted automatically from list- or dictionary-type SFrame columns. The linear regression module can be used for ridge regression, Lasso, and elastic net regression (see References for more detail on these methods). By default, this model has an l2 regularization weight of 0.01. Parameters ---------- dataset : SFrame The dataset to use for training the model. target : string Name of the column containing the target variable. features : list[string], optional Names of the columns containing features. 'None' (the default) indicates that all columns except the target variable should be used as features. The features are columns in the input SFrame that can be of the following types: - *Numeric*: values of numeric type integer or float. - *Categorical*: values of type string. - *Array*: list of numeric (integer or float) values. Each list element is treated as a separate feature in the model. - *Dictionary*: key-value pairs with numeric (integer or float) values Each key of a dictionary is treated as a separate feature and the value in the dictionary corresponds to the value of the feature. Dictionaries are ideal for representing sparse data. Columns of type *list* are not supported. Convert such feature columns to type array if all entries in the list are of numeric types. 
If the lists contain data of mixed types, separate them out into different columns. l2_penalty : float, optional Weight on the l2-regularizer of the model. The larger this weight, the more the model coefficients shrink toward 0. This introduces bias into the model but decreases variance, potentially leading to better predictions. The default value is 0.01; setting this parameter to 0 corresponds to unregularized linear regression. See the ridge regression reference for more detail. l1_penalty : float, optional Weight on l1 regularization of the model. Like the l2 penalty, the higher the l1 penalty, the more the estimated coefficients shrink toward 0. The l1 penalty, however, completely zeros out sufficiently small coefficients, automatically indicating features that are not useful for the model. The default weight of 0 prevents any features from being discarded. See the LASSO regression reference for more detail. solver : string, optional Solver to use for training the model. See the references for more detail on each solver. - *auto (default)*: automatically chooses the best solver for the data and model parameters. - *newton*: Newton-Raphson - *lbfgs*: limited memory BFGS - *fista*: accelerated gradient descent The model is trained using a carefully engineered collection of methods that are automatically picked based on the input data. The ``newton`` method works best for datasets with plenty of examples and few features (long datasets). Limited memory BFGS (``lbfgs``) is a robust solver for wide datasets (i.e datasets with many coefficients). ``fista`` is the default solver for l1-regularized linear regression. The solvers are all automatically tuned and the default options should function well. See the solver options guide for setting additional parameters for each of the solvers. See the user guide for additional details on how the solver is chosen. 
feature_rescaling : boolean, optional Feature rescaling is an important pre-processing step that ensures that all features are on the same scale. An l2-norm rescaling is performed to make sure that all features are of the same norm. Categorical features are also rescaled by rescaling the dummy variables that are used to represent them. The coefficients are returned in original scale of the problem. This process is particularly useful when features vary widely in their ranges. validation_set : SFrame, optional A dataset for monitoring the model's generalization performance. For each row of the progress table, the chosen metrics are computed for both the provided training dataset and the validation_set. The format of this SFrame must be the same as the training set. By default this argument is set to 'auto' and a validation set is automatically sampled and used for progress printing. If validation_set is set to None, then no additional metrics are computed. The default value is 'auto'. convergence_threshold : float, optional Convergence is tested using variation in the training objective. The variation in the training objective is calculated using the difference between the objective values between two steps. Consider reducing this below the default value (0.01) for a more accurately trained model. Beware of overfitting (i.e a model that works well only on the training data) if this parameter is set to a very low value. lbfgs_memory_level : int, optional The L-BFGS algorithm keeps track of gradient information from the previous ``lbfgs_memory_level`` iterations. The storage requirement for each of these gradients is the ``num_coefficients`` in the problem. Increasing the ``lbfgs_memory_level`` can help improve the quality of the model trained. Setting this to more than ``max_iterations`` has the same effect as setting it to ``max_iterations``. max_iterations : int, optional The maximum number of allowed passes through the data. 
More passes over the data can result in a more accurately trained model. Consider increasing this (the default value is 10) if the training accuracy is low and the *Grad-Norm* in the display is large. step_size : float, optional (fista only) The starting step size to use for the ``fista`` and ``gd`` solvers. The default is set to 1.0, this is an aggressive setting. If the first iteration takes a considerable amount of time, reducing this parameter may speed up model training. verbose : bool, optional If True, print progress updates. Returns ------- out : LinearRegression A trained model of type :class:`~turicreate.linear_regression.LinearRegression`. See Also -------- LinearRegression, turicreate.boosted_trees_regression.BoostedTreesRegression, turicreate.regression.create Notes ----- - Categorical variables are encoded by creating dummy variables. For a variable with :math:`K` categories, the encoding creates :math:`K-1` dummy variables, while the first category encountered in the data is used as the baseline. - For prediction and evaluation of linear regression models with sparse dictionary inputs, new keys/columns that were not seen during training are silently ignored. - Any 'None' values in the data will result in an error being thrown. - A constant term is automatically added for the model intercept. This term is not regularized. - Standard errors on coefficients are only available when `solver=newton` or when the default `auto` solver option chooses the newton method and if the number of examples in the training data is more than the number of coefficients. If standard errors cannot be estimated, a column of `None` values are returned. References ---------- - Hoerl, A.E. and Kennard, R.W. (1970) `Ridge regression: Biased Estimation for Nonorthogonal Problems <http://amstat.tandfonline.com/doi/abs/10.1080/00401706.1970.10488634>`_. Technometrics 12(1) pp.55-67 - Tibshirani, R. 
(1996) `Regression Shrinkage and Selection via the Lasso <h ttp://www.jstor.org/discover/10.2307/2346178?uid=3739256&uid=2&uid=4&sid=2 1104169934983>`_. Journal of the Royal Statistical Society. Series B (Methodological) 58(1) pp.267-288. - Zhu, C., et al. (1997) `Algorithm 778: L-BFGS-B: Fortran subroutines for large-scale bound-constrained optimization <https://dl.acm.org/citation.cfm?id=279236>`_. ACM Transactions on Mathematical Software 23(4) pp.550-560. - Barzilai, J. and Borwein, J. `Two-Point Step Size Gradient Methods <http://imajna.oxfordjournals.org/content/8/1/141.short>`_. IMA Journal of Numerical Analysis 8(1) pp.141-148. - Beck, A. and Teboulle, M. (2009) `A Fast Iterative Shrinkage-Thresholding Algorithm for Linear Inverse Problems <http://epubs.siam.org/doi/abs/10.1137/080716542>`_. SIAM Journal on Imaging Sciences 2(1) pp.183-202. - Zhang, T. (2004) `Solving large scale linear prediction problems using stochastic gradient descent algorithms <https://dl.acm.org/citation.cfm?id=1015332>`_. ICML '04: Proceedings of the twenty-first international conference on Machine learning p.116. Examples -------- Given an :class:`~turicreate.SFrame` ``sf`` with a list of columns [``feature_1`` ... ``feature_K``] denoting features and a target column ``target``, we can create a :class:`~turicreate.linear_regression.LinearRegression` as follows: >>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv') >>> model = turicreate.linear_regression.create(data, target='price', ... features=['bath', 'bedroom', 'size']) For ridge regression, we can set the ``l2_penalty`` parameter higher (the default is 0.01). For Lasso regression, we set the l1_penalty higher, and for elastic net, we set both to be higher. .. 
sourcecode:: python # Ridge regression >>> model_ridge = turicreate.linear_regression.create(data, 'price', l2_penalty=0.1) # Lasso >>> model_lasso = turicreate.linear_regression.create(data, 'price', l2_penalty=0., l1_penalty=1.0) # Elastic net regression >>> model_enet = turicreate.linear_regression.create(data, 'price', l2_penalty=0.5, l1_penalty=0.5) """ # Regression model names. model_name = "regression_linear_regression" solver = solver.lower() model = _sl.create(dataset, target, model_name, features=features, validation_set = validation_set, solver = solver, verbose = verbose, l2_penalty=l2_penalty, l1_penalty = l1_penalty, feature_rescaling = feature_rescaling, convergence_threshold = convergence_threshold, step_size = step_size, lbfgs_memory_level = lbfgs_memory_level, max_iterations = max_iterations) return LinearRegression(model.__proxy__)
[ "def", "create", "(", "dataset", ",", "target", ",", "features", "=", "None", ",", "l2_penalty", "=", "1e-2", ",", "l1_penalty", "=", "0.0", ",", "solver", "=", "'auto'", ",", "feature_rescaling", "=", "True", ",", "convergence_threshold", "=", "_DEFAULT_SOL...
44.992248
29.232558
def getSimulations(self, times, **kwargs): """ A generator to quickly access many simulations. The arguments are the same as for `getSimulation`. """ for t in times: yield self.getSimulation(t, **kwargs)
[ "def", "getSimulations", "(", "self", ",", "times", ",", "*", "*", "kwargs", ")", ":", "for", "t", "in", "times", ":", "yield", "self", ".", "getSimulation", "(", "t", ",", "*", "*", "kwargs", ")" ]
35.714286
8.857143
def setup_addon_register(self, harpoon): """Setup our addon register""" # Create the addon getter and register the crosshairs namespace self.addon_getter = AddonGetter() self.addon_getter.add_namespace("harpoon.crosshairs", Result.FieldSpec(), Addon.FieldSpec()) # Initiate the addons from our configuration register = Register(self.addon_getter, self) if "addons" in harpoon: addons = harpoon["addons"] if type(addons) in (MergedOptions, dict) or getattr(addons, "is_dict", False): spec = sb.dictof(sb.string_spec(), sb.listof(sb.string_spec())) meta = Meta(harpoon, []).at("addons") for namespace, adns in spec.normalise(meta, addons).items(): register.add_pairs(*[(namespace, adn) for adn in adns]) # Import our addons register.recursive_import_known() # Resolve our addons register.recursive_resolve_imported() return register
[ "def", "setup_addon_register", "(", "self", ",", "harpoon", ")", ":", "# Create the addon getter and register the crosshairs namespace", "self", ".", "addon_getter", "=", "AddonGetter", "(", ")", "self", ".", "addon_getter", ".", "add_namespace", "(", "\"harpoon.crosshair...
41.666667
22.833333
def _pop_none(self, kwargs): """Remove default values (anything where the value is None). click is unfortunately bad at the way it sends through unspecified defaults.""" for key, value in copy(kwargs).items(): # options with multiple=True return a tuple if value is None or value == (): kwargs.pop(key) if hasattr(value, 'read'): kwargs[key] = value.read()
[ "def", "_pop_none", "(", "self", ",", "kwargs", ")", ":", "for", "key", ",", "value", "in", "copy", "(", "kwargs", ")", ".", "items", "(", ")", ":", "# options with multiple=True return a tuple", "if", "value", "is", "None", "or", "value", "==", "(", ")"...
48.888889
5.666667
def to_pandas(self): """Convert to pandas MultiIndex. Returns ------- pandas.base.MultiIndex """ if not all(ind.is_raw() for ind in self.values): raise ValueError('Cannot convert to pandas MultiIndex if not evaluated.') from pandas import MultiIndex as PandasMultiIndex arrays = [ind.values for ind in self.values] return PandasMultiIndex.from_arrays(arrays, names=self.names)
[ "def", "to_pandas", "(", "self", ")", ":", "if", "not", "all", "(", "ind", ".", "is_raw", "(", ")", "for", "ind", "in", "self", ".", "values", ")", ":", "raise", "ValueError", "(", "'Cannot convert to pandas MultiIndex if not evaluated.'", ")", "from", "pand...
28.125
24.9375
def stream(repo_uri, stream_uri, verbose, assume, sort, before=None, after=None): """Stream git history policy changes to destination. Default stream destination is a summary of the policy changes to stdout, one per line. Also supported for stdout streaming is `jsonline`. AWS Kinesis and SQS destinations are specified by providing the ARN. Database destinations are supported by providing a sqlalchemy DSN. Note SQLAlchemy and db drivers must be installed separately as they an optional dependency. When using database destinations, streaming defaults to incremental. """ logging.basicConfig( format="%(asctime)s: %(name)s:%(levelname)s %(message)s", level=(verbose and logging.DEBUG or logging.INFO)) logging.getLogger('botocore').setLevel(logging.WARNING) if before: before = parse(before) if after: after = parse(after) if sort: sort = six.moves.reduce(operator.or_, [SORT_TYPE[s] for s in sort]) with contextlib.closing(TempDir().open()) as temp_dir: if repo_uri is None: repo_uri = pygit2.discover_repository(os.getcwd()) log.debug("Using repository %s", repo_uri) if repo_uri.startswith('http') or repo_uri.startswith('git@'): log.info("Cloning repository: %s", repo_uri) repo = pygit2.clone_repository(repo_uri, temp_dir.path) else: repo = pygit2.Repository(repo_uri) load_resources() policy_repo = PolicyRepo(repo_uri, repo) change_count = 0 with contextlib.closing(transport(stream_uri, assume)) as t: if after is None and isinstance(t, IndexedTransport): after = t.last() for change in policy_repo.delta_stream(after=after, before=before): change_count += 1 t.send(change) log.info("Streamed %d policy repo changes", change_count) return change_count
[ "def", "stream", "(", "repo_uri", ",", "stream_uri", ",", "verbose", ",", "assume", ",", "sort", ",", "before", "=", "None", ",", "after", "=", "None", ")", ":", "logging", ".", "basicConfig", "(", "format", "=", "\"%(asctime)s: %(name)s:%(levelname)s %(messag...
39.244898
24.77551
def shutdown(self, how=socket.SHUT_RDWR): """ Send a shutdown signal for both reading and writing, or whatever socket.SHUT_* constant you like. Shutdown differs from closing in that it explicitly changes the state of the socket resource to closed, whereas closing will only decrement the number of peers on this end of the socket, since sockets can be a resource shared by multiple peers on a single OS. When the number of peers reaches zero, the socket is closed, but not deallocated, so you still need to call close. (except that this is python and close is automatically called on the deletion of the socket) http://stackoverflow.com/questions/409783/socket-shutdown-vs-socket-close """ if self._sock_send is not None: self._sock_send.shutdown(how) return self.sock.shutdown(how)
[ "def", "shutdown", "(", "self", ",", "how", "=", "socket", ".", "SHUT_RDWR", ")", ":", "if", "self", ".", "_sock_send", "is", "not", "None", ":", "self", ".", "_sock_send", ".", "shutdown", "(", "how", ")", "return", "self", ".", "sock", ".", "shutdo...
49.5
21.944444
def grabEmails(emails=None, emailsFile=None, nicks=None, nicksFile=None, domains=EMAIL_DOMAINS, excludeDomains=[]): """ Method that generates a list of emails. Args: ----- emails: Any premade list of emails. emailsFile: Filepath to the emails file (one per line). nicks: A list of aliases. nicksFile: Filepath to the aliases file (one per line). domains: Domains where the aliases will be tested. excludeDomains: Domains to be excluded from the created list. Returns: -------- list: the list of emails that will be verified. """ email_candidates = [] if emails != None: email_candidates = emails elif emailsFile != None: # Reading the emails file with open(emailsFile, "r") as iF: email_candidates = iF.read().splitlines() elif nicks != None: # Iterating the list of nicks for n in nicks: # Iterating the list of possible domains to build the emails for d in domains: if d not in excludeDomains: email_candidates.append(n+"@"+d) elif nicksFile != None: # Reading the list of nicks with open(nicksFile, "r") as iF: nicks = iF.read().splitlines() # Iterating the list of nicks for n in nicks: # Iterating the list of possible domains to build the emails for d in domains: if d not in excludeDomains: email_candidates.append(n+"@"+d) return email_candidates
[ "def", "grabEmails", "(", "emails", "=", "None", ",", "emailsFile", "=", "None", ",", "nicks", "=", "None", ",", "nicksFile", "=", "None", ",", "domains", "=", "EMAIL_DOMAINS", ",", "excludeDomains", "=", "[", "]", ")", ":", "email_candidates", "=", "[",...
36.348837
16.627907
def patch_memcache(): """Monkey patch python-memcached to implement our consistent hashring in its node selection and operations. """ def _init(self, servers, *k, **kw): self._old_init(servers, *k, **kw) nodes = {} for server in self.servers: conf = { 'hostname': server.ip, 'instance': server, 'port': server.port, 'weight': server.weight } nodes[server.ip] = conf self.uhashring = HashRing(nodes) def _get_server(self, key): if isinstance(key, tuple): return self._old_get_server(key) for i in range(self._SERVER_RETRIES): for node in self.uhashring.range(key): if node['instance'].connect(): return node['instance'], key return None, None memcache = __import__('memcache') memcache.Client._old_get_server = memcache.Client._get_server memcache.Client._old_init = memcache.Client.__init__ memcache.Client.__init__ = _init memcache.Client._get_server = _get_server
[ "def", "patch_memcache", "(", ")", ":", "def", "_init", "(", "self", ",", "servers", ",", "*", "k", ",", "*", "*", "kw", ")", ":", "self", ".", "_old_init", "(", "servers", ",", "*", "k", ",", "*", "*", "kw", ")", "nodes", "=", "{", "}", "for...
31.228571
13.4
def _hash(secret: bytes, data: bytes, alg: str) -> bytes: """ Create a new HMAC hash. :param secret: The secret used when hashing data. :type secret: bytes :param data: The data to hash. :type data: bytes :param alg: The algorithm to use when hashing `data`. :type alg: str :return: New HMAC hash. :rtype: bytes """ algorithm = get_algorithm(alg) return hmac \ .new(secret, msg=data, digestmod=algorithm) \ .digest()
[ "def", "_hash", "(", "secret", ":", "bytes", ",", "data", ":", "bytes", ",", "alg", ":", "str", ")", "->", "bytes", ":", "algorithm", "=", "get_algorithm", "(", "alg", ")", "return", "hmac", ".", "new", "(", "secret", ",", "msg", "=", "data", ",", ...
27.588235
15.588235
def weave(target, advices, pointcut=None, depth=1, public=False): """Weave advices such as Advice objects.""" advices = ( advice if isinstance(advice, Advice) else Advice(advice) for advice in advices ) weave( target=target, advices=advices, pointcut=pointcut, depth=depth, public=public )
[ "def", "weave", "(", "target", ",", "advices", ",", "pointcut", "=", "None", ",", "depth", "=", "1", ",", "public", "=", "False", ")", ":", "advices", "=", "(", "advice", "if", "isinstance", "(", "advice", ",", "Advice", ")", "else", "Advice", "(", ...
30.666667
22.75
def main(self, args=None): """Enter filesystem service loop.""" if get_compat_0_1(): args = self.main_0_1_preamble() d = {'multithreaded': self.multithreaded and 1 or 0} d['fuse_args'] = args or self.fuse_args.assemble() for t in 'file_class', 'dir_class': if hasattr(self, t): getattr(self.methproxy, 'set_' + t)(getattr(self,t)) for a in self._attrs: b = a if get_compat_0_1() and a in self.compatmap: b = self.compatmap[a] if hasattr(self, b): c = '' if get_compat_0_1() and hasattr(self, a + '_compat_0_1'): c = '_compat_0_1' d[a] = ErrnoWrapper(self.lowwrap(a + c)) try: main(**d) except FuseError: if args or self.fuse_args.mount_expected(): raise
[ "def", "main", "(", "self", ",", "args", "=", "None", ")", ":", "if", "get_compat_0_1", "(", ")", ":", "args", "=", "self", ".", "main_0_1_preamble", "(", ")", "d", "=", "{", "'multithreaded'", ":", "self", ".", "multithreaded", "and", "1", "or", "0"...
31.964286
19.035714
def choice(self, other): '''(|) This combinator implements choice. The parser p | q first applies p. If it succeeds, the value of p is returned. If p fails **without consuming any input**, parser q is tried. NOTICE: without backtrack.''' @Parser def choice_parser(text, index): res = self(text, index) return res if res.status or res.index != index else other(text, index) return choice_parser
[ "def", "choice", "(", "self", ",", "other", ")", ":", "@", "Parser", "def", "choice_parser", "(", "text", ",", "index", ")", ":", "res", "=", "self", "(", "text", ",", "index", ")", "return", "res", "if", "res", ".", "status", "or", "res", ".", "...
46.4
18.8
def update(): ''' Update the cache file for the bucket. ''' metadata = _init() if S3_SYNC_ON_UPDATE: # sync the buckets to the local cache log.info('Syncing local cache from S3...') for saltenv, env_meta in six.iteritems(metadata): for bucket_files in _find_files(env_meta): for bucket, files in six.iteritems(bucket_files): for file_path in files: cached_file_path = _get_cached_file_name(bucket, saltenv, file_path) log.info('%s - %s : %s', bucket, saltenv, file_path) # load the file from S3 if it's not in the cache or it's old _get_file_from_s3(metadata, saltenv, bucket, file_path, cached_file_path) log.info('Sync local cache from S3 completed.')
[ "def", "update", "(", ")", ":", "metadata", "=", "_init", "(", ")", "if", "S3_SYNC_ON_UPDATE", ":", "# sync the buckets to the local cache", "log", ".", "info", "(", "'Syncing local cache from S3...'", ")", "for", "saltenv", ",", "env_meta", "in", "six", ".", "i...
39.666667
26.904762
def _save_config(self, filename=None): """ Save the given user configuration. """ if filename is None: filename = self._config_filename parent_path = os.path.dirname(filename) if not os.path.isdir(parent_path): os.makedirs(parent_path) with open(filename, "w") as configfile: self._config.write(configfile)
[ "def", "_save_config", "(", "self", ",", "filename", "=", "None", ")", ":", "if", "filename", "is", "None", ":", "filename", "=", "self", ".", "_config_filename", "parent_path", "=", "os", ".", "path", ".", "dirname", "(", "filename", ")", "if", "not", ...
35.272727
3.818182
def transfer(self, new_region_slug): """ Transfer the image """ return self.get_data( "images/%s/actions/" % self.id, type=POST, params={"type": "transfer", "region": new_region_slug} )
[ "def", "transfer", "(", "self", ",", "new_region_slug", ")", ":", "return", "self", ".", "get_data", "(", "\"images/%s/actions/\"", "%", "self", ".", "id", ",", "type", "=", "POST", ",", "params", "=", "{", "\"type\"", ":", "\"transfer\"", ",", "\"region\"...
28.555556
11.444444
def validate_wrap(self, value): ''' Validates the type and length of ``value`` ''' if not isinstance(value, basestring): self._fail_validation_type(value, basestring) if self.regex.match(value) is None: self._fail_validation(value, 'Value does not match regular expression')
[ "def", "validate_wrap", "(", "self", ",", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "basestring", ")", ":", "self", ".", "_fail_validation_type", "(", "value", ",", "basestring", ")", "if", "self", ".", "regex", ".", "match", "(", ...
52.833333
15.833333
def volume_disk_temp_max(self, volume): """Maximum temperature of all disks making up the volume""" volume = self._get_volume(volume) if volume is not None: vol_disks = volume["disks"] if vol_disks is not None: max_temp = 0 for vol_disk in vol_disks: disk_temp = self.disk_temp(vol_disk) if disk_temp is not None and disk_temp > max_temp: max_temp = disk_temp return max_temp
[ "def", "volume_disk_temp_max", "(", "self", ",", "volume", ")", ":", "volume", "=", "self", ".", "_get_volume", "(", "volume", ")", "if", "volume", "is", "not", "None", ":", "vol_disks", "=", "volume", "[", "\"disks\"", "]", "if", "vol_disks", "is", "not...
38.357143
11.928571
def unfreeze(label, pop=False, environ=None): """Reset the environment to its state before it was frozen by :func:`freeze`. :param str label: The name for the frozen environment. :param bool pop: Destroy the freeze after use; only allow unfreeze once. :param dict environ: The environment to work on; defaults to ``os.environ``. :returns: A context manager to re-freeze the environment on exit. Usage:: unfreeze('nuke') # or with unfreeze('maya'): # do something """ environ = os.environ if environ is None else environ diff = _get_diff(environ, label, pop=pop) original = _apply_diff(environ, diff) return _refreezer(environ, diff, original)
[ "def", "unfreeze", "(", "label", ",", "pop", "=", "False", ",", "environ", "=", "None", ")", ":", "environ", "=", "os", ".", "environ", "if", "environ", "is", "None", "else", "environ", "diff", "=", "_get_diff", "(", "environ", ",", "label", ",", "po...
30.73913
23.173913
def maybe_thenable(obj, on_resolve): """ Execute a on_resolve function once the thenable is resolved, returning the same type of object inputed. If the object is not thenable, it should return on_resolve(obj) """ if isawaitable(obj) and not isinstance(obj, Promise): return await_and_execute(obj, on_resolve) if is_thenable(obj): return Promise.resolve(obj).then(on_resolve) # If it's not awaitable not a Promise, return # the function executed over the object return on_resolve(obj)
[ "def", "maybe_thenable", "(", "obj", ",", "on_resolve", ")", ":", "if", "isawaitable", "(", "obj", ")", "and", "not", "isinstance", "(", "obj", ",", "Promise", ")", ":", "return", "await_and_execute", "(", "obj", ",", "on_resolve", ")", "if", "is_thenable"...
35.133333
14.733333
def get_sections_with_students_in_course(self, course_id, params={}): """ Return list of sections including students for the passed course ID. """ include = params.get("include", []) if "students" not in include: include.append("students") params["include"] = include return self.get_sections_in_course(course_id, params)
[ "def", "get_sections_with_students_in_course", "(", "self", ",", "course_id", ",", "params", "=", "{", "}", ")", ":", "include", "=", "params", ".", "get", "(", "\"include\"", ",", "[", "]", ")", "if", "\"students\"", "not", "in", "include", ":", "include"...
38.1
13.9
def restore(self, image): """ Restore the droplet to the specified backup image A Droplet restoration will rebuild an image using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. [APIDocs]_ :param image: an image ID, an image slug, or an `Image` object representing a backup image of the droplet :type image: integer, string, or `Image` :return: an `Action` representing the in-progress operation on the droplet :rtype: Action :raises DOAPIError: if the API endpoint replies with an error """ if isinstance(image, Image): image = image.id return self.act(type='restore', image=image)
[ "def", "restore", "(", "self", ",", "image", ")", ":", "if", "isinstance", "(", "image", ",", "Image", ")", ":", "image", "=", "image", ".", "id", "return", "self", ".", "act", "(", "type", "=", "'restore'", ",", "image", "=", "image", ")" ]
41.7
20.6
def reverse_readline(m_file, blk_size=4096, max_mem=4000000): """ Generator method to read a file line-by-line, but backwards. This allows one to efficiently get data at the end of a file. Based on code by Peter Astrand <astrand@cendio.se>, using modifications by Raymond Hettinger and Kevin German. http://code.activestate.com/recipes/439045-read-a-text-file-backwards -yet-another-implementat/ Reads file forwards and reverses in memory for files smaller than the max_mem parameter, or for gzip files where reverse seeks are not supported. Files larger than max_mem are dynamically read backwards. Args: m_file (File): File stream to read (backwards) blk_size (int): The buffer size. Defaults to 4096. max_mem (int): The maximum amount of memory to involve in this operation. This is used to determine when to reverse a file in-memory versus seeking portions of a file. For bz2 files, this sets the maximum block size. Returns: Generator that returns lines from the file. Similar behavior to the file.readline() method, except the lines are returned from the back of the file. """ # Check if the file stream is a bit stream or not is_text = isinstance(m_file, io.TextIOWrapper) try: file_size = os.path.getsize(m_file.name) except AttributeError: # Bz2 files do not have name attribute. Just set file_size to above # max_mem for now. file_size = max_mem + 1 # If the file size is within our desired RAM use, just reverse it in memory # GZip files must use this method because there is no way to negative seek if file_size < max_mem or isinstance(m_file, gzip.GzipFile): for line in reversed(m_file.readlines()): yield line.rstrip() else: if isinstance(m_file, bz2.BZ2File): # for bz2 files, seeks are expensive. It is therefore in our best # interest to maximize the blk_size within limits of desired RAM # use. 
blk_size = min(max_mem, file_size) buf = "" m_file.seek(0, 2) if is_text: lastchar = m_file.read(1) else: lastchar = m_file.read(1).decode("utf-8") trailing_newline = (lastchar == "\n") while 1: newline_pos = buf.rfind("\n") pos = m_file.tell() if newline_pos != -1: # Found a newline line = buf[newline_pos + 1:] buf = buf[:newline_pos] if pos or newline_pos or trailing_newline: line += "\n" yield line elif pos: # Need to fill buffer toread = min(blk_size, pos) m_file.seek(pos - toread, 0) if is_text: buf = m_file.read(toread) + buf else: buf = m_file.read(toread).decode("utf-8") + buf m_file.seek(pos - toread, 0) if pos == toread: buf = "\n" + buf else: # Start-of-file return
[ "def", "reverse_readline", "(", "m_file", ",", "blk_size", "=", "4096", ",", "max_mem", "=", "4000000", ")", ":", "# Check if the file stream is a bit stream or not", "is_text", "=", "isinstance", "(", "m_file", ",", "io", ".", "TextIOWrapper", ")", "try", ":", ...
37.807229
20.216867
def fetch_celery_task_state(celery_task): """ Fetch and return the state of the given celery task. The scope of this function is global so that it can be called by subprocesses in the pool. :param celery_task: a tuple of the Celery task key and the async Celery object used to fetch the task's state :type celery_task: tuple(str, celery.result.AsyncResult) :return: a tuple of the Celery task key and the Celery state of the task :rtype: tuple[str, str] """ try: with timeout(seconds=2): # Accessing state property of celery task will make actual network request # to get the current state of the task. res = (celery_task[0], celery_task[1].state) except Exception as e: exception_traceback = "Celery Task ID: {}\n{}".format(celery_task[0], traceback.format_exc()) res = ExceptionWithTraceback(e, exception_traceback) return res
[ "def", "fetch_celery_task_state", "(", "celery_task", ")", ":", "try", ":", "with", "timeout", "(", "seconds", "=", "2", ")", ":", "# Accessing state property of celery task will make actual network request", "# to get the current state of the task.", "res", "=", "(", "cele...
44.681818
24.045455
def get_nodes(environment=None): """Gets all nodes found in the nodes/ directory""" if not os.path.exists('nodes'): return [] nodes = [] for filename in sorted( [f for f in os.listdir('nodes') if (not os.path.isdir(f) and f.endswith(".json") and not f.startswith('.'))]): fqdn = ".".join(filename.split('.')[:-1]) # Remove .json from name node = get_node(fqdn) if environment is None or node.get('chef_environment') == environment: nodes.append(node) return nodes
[ "def", "get_nodes", "(", "environment", "=", "None", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "'nodes'", ")", ":", "return", "[", "]", "nodes", "=", "[", "]", "for", "filename", "in", "sorted", "(", "[", "f", "for", "f", "in"...
39.785714
16.357143
def index(self, columnnames, sort=True): """Return a tableindex object. :class:`tableindex` lets one get the row numbers of the rows holding given values for the columns for which the index is created. It uses an in-memory index on which a binary search is done. By default the table is sorted on the given columns to get the correct index order. For example:: t = table('3c343.MS') tinx = t.index('ANTENNA1') print tinx.rownumbers(0) # print rownrs containing ANTENNA1=0 """ from .tableindex import tableindex return tableindex(self, columnnames, sort)
[ "def", "index", "(", "self", ",", "columnnames", ",", "sort", "=", "True", ")", ":", "from", ".", "tableindex", "import", "tableindex", "return", "tableindex", "(", "self", ",", "columnnames", ",", "sort", ")" ]
36.444444
21.722222
def _next_datetime_with_utc_hour(table_name, utc_hour): ''' Datapipeline API is throttling us, as all the pipelines are started at the same time. We would like to uniformly distribute the startTime over a 60 minute window. Return the next future utc datetime where hour == utc_hour minute = A value between 0-59 (depending on table name) second = A value between 0-59 (depending on table name) ''' today = datetime.date.today() # The minute and second values generated are deterministic, as we do not want # pipeline definition to change for every run. start_date_time = datetime.datetime( year=today.year, month=today.month, day=today.day, hour=utc_hour, minute=_get_deterministic_value_for_table_name(table_name, 60), second=_get_deterministic_value_for_table_name(table_name, 60) ) if start_date_time < datetime.datetime.utcnow(): one_day = datetime.timedelta(days=1) start_date_time += one_day return start_date_time
[ "def", "_next_datetime_with_utc_hour", "(", "table_name", ",", "utc_hour", ")", ":", "today", "=", "datetime", ".", "date", ".", "today", "(", ")", "# The minute and second values generated are deterministic, as we do not want", "# pipeline definition to change for every run.", ...
36.892857
23.321429
def run_sync(self): """ Runs the message loop until the Pebble disconnects. This method will block until the watch disconnects or a fatal error occurs. For alternatives that don't block forever, see :meth:`pump_reader` and :meth:`run_async`. """ while self.connected: try: self.pump_reader() except PacketDecodeError as e: logger.warning("Packet decode failed: %s", e) except ConnectionError: break
[ "def", "run_sync", "(", "self", ")", ":", "while", "self", ".", "connected", ":", "try", ":", "self", ".", "pump_reader", "(", ")", "except", "PacketDecodeError", "as", "e", ":", "logger", ".", "warning", "(", "\"Packet decode failed: %s\"", ",", "e", ")",...
37
20.714286
def panels(): """Show all panels for a case.""" if request.method == 'POST': # update an existing panel csv_file = request.files['csv_file'] content = csv_file.stream.read() lines = None try: if b'\n' in content: lines = content.decode('utf-8', 'ignore').split('\n') else: lines = content.decode('windows-1252').split('\r') except Exception as err: flash('Something went wrong while parsing the panel CSV file! ({})'.format(err), 'danger') return redirect(request.referrer) new_panel_name = request.form.get('new_panel_name') if new_panel_name: #create a new panel new_panel_id = controllers.new_panel( store=store, institute_id=request.form['institute'], panel_name=new_panel_name, display_name=request.form['display_name'], csv_lines=lines, ) if new_panel_id is None: flash('Something went wrong and the panel list was not updated!','warning') return redirect(request.referrer) else: flash("new gene panel added, {}!".format(new_panel_name),'success') return redirect(url_for('panels.panel', panel_id=new_panel_id)) else: # modify an existing panel update_option = request.form['modify_option'] panel_obj= controllers.update_panel( store=store, panel_name=request.form['panel_name'], csv_lines=lines, option=update_option ) if panel_obj is None: return abort(404, "gene panel not found: {}".format(request.form['panel_name'])) else: return redirect(url_for('panels.panel', panel_id=panel_obj['_id'])) institutes = list(user_institutes(store, current_user)) panel_names = [name for institute in institutes for name in store.gene_panels(institute_id=institute['_id']).distinct('panel_name')] panel_versions = {} for name in panel_names: panel_versions[name]=store.gene_panels(panel_id=name) panel_groups = [] for institute_obj in institutes: institute_panels = store.latest_panels(institute_obj['_id']) panel_groups.append((institute_obj, institute_panels)) return dict(panel_groups=panel_groups, panel_names=panel_names, panel_versions=panel_versions, institutes=institutes)
[ "def", "panels", "(", ")", ":", "if", "request", ".", "method", "==", "'POST'", ":", "# update an existing panel", "csv_file", "=", "request", ".", "files", "[", "'csv_file'", "]", "content", "=", "csv_file", ".", "stream", ".", "read", "(", ")", "lines", ...
42.387097
21.048387
def experiments_create(self, subject_id, image_group_id, properties): """Create an experiment object with subject, and image group. Objects are referenced by their unique identifiers. The API ensure that at time of creation all referenced objects exist. Referential consistency, however, is currently not enforced when objects are deleted. Expects experiment name in property list. Raises ValueError if no valid name is given. If any of the referenced objects do not exist a ValueError is thrown. Parameters ---------- subject_id : string Unique identifier of subject image_group_id : string Unique identifier of image group properties : Dictionary Set of experiment properties. Is required to contain at least the experiment name Returns ------- ExperimentHandle Handle for created experiment object in database """ # Ensure that reference subject exists if self.subjects_get(subject_id) is None: raise ValueError('unknown subject: ' + subject_id) # Ensure that referenced image group exists if self.image_groups_get(image_group_id) is None: raise ValueError('unknown image group: ' + image_group_id) return self.experiments.create_object(subject_id, image_group_id, properties)
[ "def", "experiments_create", "(", "self", ",", "subject_id", ",", "image_group_id", ",", "properties", ")", ":", "# Ensure that reference subject exists", "if", "self", ".", "subjects_get", "(", "subject_id", ")", "is", "None", ":", "raise", "ValueError", "(", "'u...
42.515152
22.393939
def __get_query_basic(cls, date_field=None, start=None, end=None, filters={}): """ Create a es_dsl query object with the date range and filters. :param date_field: field with the date value :param start: date with the from value, should be a datetime.datetime object :param end: date with the to value, should be a datetime.datetime object :param filters: dict with the filters to be applied :return: a DSL query containing the required parameters Ex: {'query': {'bool': {'filter': [{'range': {'DATE_FIELD': {'gte': '2015-05-19T00:00:00', 'lte': '2018-05-18T00:00:00'}}}], 'must': [{'match_phrase': {'first_name': 'Jhon'}}, {'match_phrase': {'last_name': 'Doe'}}, {'match_phrase': {'Phone': 2222222}} ]}}} """ query_basic = Search() query_filters = cls.__get_query_filters(filters) for f in query_filters: query_basic = query_basic.query(f) query_filters_inverse = cls.__get_query_filters(filters, inverse=True) # Here, don't forget the '~'. That is what makes this an inverse filter. for f in query_filters_inverse: query_basic = query_basic.query(~f) if not date_field: query_range = {} else: query_range = cls.__get_query_range(date_field, start, end) # Applying the range filter query_basic = query_basic.filter('range', **query_range) return query_basic
[ "def", "__get_query_basic", "(", "cls", ",", "date_field", "=", "None", ",", "start", "=", "None", ",", "end", "=", "None", ",", "filters", "=", "{", "}", ")", ":", "query_basic", "=", "Search", "(", ")", "query_filters", "=", "cls", ".", "__get_query_...
44.473684
23.947368
def _get_token_type_enum(self): """Builds the python source code for the Parser TokenType enum.""" fmt = "class TokenType(Enum):\n" \ "{indent}\"\"\"The token types for parse nodes generated by the Parser.\"\"\"\n" \ "{indent}" + \ "\n{indent}".join("{1} = {0}".format(num + 1, r.name) for num, r in enumerate(self.rules)) return fmt.format(indent=self.indent)
[ "def", "_get_token_type_enum", "(", "self", ")", ":", "fmt", "=", "\"class TokenType(Enum):\\n\"", "\"{indent}\\\"\\\"\\\"The token types for parse nodes generated by the Parser.\\\"\\\"\\\"\\n\"", "\"{indent}\"", "+", "\"\\n{indent}\"", ".", "join", "(", "\"{1} = {0}\"", ".", "f...
56.571429
20
def _stub_task(self, description, tags=None, **kw): """ Given a description, stub out a task dict. """ # If whitespace is not removed here, TW will do it when we pass the # task to it. task = {"description": description.strip()} # Allow passing "tags" in as part of kw. if 'tags' in kw and tags is None: task['tags'] = tags del(kw['tags']) if tags is not None: task['tags'] = tags task.update(kw) if self._marshal: return Task.from_stub(task, udas=self.config.get_udas()) return task
[ "def", "_stub_task", "(", "self", ",", "description", ",", "tags", "=", "None", ",", "*", "*", "kw", ")", ":", "# If whitespace is not removed here, TW will do it when we pass the", "# task to it.", "task", "=", "{", "\"description\"", ":", "description", ".", "stri...
28.428571
21.380952
def download_sample(job, sample, inputs): """ Download the input sample :param JobFunctionWrappingJob job: passed by Toil automatically :param tuple sample: Tuple containing (UUID,URL) of a sample :param Namespace inputs: Stores input arguments (see main) """ uuid, url = sample job.fileStore.logToMaster('Downloading sample: {}'.format(uuid)) # Download sample tar_id = job.addChildJobFn(download_url_job, url, s3_key_path=inputs.ssec, disk='30G').rv() # Create copy of inputs for each sample sample_inputs = argparse.Namespace(**vars(inputs)) sample_inputs.uuid = uuid sample_inputs.cores = multiprocessing.cpu_count() # Call children and follow-on jobs job.addFollowOnJobFn(process_sample, sample_inputs, tar_id, cores=2, disk='60G')
[ "def", "download_sample", "(", "job", ",", "sample", ",", "inputs", ")", ":", "uuid", ",", "url", "=", "sample", "job", ".", "fileStore", ".", "logToMaster", "(", "'Downloading sample: {}'", ".", "format", "(", "uuid", ")", ")", "# Download sample", "tar_id"...
43.555556
18.444444
async def create_email_identity(self, client_id, identity, passwd, *, user_id=None # 如果设置用户ID,则创建该用户的新登录身份 ) -> SessionIdentity : """ 创建使用电子邮件地址和密码登录的用户身份 """ assert passwd value, _ = await self._client.get(f"/users/identity/{identity}") if value: raise ValueError(f' {identity} has already been registered') if user_id is None: # 新用户 user_id = await self._user_id_generator.new() # 加密登录标识 hashed = sha256_crypt.using(rounds=2000, salt_size=8).hash(passwd) profile = { "user_id": user_id, "identity": identity, "hashed": hashed } token = genword(length=32, charset="ascii_50") key = f"/users/verifying/{token}" await self._client.put(key, json.dumps(profile)) return SessionIdentity(user_id=user_id, identity=identity)
[ "async", "def", "create_email_identity", "(", "self", ",", "client_id", ",", "identity", ",", "passwd", ",", "*", ",", "user_id", "=", "None", "# 如果设置用户ID,则创建该用户的新登录身份", ")", "->", "SessionIdentity", ":", "assert", "passwd", "value", ",", "_", "=", "await", ...
30.1
22.033333
def _get_library_search_paths(): """ Returns a list of library search paths, considering of the current working directory, default paths and paths from environment variables. """ search_paths = [ '', '/usr/lib64', '/usr/local/lib64', '/usr/lib', '/usr/local/lib', '/run/current-system/sw/lib', '/usr/lib/x86_64-linux-gnu/', os.path.abspath(os.path.dirname(__file__)) ] if sys.platform == 'darwin': path_environment_variable = 'DYLD_LIBRARY_PATH' else: path_environment_variable = 'LD_LIBRARY_PATH' if path_environment_variable in os.environ: search_paths.extend(os.environ[path_environment_variable].split(':')) return search_paths
[ "def", "_get_library_search_paths", "(", ")", ":", "search_paths", "=", "[", "''", ",", "'/usr/lib64'", ",", "'/usr/local/lib64'", ",", "'/usr/lib'", ",", "'/usr/local/lib'", ",", "'/run/current-system/sw/lib'", ",", "'/usr/lib/x86_64-linux-gnu/'", ",", "os", ".", "pa...
33.227273
17.045455
def create(self, service_name, json, **kwargs): """Create a new AppNexus object""" return self._send(requests.post, service_name, json, **kwargs)
[ "def", "create", "(", "self", ",", "service_name", ",", "json", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_send", "(", "requests", ".", "post", ",", "service_name", ",", "json", ",", "*", "*", "kwargs", ")" ]
53
12.333333
def add_route(app, fn, context=default_context): """ a decorator that adds a transmute route to the application. """ transmute_func = TransmuteFunction( fn, args_not_from_request=["request"] ) handler = create_handler(transmute_func, context=context) get_swagger_spec(app).add_func(transmute_func, context) for p in transmute_func.paths: aiohttp_path = _convert_to_aiohttp_path(p) resource = app.router.add_resource(aiohttp_path) for method in transmute_func.methods: resource.add_route(method, handler)
[ "def", "add_route", "(", "app", ",", "fn", ",", "context", "=", "default_context", ")", ":", "transmute_func", "=", "TransmuteFunction", "(", "fn", ",", "args_not_from_request", "=", "[", "\"request\"", "]", ")", "handler", "=", "create_handler", "(", "transmu...
35.8125
13.8125
def _family_id_to_superclass(self, family_id): """ Temporary hardcoded mapping from serialized family id to either `class :XBlock:` or `:XBlockAside` """ for family in [XBlock, XBlockAside]: if family_id == family.entry_point: return family raise ValueError('No such family: {}'.format(family_id))
[ "def", "_family_id_to_superclass", "(", "self", ",", "family_id", ")", ":", "for", "family", "in", "[", "XBlock", ",", "XBlockAside", "]", ":", "if", "family_id", "==", "family", ".", "entry_point", ":", "return", "family", "raise", "ValueError", "(", "'No s...
44.75
14.75
def entries(self): """ Return the sorted list of entries in this container, each represented by its full path inside the container. :rtype: list of strings (path) :raises: TypeError: if this container does not exist :raises: OSError: if an error occurred reading the given container (e.g., empty file, damaged file, etc.) """ self.log(u"Getting entries") if not self.exists(): self.log_exc(u"This container does not exist. Wrong path?", None, True, TypeError) if self.actual_container is None: self.log_exc(u"The actual container object has not been set", None, True, TypeError) return self.actual_container.entries
[ "def", "entries", "(", "self", ")", ":", "self", ".", "log", "(", "u\"Getting entries\"", ")", "if", "not", "self", ".", "exists", "(", ")", ":", "self", ".", "log_exc", "(", "u\"This container does not exist. Wrong path?\"", ",", "None", ",", "True", ",", ...
46.1875
19.6875
def submit_property_batch( self, name_id, timeout=60, operations=None, custom_headers=None, raw=False, **operation_config): """Submits a property batch. Submits a batch of property operations. Either all or none of the operations will be committed. :param name_id: The Service Fabric name, without the 'fabric:' URI scheme. :type name_id: str :param timeout: The server timeout for performing the operation in seconds. This timeout specifies the time duration that the client is willing to wait for the requested operation to complete. The default value for this parameter is 60 seconds. :type timeout: long :param operations: A list of the property batch operations to be executed. :type operations: list[~azure.servicefabric.models.PropertyBatchOperation] :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: PropertyBatchInfo or ClientRawResponse if raw=true :rtype: ~azure.servicefabric.models.PropertyBatchInfo or ~msrest.pipeline.ClientRawResponse :raises: :class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>` """ property_batch_description_list = models.PropertyBatchDescriptionList(operations=operations) api_version = "6.0" # Construct URL url = self.submit_property_batch.metadata['url'] path_format_arguments = { 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if custom_headers: header_parameters.update(custom_headers) # Construct body body_content = self._serialize.body(property_batch_description_list, 'PropertyBatchDescriptionList') # Construct and send request request = self._client.post(url, query_parameters, header_parameters, body_content) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 409]: raise models.FabricErrorException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('SuccessfulPropertyBatchInfo', response) if response.status_code == 409: deserialized = self._deserialize('FailedPropertyBatchInfo', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
[ "def", "submit_property_batch", "(", "self", ",", "name_id", ",", "timeout", "=", "60", ",", "operations", "=", "None", ",", "custom_headers", "=", "None", ",", "raw", "=", "False", ",", "*", "*", "operation_config", ")", ":", "property_batch_description_list"...
43.644737
26.947368
def close(self): """Disposes of any internal state. Currently, this closes the PoolManager and any active ProxyManager, which closes any pooled connections. """ self.poolmanager.clear() for proxy in self.proxy_manager.values(): proxy.clear()
[ "def", "close", "(", "self", ")", ":", "self", ".", "poolmanager", ".", "clear", "(", ")", "for", "proxy", "in", "self", ".", "proxy_manager", ".", "values", "(", ")", ":", "proxy", ".", "clear", "(", ")" ]
32.666667
15
def handle_data(self, context, data, dt): """ Calls the callable only when the rule is triggered. """ if self.rule.should_trigger(dt): self.callback(context, data)
[ "def", "handle_data", "(", "self", ",", "context", ",", "data", ",", "dt", ")", ":", "if", "self", ".", "rule", ".", "should_trigger", "(", "dt", ")", ":", "self", ".", "callback", "(", "context", ",", "data", ")" ]
33.666667
3.333333
def _sanitize_dates(start, end): """ Return (datetime_start, datetime_end) tuple if start is None - default is 2015/01/01 if end is None - default is today """ if isinstance(start, int): # regard int as year start = datetime(start, 1, 1) start = to_datetime(start) if isinstance(end, int): end = datetime(end, 1, 1) end = to_datetime(end) if start is None: start = datetime(2015, 1, 1) if end is None: end = datetime.today() if start > end: raise ValueError('start must be an earlier date than end') return start, end
[ "def", "_sanitize_dates", "(", "start", ",", "end", ")", ":", "if", "isinstance", "(", "start", ",", "int", ")", ":", "# regard int as year\r", "start", "=", "datetime", "(", "start", ",", "1", ",", "1", ")", "start", "=", "to_datetime", "(", "start", ...
28.136364
12.590909
def convert(word): """This method converts given `word` to UTF-8 encoding and `bytes` type for the SWIG wrapper.""" if six.PY2: if isinstance(word, unicode): return word.encode('utf-8') else: return word.decode('utf-8').encode('utf-8') # make sure it is real utf8, otherwise complain else: # ==> Py3 if isinstance(word, bytes): return word.decode('utf-8') # bytes must be in utf8 return word
[ "def", "convert", "(", "word", ")", ":", "if", "six", ".", "PY2", ":", "if", "isinstance", "(", "word", ",", "unicode", ")", ":", "return", "word", ".", "encode", "(", "'utf-8'", ")", "else", ":", "return", "word", ".", "decode", "(", "'utf-8'", ")...
38.916667
17.583333
def load_config(self, argv=None, aliases=None, flags=None): """Parse the configuration and generate the Config object. After loading, any arguments that are not key-value or flags will be stored in self.extra_args - a list of unparsed command-line arguments. This is used for arguments such as input files or subcommands. Parameters ---------- argv : list, optional A list that has the form of sys.argv[1:] which has unicode elements of the form u"key=value". If this is None (default), then self.argv will be used. aliases : dict A dict of aliases for configurable traits. Keys are the short aliases, Values are the resolved trait. Of the form: `{'alias' : 'Configurable.trait'}` flags : dict A dict of flags, keyed by str name. Values can be Config objects or dicts. When the flag is triggered, The config is loaded as `self.config.update(cfg)`. """ from IPython.config.configurable import Configurable self.clear() if argv is None: argv = self.argv if aliases is None: aliases = self.aliases if flags is None: flags = self.flags # ensure argv is a list of unicode strings: uargv = self._decode_argv(argv) for idx,raw in enumerate(uargv): # strip leading '-' item = raw.lstrip('-') if raw == '--': # don't parse arguments after '--' # this is useful for relaying arguments to scripts, e.g. # ipython -i foo.py --pylab=qt -- args after '--' go-to-foo.py self.extra_args.extend(uargv[idx+1:]) break if kv_pattern.match(raw): lhs,rhs = item.split('=',1) # Substitute longnames for aliases. if lhs in aliases: lhs = aliases[lhs] if '.' 
not in lhs: # probably a mistyped alias, but not technically illegal warn.warn("Unrecognized alias: '%s', it will probably have no effect."%lhs) try: self._exec_config_str(lhs, rhs) except Exception: raise ArgumentError("Invalid argument: '%s'" % raw) elif flag_pattern.match(raw): if item in flags: cfg,help = flags[item] self._load_flag(cfg) else: raise ArgumentError("Unrecognized flag: '%s'"%raw) elif raw.startswith('-'): kv = '--'+item if kv_pattern.match(kv): raise ArgumentError("Invalid argument: '%s', did you mean '%s'?"%(raw, kv)) else: raise ArgumentError("Invalid argument: '%s'"%raw) else: # keep all args that aren't valid in a list, # in case our parent knows what to do with them. self.extra_args.append(item) return self.config
[ "def", "load_config", "(", "self", ",", "argv", "=", "None", ",", "aliases", "=", "None", ",", "flags", "=", "None", ")", ":", "from", "IPython", ".", "config", ".", "configurable", "import", "Configurable", "self", ".", "clear", "(", ")", "if", "argv"...
40.868421
18.118421
def activate_program(self, program): """ Called by program which desires to manipulate this actuator, when it is activated. """ self.logger.debug("activate_program %s", program) if program in self.program_stack: return with self._program_lock: self.logger.debug("activate_program got through %s", program) self.program_stack.append(program) self._update_program_stack()
[ "def", "activate_program", "(", "self", ",", "program", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"activate_program %s\"", ",", "program", ")", "if", "program", "in", "self", ".", "program_stack", ":", "return", "with", "self", ".", "_program_loc...
38.25
15.416667
def histogram_pb(tag, data, buckets=None, description=None): """Create a histogram summary protobuf. Arguments: tag: String tag for the summary. data: A `np.array` or array-like form of any shape. Must have type castable to `float`. buckets: Optional positive `int`. The output will have this many buckets, except in two edge cases. If there is no data, then there are no buckets. If there is data but all points have the same value, then there is one bucket whose left and right endpoints are the same. description: Optional long-form description for this summary, as a `str`. Markdown is supported. Defaults to empty. Returns: A `summary_pb2.Summary` protobuf object. """ bucket_count = DEFAULT_BUCKET_COUNT if buckets is None else buckets data = np.array(data).flatten().astype(float) if data.size == 0: buckets = np.array([]).reshape((0, 3)) else: min_ = np.min(data) max_ = np.max(data) range_ = max_ - min_ if range_ == 0: center = min_ buckets = np.array([[center - 0.5, center + 0.5, float(data.size)]]) else: bucket_width = range_ / bucket_count offsets = data - min_ bucket_indices = np.floor(offsets / bucket_width).astype(int) clamped_indices = np.minimum(bucket_indices, bucket_count - 1) one_hots = (np.array([clamped_indices]).transpose() == np.arange(0, bucket_count)) # broadcast assert one_hots.shape == (data.size, bucket_count), ( one_hots.shape, (data.size, bucket_count)) bucket_counts = np.sum(one_hots, axis=0) edges = np.linspace(min_, max_, bucket_count + 1) left_edges = edges[:-1] right_edges = edges[1:] buckets = np.array([left_edges, right_edges, bucket_counts]).transpose() tensor = tensor_util.make_tensor_proto(buckets, dtype=np.float64) summary_metadata = metadata.create_summary_metadata( display_name=None, description=description) summary = summary_pb2.Summary() summary.value.add(tag=tag, metadata=summary_metadata, tensor=tensor) return summary
[ "def", "histogram_pb", "(", "tag", ",", "data", ",", "buckets", "=", "None", ",", "description", "=", "None", ")", ":", "bucket_count", "=", "DEFAULT_BUCKET_COUNT", "if", "buckets", "is", "None", "else", "buckets", "data", "=", "np", ".", "array", "(", "...
40.288462
18.903846
def estimate_tx_gas(self, safe_address: str, to: str, value: int, data: bytes, operation: int) -> int: """ Estimate tx gas. Use the max of calculation using safe method and web3 if operation == CALL or use just the safe calculation otherwise """ # Costs to route through the proxy and nested calls proxy_gas = 1000 # https://github.com/ethereum/solidity/blob/dfe3193c7382c80f1814247a162663a97c3f5e67/libsolidity/codegen/ExpressionCompiler.cpp#L1764 # This was `false` before solc 0.4.21 -> `m_context.evmVersion().canOverchargeGasForCall()` # So gas needed by caller will be around 35k old_call_gas = 35000 safe_gas_estimation = (self.estimate_tx_gas_with_safe(safe_address, to, value, data, operation) + proxy_gas + old_call_gas) # We cannot estimate DELEGATECALL (different storage) if SafeOperation(operation) == SafeOperation.CALL: try: web3_gas_estimation = (self.estimate_tx_gas_with_web3(safe_address, to, value, data) + proxy_gas + old_call_gas) except ValueError: web3_gas_estimation = 0 return max(safe_gas_estimation, web3_gas_estimation) else: return safe_gas_estimation
[ "def", "estimate_tx_gas", "(", "self", ",", "safe_address", ":", "str", ",", "to", ":", "str", ",", "value", ":", "int", ",", "data", ":", "bytes", ",", "operation", ":", "int", ")", "->", "int", ":", "# Costs to route through the proxy and nested calls", "p...
55.083333
28.5
def track_statistic(self, name, description='', max_rows=None): """ Create a Statistic object in the Tracker. """ if name in self._tables: raise TableConflictError(name) if max_rows is None: max_rows = AnonymousUsageTracker.MAX_ROWS_PER_TABLE self.register_table(name, self.uuid, 'Statistic', description) self._tables[name] = Statistic(name, self, max_rows=max_rows)
[ "def", "track_statistic", "(", "self", ",", "name", ",", "description", "=", "''", ",", "max_rows", "=", "None", ")", ":", "if", "name", "in", "self", ".", "_tables", ":", "raise", "TableConflictError", "(", "name", ")", "if", "max_rows", "is", "None", ...
43.8
13.6
def get_draft_page_by_id(self, page_id, status='draft'): """ Provide content by id with status = draft :param page_id: :param status: :return: """ url = 'rest/api/content/{page_id}?status={status}'.format(page_id=page_id, status=status) return self.get(url)
[ "def", "get_draft_page_by_id", "(", "self", ",", "page_id", ",", "status", "=", "'draft'", ")", ":", "url", "=", "'rest/api/content/{page_id}?status={status}'", ".", "format", "(", "page_id", "=", "page_id", ",", "status", "=", "status", ")", "return", "self", ...
34.777778
17
def patched_context(*module_names, **kwargs): """apply emulation patches only for a specific context :param module_names: var-args for the modules to patch, as in :func:`patch` :param local: if True, unpatching is done on every switch-out, and re-patching on every switch-in, so that they are only applied for the one coroutine :returns: a contextmanager that patches on ``__enter__`` and unpatches on ``__exit__`` """ local = kwargs.pop('local', False) if kwargs: raise TypeError("patched_context() got an unexpected keyword " + "argument %r" % kwargs.keys()[0]) patch(*module_names) if local: @scheduler.local_incoming_hook @scheduler.local_outgoing_hook def hook(direction, target): {1: patch, 2: unpatch}[direction](*module_names) yield unpatch(*module_names) if local: scheduler.remove_local_incoming_hook(hook) scheduler.remove_local_outgoing_hook(hook)
[ "def", "patched_context", "(", "*", "module_names", ",", "*", "*", "kwargs", ")", ":", "local", "=", "kwargs", ".", "pop", "(", "'local'", ",", "False", ")", "if", "kwargs", ":", "raise", "TypeError", "(", "\"patched_context() got an unexpected keyword \"", "+...
33.3
21.866667
def get_dataset(self, key, info, out=None): """Get a dataset from the file.""" logger.debug("Reading %s.", key.name) values = self.file_content[key.name] selected = np.array(self.selected) if key.name in ("Latitude", "Longitude"): values = values / 10000. if key.name in ('Tsurf', 'CloudTopPres', 'CloudTopTemp'): goods = values > -9998. selected = np.array(selected & goods) if key.name in ('Tsurf', "Alt_surface", "CloudTopTemp"): values = values / 100. if key.name in ("CloudTopPres"): values = values / 10. else: selected = self.selected info.update(self.finfo) fill_value = np.nan if key.name == 'ct': fill_value = 0 info['_FillValue'] = 0 ds = DataArray(values, dims=['y', 'x'], attrs=info).where(selected, fill_value) # update dataset info with file_info return ds
[ "def", "get_dataset", "(", "self", ",", "key", ",", "info", ",", "out", "=", "None", ")", ":", "logger", ".", "debug", "(", "\"Reading %s.\"", ",", "key", ".", "name", ")", "values", "=", "self", ".", "file_content", "[", "key", ".", "name", "]", "...
35
15.071429
def get_git_version(path): """Get the GIT version.""" branch_name = get_git_cleaned_branch_name(path) # Determine whether working copy is dirty (i.e. contains modified files) mods = run_cmd(path, 'git', 'status', '--porcelain', '--untracked-files=no') dirty = '.dirty' if mods else '' # Get a list of all commits on branch, with corresponding branch/tag refs # Each line looks something like: "d3e4d42 (HEAD, master, tag: v0.1)" git_output = run_cmd(path, 'git', 'log', '--pretty="%h%d"') commits = git_output.strip().replace('"', '').split('\n') num_commits_since_branch = len(commits) # Short hash of the latest commit short_commit_name = commits[0].partition(' ')[0] # A valid version is sequence of dotted numbers optionally prefixed by 'v' valid_version = re.compile(r'^v?([\.\d]+)$') def tagged_version(commit): """First tag on commit that is valid version, as a list of numbers.""" refs = commit.partition(' ')[2] for ref in refs.lstrip('(').rstrip(')').split(', '): if ref.startswith('tag: '): tag = ref[5:].lower() found = valid_version.match(tag) if found: return [int(v) for v in found.group(1).split('.') if v] return [] # Walk back along branch and find first valid tagged version (or use 0.0) for commit in commits: version_numbers = tagged_version(commit) if version_numbers: break else: version_numbers = [0, 0] # It is a release if current commit has a version tag (and dir is clean) release = (commit == commits[0]) and not dirty if not release: # We are working towards the next (minor) release according to PEP 440 version_numbers[-1] += 1 version = '.'.join([str(v) for v in version_numbers]) if not release: # Development version contains extra embellishments version = ("%s.dev%d+%s.%s%s" % (version, num_commits_since_branch, branch_name, short_commit_name, dirty)) return version
[ "def", "get_git_version", "(", "path", ")", ":", "branch_name", "=", "get_git_cleaned_branch_name", "(", "path", ")", "# Determine whether working copy is dirty (i.e. contains modified files)", "mods", "=", "run_cmd", "(", "path", ",", "'git'", ",", "'status'", ",", "'-...
47.25
19.068182
def get_argval(argstr_, type_=None, default=None, help_=None, smartcast=True, return_specified=None, argv=None, verbose=None, debug=None, return_was_specified=False, pos=None): r""" Returns a value of an argument specified on the command line after some flag Args: argstr_ (str or tuple): string or tuple of strings denoting the command line values to parse type_ (None): type of the variable to parse (default = None) default (None): (default = None) help_ (None): help for this argument (not fully integrated) (default = None) smartcast (bool): tries to be smart about casting the parsed strings (default = True) return_specified (bool): (default = False) argv (None): override sys.argv with custom command line vector (default = None) pos (int): if specified the argument can also be found in position `pos` of the command line varargs TODO: depricate return_was_specified CommandLine: python -m utool.util_arg --test-get_argval python -m utool.util_arg --exec-get_argval:0 python -m utool.util_arg --exec-get_argval:1 python -c "import utool; print([(type(x), x) for x in [utool.get_argval('--quest')]])" --quest="holy grail" python -c "import utool; print([(type(x), x) for x in [utool.get_argval('--quest')]])" --quest="42" python -c "import utool; print([(type(x), x) for x in [utool.get_argval('--quest')]])" --quest=42 python -c "import utool; print([(type(x), x) for x in [utool.get_argval('--quest')]])" --quest 42 python -c "import utool; print([(type(x), x) for x in [utool.get_argval('--quest', float)]])" --quest 42 python -c "import utool; print([(type(x), x) for x in [utool.get_argval(('--nAssign'), int)]])" --nAssign 42 python -c "import utool; print([(type(x), x) for x in [utool.get_argval(('--test'), str)]])" --test python -c "import utool; print([(type(x), x) for x in [utool.get_argval(('--test'), str)]])" --test "foobar is good" --youbar ok Example: >>> # ENABLE_DOCTEST >>> from utool.util_arg import * # NOQA >>> import utool as ut >>> import sys >>> argv = ['--spam', 
'eggs', '--quest=holy grail', '--ans=42', '--the-val=1,2,3'] >>> # specify a list of args and kwargs to get_argval >>> argstr_kwargs_list = [ >>> ('--spam', dict(type_=str, default=None, argv=argv)), >>> ('--quest', dict(type_=str, default=None, argv=argv)), >>> (('--ans', '--foo'), dict(type_=int, default=None, argv=argv)), >>> (('--not-there', '--absent'), dict(argv=argv)), >>> ('--the_val', dict(type_=list, argv=argv)), >>> ('--the-val', dict(type_=list, argv=argv)), >>> ] >>> # Execute the command with for each of the test cases >>> res_list = [] >>> argstr_list = ut.get_list_column(argstr_kwargs_list, 0) >>> for argstr_, kwargs in argstr_kwargs_list: >>> res = get_argval(argstr_, **kwargs) >>> res_list.append(res) >>> result = ut.repr2(ut.odict(zip(argstr_list, res_list)), nl=1) >>> result = result.replace('u\'', '\'') # hack >>> print(result) { '--spam': 'eggs', '--quest': 'holy grail', ('--ans', '--foo'): 42, ('--not-there', '--absent'): None, '--the_val': [1, 2, 3], '--the-val': [1, 2, 3], } Example: >>> # ENABLE_DOCTEST >>> from utool.util_arg import * # NOQA >>> import utool as ut >>> import sys >>> argv = ['--slice1', '::', '--slice2=4:', '--slice3=::4', '--slice4', '[1,2,3,4]', '--slice5=3'] >>> # specify a list of args and kwargs to get_argval >>> argstr_kwargs_list = [ >>> ('--slice1', dict(type_='fuzzy_subset', default=None, argv=argv)), >>> ('--slice2', dict(type_='fuzzy_subset', default=None, argv=argv)), >>> ('--slice3', dict(type_='fuzzy_subset', default=None, argv=argv)), >>> ('--slice4', dict(type_='fuzzy_subset', default=None, argv=argv)), >>> ('--slice5', dict(type_='fuzzy_subset', default=None, argv=argv)), >>> ] >>> # Execute the command with for each of the test cases >>> res_list = [] >>> argstr_list = ut.get_list_column(argstr_kwargs_list, 0) >>> list1 = [1, 3, 5, 7, 9] >>> import numpy as np >>> list2 = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 1]]) >>> for argstr_, kwargs in argstr_kwargs_list: >>> res = get_argval(argstr_, 
**kwargs) >>> print('---') >>> print('res = %r' % (res,)) >>> print('list1[%r=%r] = %r' % (argstr_, res, ut.take(list1, res),)) >>> print('list2[%r=%r] = %r' % (argstr_, res, list2[res].tolist(),)) >>> res_list.append(res) >>> result = ut.repr4(ut.odict(zip(argstr_list, res_list))) >>> result = result.replace('u\'', '\'') # hack >>> print(result) """ if verbose is None: pass # verbose = VERYVERBOSE if debug is None: debug = DEBUG # debug = VERYVERBOSE if argv is None: argv = sys.argv #verbose = 1 if verbose: print('[get_argval] Searching Commandline for argstr_=%r' % (argstr_,)) #print('[get_argval] * type_ = %r' % (type_,)) #print('[get_argval] * default = %r' % (default,)) #print('[get_argval] * help_ = %r' % (help_,)) #print('[get_argval] * smartcast = %r' % (smartcast,)) if return_specified is None: return_specified = return_was_specified #print(argstr_) was_specified = False arg_after = default if type_ is bool: arg_after = False if default is None else default try: # New for loop way (accounts for =) argstr_list = meta_util_iter.ensure_iterable(argstr_) # arg registration _register_arg(argstr_list, type_, default, help_) # expand out hypens EXPAND_HYPENS = True if EXPAND_HYPENS: argstr_list2 = [] seen_ = set([]) for argstr in argstr_list: if argstr not in seen_: argstr_list2.append(argstr) seen_.add(argstr) if argstr.startswith('--'): num = 2 elif argstr.startswith('-'): num = 1 else: continue argstr2_0 = argstr[0:num] + argstr[num:].replace('_', '-') argstr2_1 = argstr[0:num] + argstr[num:].replace('-', '_') if argstr2_0 not in seen_: argstr_list2.append(argstr2_0) seen_.add(argstr2_0) if argstr2_1 not in seen_: argstr_list2.append(argstr2_1) seen_.add(argstr2_1) argstr_list = argstr_list2 # Check environment variables for default as well as argv import os """ set UTOOL_NOCNN=True export UTOOL_NOCNN True """ #argv_orig = argv[:] for key, val in os.environ.items(): key = key.upper() sentinal = 'UTOOL_' if key.startswith(sentinal): key = '--' + 
key[len(sentinal):].lower() new_argv = [key, val] if val.upper() in ['TRUE', 'FALSE', 'ON', 'OFF']: # handled by get_argflag continue argv = argv[:] + new_argv if debug: print('argv.extend(new_argv=%r)' % (new_argv,)) for argx, item in enumerate(argv): for argstr in argstr_list: if item == argstr: if type_ is bool: if debug: print('[get_argval] ... argstr=%r' % (argstr,)) print('[get_argval] ... Found bool argx=%r' % (argx,)) arg_after = True was_specified = True break if argx < len(argv): if type_ is list: # HACK FOR LIST. TODO INTEGRATE if debug: print('[get_argval] ... argstr=%r' % (argstr,)) print('[get_argval] ... Found noequal list argx=%r' % (argx,)) arg_after = parse_arglist_hack(argx, argv=argv) if debug: print('[get_argval] ... arg_after=%r' % (arg_after,)) print('argv=%r' % (argv,)) if smartcast: arg_after = list(map(util_type.smart_cast2, arg_after)) if debug: print('[get_argval] ... smartcast arg_after=%r' % (arg_after,)) else: if debug: print('[get_argval] ... argstr=%r' % (argstr,)) print('[get_argval] ... Found type_=%r argx=%r' % (type_, argx,)) arg_after = argv[argx + 1] if type_ is not None: arg_after = util_type.try_cast(arg_after, type_) elif smartcast: arg_after = util_type.smart_cast2(arg_after) if was_specified: print('WARNING: argstr=%r already specified' % (argstr,)) was_specified = True break elif item.startswith(argstr + '='): val_after = ''.join(item.split('=')[1:]) if type_ is list: # HACK FOR LIST. TODO INTEGRATE if verbose: print('[get_argval] ... 
Found equal list') val_after_ = val_after.rstrip(']').lstrip('[') if True: # Hacker way to be less hacky about parsing lists from utool import util_gridsearch blocks = util_gridsearch.parse_nestings(val_after_) sentinal = '##COM&&' changed = [(block[0], block[1].replace(',', sentinal)) if block[0] == 'nonNested' else block for block in blocks] val_after2 = util_gridsearch.recombine_nestings(changed) arg_after = val_after2.split(sentinal) else: arg_after = val_after_.split(',') if smartcast: arg_after = list(map(util_type.smart_cast2, arg_after)) else: if type_ is None: if smartcast: arg_after = util_type.smart_cast2(val_after) else: arg_after = val_after else: arg_after = util_type.try_cast(val_after, type_) if not isinstance(type_, six.string_types) and issubclass(type_, six.string_types): if arg_after == 'None': # hack arg_after = None if was_specified: print('WARNING: argstr=%r already specified' % (argstr,)) was_specified = True break except Exception as ex: import utool as ut ut.printex(ex, 'problem in arg_val', keys=['type_']) if ut.SUPER_STRICT: raise pass if not was_specified and pos is not None: varargs = get_cmdline_varargs(argv) if len(varargs) > pos: arg_after = varargs[pos] assert type_ is not list, 'list not handled yet' if type_ is not None: arg_after = util_type.try_cast(arg_after, type_) elif smartcast: arg_after = util_type.smart_cast2(arg_after) if verbose: print('[get_argval] ... Parsed arg_after=%r, was_specified=%r' % (arg_after, was_specified)) if return_specified: return arg_after, was_specified else: return arg_after
[ "def", "get_argval", "(", "argstr_", ",", "type_", "=", "None", ",", "default", "=", "None", ",", "help_", "=", "None", ",", "smartcast", "=", "True", ",", "return_specified", "=", "None", ",", "argv", "=", "None", ",", "verbose", "=", "None", ",", "...
46.639706
21.959559
def _entity_list_as_bel(entities: Iterable[BaseEntity]) -> str: """Stringify a list of BEL entities.""" return ', '.join( e.as_bel() for e in entities )
[ "def", "_entity_list_as_bel", "(", "entities", ":", "Iterable", "[", "BaseEntity", "]", ")", "->", "str", ":", "return", "', '", ".", "join", "(", "e", ".", "as_bel", "(", ")", "for", "e", "in", "entities", ")" ]
29.166667
19
def create_resumable_upload_session( self, content_type=None, size=None, origin=None, client=None ): """Create a resumable upload session. Resumable upload sessions allow you to start an upload session from one client and complete the session in another. This method is called by the initiator to set the metadata and limits. The initiator then passes the session URL to the client that will upload the binary data. The client performs a PUT request on the session URL to complete the upload. This process allows untrusted clients to upload to an access-controlled bucket. For more details, see the `documentation on signed URLs`_. .. _documentation on signed URLs: https://cloud.google.com/storage/\ docs/access-control/signed-urls#signing-resumable The content type of the upload will be determined in order of precedence: - The value passed in to this method (if not :data:`None`) - The value stored on the current blob - The default value ('application/octet-stream') .. note:: The effect of uploading to an existing blob depends on the "versioning" and "lifecycle" policies defined on the blob's bucket. In the absence of those policies, upload will overwrite any existing contents. See the `object versioning <https://cloud.google.com/storage/docs/object-versioning>`_ and `lifecycle <https://cloud.google.com/storage/docs/lifecycle>`_ API documents for details. If :attr:`encryption_key` is set, the blob will be encrypted with a `customer-supplied`_ encryption key. If :attr:`user_project` is set on the bucket, bills the API request to that project. :type size: int :param size: (Optional). The maximum number of bytes that can be uploaded using this session. If the size is not known when creating the session, this should be left blank. :type content_type: str :param content_type: (Optional) Type of content being uploaded. :type origin: str :param origin: (Optional) If set, the upload can only be completed by a user-agent that uploads from the given origin. 
This can be useful when passing the session to a web client. :type client: :class:`~google.cloud.storage.client.Client` :param client: (Optional) The client to use. If not passed, falls back to the ``client`` stored on the blob's bucket. :rtype: str :returns: The resumable upload session URL. The upload can be completed by making an HTTP PUT request with the file's contents. :raises: :class:`google.cloud.exceptions.GoogleCloudError` if the session creation response returns an error status. """ extra_headers = {} if origin is not None: # This header is specifically for client-side uploads, it # determines the origins allowed for CORS. extra_headers["Origin"] = origin try: dummy_stream = BytesIO(b"") # Send a fake the chunk size which we **know** will be acceptable # to the `ResumableUpload` constructor. The chunk size only # matters when **sending** bytes to an upload. upload, _ = self._initiate_resumable_upload( client, dummy_stream, content_type, size, None, predefined_acl=None, extra_headers=extra_headers, chunk_size=self._CHUNK_SIZE_MULTIPLE, ) return upload.resumable_url except resumable_media.InvalidResponse as exc: _raise_from_invalid_response(exc)
[ "def", "create_resumable_upload_session", "(", "self", ",", "content_type", "=", "None", ",", "size", "=", "None", ",", "origin", "=", "None", ",", "client", "=", "None", ")", ":", "extra_headers", "=", "{", "}", "if", "origin", "is", "not", "None", ":",...
42.478261
23.847826
def get_files(self): """ Read and parse files from a directory, return a dictionary of path => post """ files = {} for filename in os.listdir(self.source): path = os.path.join(self.source, filename) files[filename] = frontmatter.load(path, filename=filename, slug=os.path.splitext(filename)[0]) return files
[ "def", "get_files", "(", "self", ")", ":", "files", "=", "{", "}", "for", "filename", "in", "os", ".", "listdir", "(", "self", ".", "source", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "source", ",", "filename", ")",...
31.538462
12.461538
def copy_to_file(self, name, fp_dest, callback=None): """Write cur_dir/name to file-like `fp_dest`. Args: name (str): file name, located in self.curdir fp_dest (file-like): must support write() method callback (function, optional): Called like `func(buf)` for every written chunk """ assert compat.is_native(name) def _write_to_file(data): # print("_write_to_file() {} bytes.".format(len(data))) fp_dest.write(data) if callback: callback(data) self.ftp.retrbinary( "RETR {}".format(name), _write_to_file, FtpTarget.DEFAULT_BLOCKSIZE )
[ "def", "copy_to_file", "(", "self", ",", "name", ",", "fp_dest", ",", "callback", "=", "None", ")", ":", "assert", "compat", ".", "is_native", "(", "name", ")", "def", "_write_to_file", "(", "data", ")", ":", "# print(\"_write_to_file() {} bytes.\".format(len(da...
35.45
18.65
def etd_ms_py2dict(elements): """Convert a Python object into a Python dictionary.""" metadata_dict = {} # Loop through all elements in the Python object. for element in elements.children: # Start an empty element list if an entry for the element # list hasn't been made in the dictionary. if element.tag not in metadata_dict: metadata_dict[element.tag] = [] element_dict = {} if hasattr(element, 'role'): element_dict['role'] = element.role elif hasattr(element, 'scheme'): element_dict['scheme'] = element.scheme elif hasattr(element, 'qualifier') and element.qualifier is not None \ and element.tag == 'title': element_dict['qualifier'] = element.qualifier # Set the element's content as a dictionary # of children elements. if element.children: child_dict = {} for child in element.children: if child.content is not None: child_dict[child.tag] = child.content element_dict['content'] = child_dict # Set element content that is not children. elif element.content is not None: if element.content.strip() != '': element_dict['content'] = element.content # Append the dictionary to the element list # if the element has content or children. if element_dict.get('content', False): metadata_dict[element.tag].append(element_dict) return metadata_dict
[ "def", "etd_ms_py2dict", "(", "elements", ")", ":", "metadata_dict", "=", "{", "}", "# Loop through all elements in the Python object.", "for", "element", "in", "elements", ".", "children", ":", "# Start an empty element list if an entry for the element", "# list hasn't been ma...
43.742857
11.257143
def read_chunk(filename, offset=-1, length=-1, escape_data=False): """ Read a chunk of a file from an offset upto the length. """ try: length = int(length) offset = int(offset) except ValueError: return {} if not os.path.isfile(filename): return {} try: fstat = os.stat(filename) except Exception: return {} if offset == -1: offset = fstat.st_size if length == -1: length = fstat.st_size - offset with open(filename, "r") as fp: fp.seek(offset) try: data = fp.read(length) except IOError: return {} if data: data = _escape_data(data) if escape_data else data return dict(offset=offset, length=len(data), data=data) return dict(offset=offset, length=0)
[ "def", "read_chunk", "(", "filename", ",", "offset", "=", "-", "1", ",", "length", "=", "-", "1", ",", "escape_data", "=", "False", ")", ":", "try", ":", "length", "=", "int", "(", "length", ")", "offset", "=", "int", "(", "offset", ")", "except", ...
19.805556
22.416667
def set_icon(self, icon, qtgui_module): """Save the icon and set its attributes.""" if self._use_fallback: icon.addFile(self._fallback) else: for role, pixmap in self._roles.items(): if role.endswith("off"): mode = role[:-3] state = qtgui_module.QIcon.Off elif role.endswith("on"): mode = role[:-2] state = qtgui_module.QIcon.On else: continue mode = getattr(qtgui_module.QIcon, mode.title()) if pixmap: icon.addPixmap(qtgui_module.QPixmap(pixmap), mode, state) else: icon.addPixmap(qtgui_module.QPixmap(), mode, state) self.icon = icon
[ "def", "set_icon", "(", "self", ",", "icon", ",", "qtgui_module", ")", ":", "if", "self", ".", "_use_fallback", ":", "icon", ".", "addFile", "(", "self", ".", "_fallback", ")", "else", ":", "for", "role", ",", "pixmap", "in", "self", ".", "_roles", "...
33.708333
17.083333
def verify_create_instance(self, **kwargs): """Verifies an instance creation command. Without actually placing an order. See :func:`create_instance` for a list of available options. Example:: new_vsi = { 'domain': u'test01.labs.sftlyr.ws', 'hostname': u'minion05', 'datacenter': u'hkg02', 'flavor': 'BL1_1X2X100' 'dedicated': False, 'private': False, 'os_code' : u'UBUNTU_LATEST', 'hourly': True, 'ssh_keys': [1234], 'disks': ('100','25'), 'local_disk': True, 'tags': 'test, pleaseCancel', 'public_security_groups': [12, 15] } vsi = mgr.verify_create_instance(**new_vsi) # vsi will be a SoftLayer_Container_Product_Order_Virtual_Guest # if your order is correct. Otherwise you will get an exception print vsi """ kwargs.pop('tags', None) create_options = self._generate_create_dict(**kwargs) return self.guest.generateOrderTemplate(create_options)
[ "def", "verify_create_instance", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "pop", "(", "'tags'", ",", "None", ")", "create_options", "=", "self", ".", "_generate_create_dict", "(", "*", "*", "kwargs", ")", "return", "self", ".", "gue...
36.375
15.0625
def zip_job(job_ini, archive_zip='', risk_ini='', oq=None, log=logging.info): """ Zip the given job.ini file into the given archive, together with all related files. """ if not os.path.exists(job_ini): sys.exit('%s does not exist' % job_ini) archive_zip = archive_zip or 'job.zip' if isinstance(archive_zip, str): # actually it should be path-like if not archive_zip.endswith('.zip'): sys.exit('%s does not end with .zip' % archive_zip) if os.path.exists(archive_zip): sys.exit('%s exists already' % archive_zip) # do not validate to avoid permissions error on the export_dir oq = oq or readinput.get_oqparam(job_ini, validate=False) if risk_ini: risk_ini = os.path.normpath(os.path.abspath(risk_ini)) risk_inputs = readinput.get_params([risk_ini])['inputs'] del risk_inputs['job_ini'] oq.inputs.update(risk_inputs) files = readinput.get_input_files(oq) if risk_ini: files = [risk_ini] + files return general.zipfiles(files, archive_zip, log=log)
[ "def", "zip_job", "(", "job_ini", ",", "archive_zip", "=", "''", ",", "risk_ini", "=", "''", ",", "oq", "=", "None", ",", "log", "=", "logging", ".", "info", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "job_ini", ")", ":", "sys"...
44.333333
14.666667
def assert_between(lower_bound, upper_bound, expr, msg_fmt="{msg}"): """Fail if an expression is not between certain bounds (inclusive). >>> assert_between(5, 15, 5) >>> assert_between(5, 15, 15) >>> assert_between(5, 15, 4.9) Traceback (most recent call last): ... AssertionError: 4.9 is not between 5 and 15 The following msg_fmt arguments are supported: * msg - the default error message * lower - lower bound * upper - upper bound * expr - tested expression """ if not lower_bound <= expr <= upper_bound: msg = "{!r} is not between {} and {}".format( expr, lower_bound, upper_bound ) fail( msg_fmt.format( msg=msg, lower=lower_bound, upper=upper_bound, expr=expr ) )
[ "def", "assert_between", "(", "lower_bound", ",", "upper_bound", ",", "expr", ",", "msg_fmt", "=", "\"{msg}\"", ")", ":", "if", "not", "lower_bound", "<=", "expr", "<=", "upper_bound", ":", "msg", "=", "\"{!r} is not between {} and {}\"", ".", "format", "(", "...
30.461538
17
def requires_grad(m:nn.Module, b:Optional[bool]=None)->Optional[bool]: "If `b` is not set return `requires_grad` of first param, else set `requires_grad` on all params as `b`" ps = list(m.parameters()) if not ps: return None if b is None: return ps[0].requires_grad for p in ps: p.requires_grad=b
[ "def", "requires_grad", "(", "m", ":", "nn", ".", "Module", ",", "b", ":", "Optional", "[", "bool", "]", "=", "None", ")", "->", "Optional", "[", "bool", "]", ":", "ps", "=", "list", "(", "m", ".", "parameters", "(", ")", ")", "if", "not", "ps"...
51.833333
22.166667
def restore_point(cls, cluster_id_label, s3_location, backup_id, table_names, overwrite=True, automatic=True): """ Restoring cluster from a given hbase snapshot id """ conn = Qubole.agent(version=Cluster.api_version) parameters = {} parameters['s3_location'] = s3_location parameters['backup_id'] = backup_id parameters['table_names'] = table_names parameters['overwrite'] = overwrite parameters['automatic'] = automatic return conn.post(cls.element_path(cluster_id_label) + "/restore_point", data=parameters)
[ "def", "restore_point", "(", "cls", ",", "cluster_id_label", ",", "s3_location", ",", "backup_id", ",", "table_names", ",", "overwrite", "=", "True", ",", "automatic", "=", "True", ")", ":", "conn", "=", "Qubole", ".", "agent", "(", "version", "=", "Cluste...
48.833333
16.5
def node_is_embedded_doc_attr(node): """Checks if a node is a valid field or method in a embedded document. """ embedded_doc = get_field_embedded_doc(node.last_child()) name = node.attrname try: r = bool(embedded_doc.lookup(name)[1][0]) except IndexError: r = False return r
[ "def", "node_is_embedded_doc_attr", "(", "node", ")", ":", "embedded_doc", "=", "get_field_embedded_doc", "(", "node", ".", "last_child", "(", ")", ")", "name", "=", "node", ".", "attrname", "try", ":", "r", "=", "bool", "(", "embedded_doc", ".", "lookup", ...
28.090909
17.272727
def import_obj(cls, i_datasource, import_time=None): """Imports the datasource from the object to the database. Metrics and columns and datasource will be overrided if exists. This function can be used to import/export dashboards between multiple superset instances. Audit metadata isn't copies over. """ def lookup_sqlatable(table): return db.session.query(SqlaTable).join(Database).filter( SqlaTable.table_name == table.table_name, SqlaTable.schema == table.schema, Database.id == table.database_id, ).first() def lookup_database(table): return db.session.query(Database).filter_by( database_name=table.params_dict['database_name']).one() return import_datasource.import_datasource( db.session, i_datasource, lookup_database, lookup_sqlatable, import_time)
[ "def", "import_obj", "(", "cls", ",", "i_datasource", ",", "import_time", "=", "None", ")", ":", "def", "lookup_sqlatable", "(", "table", ")", ":", "return", "db", ".", "session", ".", "query", "(", "SqlaTable", ")", ".", "join", "(", "Database", ")", ...
46.6
19.15
def precision_series(y_true, y_score, k=None): """ Returns series of length k whose i-th entry is the precision in the top i TODO: extrapolate here """ y_true, y_score = to_float(y_true, y_score) top = _argsort(y_score, k) n = np.nan_to_num(y_true[top]).cumsum() # fill missing labels with 0 d = (~np.isnan(y_true[top])).cumsum() # count number of labels return pd.Series(n/d, index=np.arange(1, len(n)+1))
[ "def", "precision_series", "(", "y_true", ",", "y_score", ",", "k", "=", "None", ")", ":", "y_true", ",", "y_score", "=", "to_float", "(", "y_true", ",", "y_score", ")", "top", "=", "_argsort", "(", "y_score", ",", "k", ")", "n", "=", "np", ".", "n...
39.727273
17.363636
def juliandate(time: datetime) -> float: """ Python datetime to Julian time from D.Vallado Fundamentals of Astrodynamics and Applications p.187 and J. Meeus Astronomical Algorithms 1991 Eqn. 7.1 pg. 61 Parameters ---------- time : datetime.datetime time to convert Results ------- jd : float Julian date """ times = np.atleast_1d(time) assert times.ndim == 1 jd = np.empty(times.size) for i, t in enumerate(times): if t.month < 3: year = t.year - 1 month = t.month + 12 else: year = t.year month = t.month A = int(year / 100.0) B = 2 - A + int(A / 4.) C = ((t.second / 60. + t.minute) / 60. + t.hour) / 24. jd[i] = (int(365.25 * (year + 4716)) + int(30.6001 * (month + 1)) + t.day + B - 1524.5 + C) return jd.squeeze()
[ "def", "juliandate", "(", "time", ":", "datetime", ")", "->", "float", ":", "times", "=", "np", ".", "atleast_1d", "(", "time", ")", "assert", "times", ".", "ndim", "==", "1", "jd", "=", "np", ".", "empty", "(", "times", ".", "size", ")", "for", ...
22.075
21.775
def as_xml(self,parent): """Create vcard-tmp XML representation of the field. :Parameters: - `parent`: parent node for the element :Types: - `parent`: `libxml2.xmlNode` :return: xml node with the field data. :returntype: `libxml2.xmlNode`""" n=parent.newChild(None,"TEL",None) for t in ("home","work","voice","fax","pager","msg","cell","video", "bbs","modem","isdn","pcs","pref"): if t in self.type: n.newChild(None,t.upper(),None) n.newTextChild(None,"NUMBER",to_utf8(self.number)) return n
[ "def", "as_xml", "(", "self", ",", "parent", ")", ":", "n", "=", "parent", ".", "newChild", "(", "None", ",", "\"TEL\"", ",", "None", ")", "for", "t", "in", "(", "\"home\"", ",", "\"work\"", ",", "\"voice\"", ",", "\"fax\"", ",", "\"pager\"", ",", ...
36.294118
15.647059
def genty(target_cls): """ This decorator takes the information provided by @genty_dataset, @genty_dataprovider, and @genty_repeat and generates the corresponding test methods. :param target_cls: Test class whose test methods have been decorated. :type target_cls: `class` """ tests = _expand_tests(target_cls) tests_with_datasets = _expand_datasets(tests) tests_with_datasets_and_repeats = _expand_repeats(tests_with_datasets) _add_new_test_methods(target_cls, tests_with_datasets_and_repeats) return target_cls
[ "def", "genty", "(", "target_cls", ")", ":", "tests", "=", "_expand_tests", "(", "target_cls", ")", "tests_with_datasets", "=", "_expand_datasets", "(", "tests", ")", "tests_with_datasets_and_repeats", "=", "_expand_repeats", "(", "tests_with_datasets", ")", "_add_new...
31.222222
22.111111
def backward(self, diff_x, influences, activations, **kwargs): """ Backward pass through the network, including update. Parameters ---------- diff_x : numpy array A matrix containing the differences between the input and neurons. influences : numpy array A matrix containing the influence each neuron has on each other neuron. This is used to calculate the updates. activations : numpy array The activations each neuron has to each data point. This is used to calculate the BMU. Returns ------- update : numpy array A numpy array containing the updates to the neurons. """ bmu = self._get_bmu(activations) influence = influences[bmu] update = np.multiply(diff_x, influence) return update
[ "def", "backward", "(", "self", ",", "diff_x", ",", "influences", ",", "activations", ",", "*", "*", "kwargs", ")", ":", "bmu", "=", "self", ".", "_get_bmu", "(", "activations", ")", "influence", "=", "influences", "[", "bmu", "]", "update", "=", "np",...
34.32
19.36
def __process_warc_gz_file(self, path_name): """ Iterates all transactions in one WARC file and for each transaction tries to extract an article object. Afterwards, each article is checked against the filter criteria and if all are passed, the function on_valid_article_extracted is invoked with the article object. :param path_name: :return: """ counter_article_total = 0 counter_article_passed = 0 counter_article_discarded = 0 start_time = time.time() with open(path_name, 'rb') as stream: for record in ArchiveIterator(stream): # try: if record.rec_type == 'response': counter_article_total += 1 # if the article passes filter tests, we notify the user filter_pass, article = self.__filter_record(record) if filter_pass: counter_article_passed += 1 if not article: article = NewsPlease.from_warc(record) self.__logger.info('article pass (%s; %s; %s)', article.source_domain, article.date_publish, article.title) self.__callback_on_article_extracted(article) else: counter_article_discarded += 1 if article: self.__logger.info('article discard (%s; %s; %s)', article.source_domain, article.date_publish, article.title) else: self.__logger.info('article discard (%s)', record.rec_headers.get_header('WARC-Target-URI')) if counter_article_total % 10 == 0: elapsed_secs = time.time() - start_time secs_per_article = elapsed_secs / counter_article_total self.__logger.info('statistics') self.__logger.info('pass = %i, discard = %i, total = %i', counter_article_passed, counter_article_discarded, counter_article_total) self.__logger.info('extraction from current WARC file started %s; %f s/article', human(start_time), secs_per_article) # except: # if self.__continue_after_error: # self.__logger.error('Unexpected error: %s', sys.exc_info()[0]) # pass # else: # raise # cleanup if self.__delete_warc_after_extraction: os.remove(path_name) self.__register_fully_extracted_warc_file(self.__warc_download_url)
[ "def", "__process_warc_gz_file", "(", "self", ",", "path_name", ")", ":", "counter_article_total", "=", "0", "counter_article_passed", "=", "0", "counter_article_discarded", "=", "0", "start_time", "=", "time", ".", "time", "(", ")", "with", "open", "(", "path_n...
48.57377
25.491803
def _apply_padding(lhs_arr, rhs_arr, offset, pad_mode, direction): """Apply padding to ``lhs_arr`` according to ``pad_mode``. This helper assigns the values in the excess parts (if existent) of ``lhs_arr`` according to the provided padding mode. This applies to the following values for ``pad_mode``: ``periodic``, ``symmetric``, ``order0``, ``order1`` See `the online documentation <https://odlgroup.github.io/odl/math/resizing_ops.html>`_ on resizing operators for details. """ if pad_mode not in ('periodic', 'symmetric', 'order0', 'order1'): return full_slc = [slice(None)] * lhs_arr.ndim intersec_slc, _ = _intersection_slice_tuples(lhs_arr, rhs_arr, offset) if direction == 'forward': working_slc = list(intersec_slc) else: working_slc = list(full_slc) # TODO: order axes according to padding size for optimization (largest # last)? Axis strides could be important, too. for axis, (n_lhs, n_rhs) in enumerate(zip(lhs_arr.shape, rhs_arr.shape)): if n_lhs <= n_rhs: continue # restriction, nothing to do n_pad_l = offset[axis] n_pad_r = n_lhs - n_rhs - n_pad_l # Error scenarios with illegal lengths if pad_mode == 'order0' and n_rhs == 0: raise ValueError('in axis {}: the smaller array must have size ' '>= 1 for order 0 padding, got 0' ''.format(axis)) if pad_mode == 'order1' and n_rhs < 2: raise ValueError('in axis {}: the smaller array must have size ' '>= 2 for order 1 padding, got {}' ''.format(axis, n_rhs)) for lr, pad_len in [('left', n_pad_l), ('right', n_pad_r)]: if pad_mode == 'periodic' and pad_len > n_rhs: raise ValueError('in axis {}: {} padding length {} exceeds ' 'the size {} of the smaller array; this is ' 'not allowed for periodic padding' ''.format(axis, lr, pad_len, n_rhs)) elif pad_mode == 'symmetric' and pad_len >= n_rhs: raise ValueError('in axis {}: {} padding length {} is larger ' 'or equal to the size {} of the smaller ' 'array; this is not allowed for symmetric ' 'padding' ''.format(axis, lr, pad_len, n_rhs)) # Slice tuples used to index LHS and RHS for 
left and right padding, # respectively; we make 4 copies of `working_slc` as lists lhs_slc_l, lhs_slc_r, rhs_slc_l, rhs_slc_r = map( list, [working_slc] * 4) # We're always using the outer (excess) parts involved in padding # on the LHS of the assignment, so we set them here. pad_slc_outer_l, pad_slc_outer_r = _padding_slices_outer( lhs_arr, rhs_arr, axis, offset) if direction == 'forward': lhs_slc_l[axis] = pad_slc_outer_l lhs_slc_r[axis] = pad_slc_outer_r else: rhs_slc_l[axis] = pad_slc_outer_l rhs_slc_r[axis] = pad_slc_outer_r if pad_mode in ('periodic', 'symmetric'): pad_slc_inner_l, pad_slc_inner_r = _padding_slices_inner( lhs_arr, rhs_arr, axis, offset, pad_mode) # Using `lhs_arr` on both sides of the assignment such that the # shapes match and the "corner" blocks are properly assigned # or used in the addition for the adjoint, respectively. if direction == 'forward': rhs_slc_l[axis] = pad_slc_inner_l rhs_slc_r[axis] = pad_slc_inner_r lhs_slc_l, rhs_slc_l, lhs_slc_r, rhs_slc_r = map( tuple, [lhs_slc_l, rhs_slc_l, lhs_slc_r, rhs_slc_r]) lhs_arr[lhs_slc_l] = lhs_arr[rhs_slc_l] lhs_arr[lhs_slc_r] = lhs_arr[rhs_slc_r] else: lhs_slc_l[axis] = pad_slc_inner_l lhs_slc_r[axis] = pad_slc_inner_r lhs_slc_l, rhs_slc_l, lhs_slc_r, rhs_slc_r = map( tuple, [lhs_slc_l, rhs_slc_l, lhs_slc_r, rhs_slc_r]) lhs_arr[lhs_slc_l] += lhs_arr[rhs_slc_l] lhs_arr[lhs_slc_r] += lhs_arr[rhs_slc_r] elif pad_mode == 'order0': # The `_padding_slices_inner` helper returns the slices for the # boundary values. 
left_slc, right_slc = _padding_slices_inner( lhs_arr, rhs_arr, axis, offset, pad_mode) if direction == 'forward': rhs_slc_l[axis] = left_slc rhs_slc_r[axis] = right_slc lhs_slc_l, rhs_slc_l, lhs_slc_r, rhs_slc_r = map( tuple, [lhs_slc_l, rhs_slc_l, lhs_slc_r, rhs_slc_r]) lhs_arr[lhs_slc_l] = lhs_arr[rhs_slc_l] lhs_arr[lhs_slc_r] = lhs_arr[rhs_slc_r] else: lhs_slc_l[axis] = left_slc lhs_slc_r[axis] = right_slc lhs_slc_l, rhs_slc_l, lhs_slc_r, rhs_slc_r = map( tuple, [lhs_slc_l, rhs_slc_l, lhs_slc_r, rhs_slc_r]) lhs_arr[lhs_slc_l] += np.sum( lhs_arr[rhs_slc_l], axis=axis, keepdims=True, dtype=lhs_arr.dtype) lhs_arr[lhs_slc_r] += np.sum( lhs_arr[rhs_slc_r], axis=axis, keepdims=True, dtype=lhs_arr.dtype) elif pad_mode == 'order1': # Some extra work necessary: need to compute the derivative at # the boundary and use that to continue with constant derivative. # Slice for broadcasting of a 1D array along `axis` bcast_slc = [None] * lhs_arr.ndim bcast_slc[axis] = slice(None) bcast_slc = tuple(bcast_slc) # Slices for the boundary in `axis` left_slc, right_slc = _padding_slices_inner( lhs_arr, rhs_arr, axis, offset, pad_mode) # Create slice tuples for indexing of the boundary values bdry_slc_l = list(working_slc) bdry_slc_l[axis] = left_slc bdry_slc_l = tuple(bdry_slc_l) bdry_slc_r = list(working_slc) bdry_slc_r[axis] = right_slc bdry_slc_r = tuple(bdry_slc_r) # For the slope at the boundary, we need two neighboring points. # We create the corresponding slices from the boundary slices. slope_slc_l = list(working_slc) slope_slc_l[axis] = slice(left_slc.start, left_slc.stop + 1) slope_slc_l = tuple(slope_slc_l) slope_slc_r = list(working_slc) slope_slc_r[axis] = slice(right_slc.start - 1, right_slc.stop) slope_slc_r = tuple(slope_slc_r) # The `np.arange`s, broadcast along `axis`, are used to create the # constant-slope continuation (forward) or to calculate the # first order moments (adjoint). 
arange_l = np.arange(-n_pad_l, 0, dtype=lhs_arr.dtype)[bcast_slc] arange_r = np.arange(1, n_pad_r + 1, dtype=lhs_arr.dtype)[bcast_slc] lhs_slc_l, rhs_slc_l, lhs_slc_r, rhs_slc_r = map( tuple, [lhs_slc_l, rhs_slc_l, lhs_slc_r, rhs_slc_r]) if direction == 'forward': # Take first order difference to get the derivative # along `axis`. slope_l = np.diff(lhs_arr[slope_slc_l], n=1, axis=axis) slope_r = np.diff(lhs_arr[slope_slc_r], n=1, axis=axis) # Finally assign the constant slope values lhs_arr[lhs_slc_l] = lhs_arr[bdry_slc_l] + arange_l * slope_l lhs_arr[lhs_slc_r] = lhs_arr[bdry_slc_r] + arange_r * slope_r else: # Same as in 'order0' lhs_arr[bdry_slc_l] += np.sum(lhs_arr[rhs_slc_l], axis=axis, keepdims=True, dtype=lhs_arr.dtype) lhs_arr[bdry_slc_r] += np.sum(lhs_arr[rhs_slc_r], axis=axis, keepdims=True, dtype=lhs_arr.dtype) # Calculate the order 1 moments moment1_l = np.sum(arange_l * lhs_arr[rhs_slc_l], axis=axis, keepdims=True, dtype=lhs_arr.dtype) moment1_r = np.sum(arange_r * lhs_arr[rhs_slc_r], axis=axis, keepdims=True, dtype=lhs_arr.dtype) # Add moment1 at the "width-2 boundary layers", with the sign # corresponding to the sign in the derivative calculation # of the forward padding. sign = np.array([-1, 1])[bcast_slc] lhs_arr[slope_slc_l] += moment1_l * sign lhs_arr[slope_slc_r] += moment1_r * sign if direction == 'forward': working_slc[axis] = full_slc[axis] else: working_slc[axis] = intersec_slc[axis]
[ "def", "_apply_padding", "(", "lhs_arr", ",", "rhs_arr", ",", "offset", ",", "pad_mode", ",", "direction", ")", ":", "if", "pad_mode", "not", "in", "(", "'periodic'", ",", "'symmetric'", ",", "'order0'", ",", "'order1'", ")", ":", "return", "full_slc", "="...
44.941463
21.443902
def name_resolve(self, name=None, recursive=False, nocache=False, **kwargs): """Gets the value currently published at an IPNS name. IPNS is a PKI namespace, where names are the hashes of public keys, and the private key enables publishing new (signed) values. In resolve, the default value of ``name`` is your own identity public key. .. code-block:: python >>> c.name_resolve() {'Path': '/ipfs/QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d'} Parameters ---------- name : str The IPNS name to resolve (defaults to the connected node) recursive : bool Resolve until the result is not an IPFS name (default: false) nocache : bool Do not use cached entries (default: false) Returns ------- dict : The IPFS path the IPNS hash points at """ kwargs.setdefault("opts", {"recursive": recursive, "nocache": nocache}) args = (name,) if name is not None else () return self._client.request('/name/resolve', args, decoder='json', **kwargs)
[ "def", "name_resolve", "(", "self", ",", "name", "=", "None", ",", "recursive", "=", "False", ",", "nocache", "=", "False", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "setdefault", "(", "\"opts\"", ",", "{", "\"recursive\"", ":", "recursive", ",...
38.548387
22.516129
def _delete_nve_db(self, vni, device_id, mcast_group, host_id): """Delete the nexus NVE database entry. Called during delete precommit port event. """ rows = nxos_db.get_nve_vni_deviceid_bindings(vni, device_id) for row in rows: nxos_db.remove_nexusnve_binding(vni, row.switch_ip, device_id)
[ "def", "_delete_nve_db", "(", "self", ",", "vni", ",", "device_id", ",", "mcast_group", ",", "host_id", ")", ":", "rows", "=", "nxos_db", ".", "get_nve_vni_deviceid_bindings", "(", "vni", ",", "device_id", ")", "for", "row", "in", "rows", ":", "nxos_db", "...
42.125
18.875
def getTableName(self, tableClass): """ Retrieve the fully qualified name of the table holding items of a particular class in this store. If the table does not exist in the database, it will be created as a side-effect. @param tableClass: an Item subclass @raises axiom.errors.ItemClassesOnly: if an object other than a subclass of Item is passed. @return: a string """ if not (isinstance(tableClass, type) and issubclass(tableClass, item.Item)): raise errors.ItemClassesOnly("Only subclasses of Item have table names.") if tableClass not in self.typeToTableNameCache: self.typeToTableNameCache[tableClass] = self._tableNameFor(tableClass.typeName, tableClass.schemaVersion) # make sure the table exists self.getTypeID(tableClass) return self.typeToTableNameCache[tableClass]
[ "def", "getTableName", "(", "self", ",", "tableClass", ")", ":", "if", "not", "(", "isinstance", "(", "tableClass", ",", "type", ")", "and", "issubclass", "(", "tableClass", ",", "item", ".", "Item", ")", ")", ":", "raise", "errors", ".", "ItemClassesOnl...
43.238095
23.428571
def error(self, msg, file=None): """ Outputs the error msg to the file if specified, or to the io_manager's stderr if available, or to sys.stderr. """ self.error_encountered = True file.write(self.error_prefix) file.write(msg) file.write('\n') file.flush()
[ "def", "error", "(", "self", ",", "msg", ",", "file", "=", "None", ")", ":", "self", ".", "error_encountered", "=", "True", "file", ".", "write", "(", "self", ".", "error_prefix", ")", "file", ".", "write", "(", "msg", ")", "file", ".", "write", "(...
31.9
11.1
def check_tune_params_list(tune_params): """ raise an exception if a tune parameter has a forbidden name """ forbidden_names = ("grid_size_x", "grid_size_y", "grid_size_z") forbidden_name_substr = ("time", "times") for name, param in tune_params.items(): if name in forbidden_names: raise ValueError("Tune parameter " + name + " with value " + str(param) + " has a forbidden name!") for forbidden_substr in forbidden_name_substr: if forbidden_substr in name: raise ValueError("Tune parameter " + name + " with value " + str(param) + " has a forbidden name: not allowed to use " + forbidden_substr + " in tune parameter names!")
[ "def", "check_tune_params_list", "(", "tune_params", ")", ":", "forbidden_names", "=", "(", "\"grid_size_x\"", ",", "\"grid_size_y\"", ",", "\"grid_size_z\"", ")", "forbidden_name_substr", "=", "(", "\"time\"", ",", "\"times\"", ")", "for", "name", ",", "param", "...
69
26.9
def mean(self): """Compute mean of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex. """ if self._can_use_new_school(): self._prep_spark_sql_groupby() import pyspark.sql.functions as func return self._use_aggregation(func.mean) self._prep_pandas_groupby() return DataFrame.fromDataFrameRDD( self._regroup_mergedRDD().values().map( lambda x: x.mean()), self.sql_ctx)
[ "def", "mean", "(", "self", ")", ":", "if", "self", ".", "_can_use_new_school", "(", ")", ":", "self", ".", "_prep_spark_sql_groupby", "(", ")", "import", "pyspark", ".", "sql", ".", "functions", "as", "func", "return", "self", ".", "_use_aggregation", "("...
39.461538
11.230769
def _walk(directory, enable_scandir=False, **kwargs): """ Internal function to return walk generator either from os or scandir :param directory: directory to traverse :param enable_scandir: on python < 3.5 enable external scandir package :param kwargs: arguments to pass to walk function :return: walk generator """ walk = os.walk if python_version < (3, 5) and enable_scandir: import scandir walk = scandir.walk return walk(directory, **kwargs)
[ "def", "_walk", "(", "directory", ",", "enable_scandir", "=", "False", ",", "*", "*", "kwargs", ")", ":", "walk", "=", "os", ".", "walk", "if", "python_version", "<", "(", "3", ",", "5", ")", "and", "enable_scandir", ":", "import", "scandir", "walk", ...
34.928571
15.357143
def evolve(self, new_date): """ evolve to the new process state at the next date :param date new_date: date or point in time of the new state :return State: """ if self.state.date == new_date and not self.initial_state.date == new_date: return self.state if self._len: q = [self.random.gauss(0., 1.) for _ in range(int(self._len))] else: q = self.random.gauss(0., 1.) self.state.value = self.func(self.state.value, self.state.date, new_date, q) self.state.date = new_date return self.state
[ "def", "evolve", "(", "self", ",", "new_date", ")", ":", "if", "self", ".", "state", ".", "date", "==", "new_date", "and", "not", "self", ".", "initial_state", ".", "date", "==", "new_date", ":", "return", "self", ".", "state", "if", "self", ".", "_l...
37.4375
19.6875
def forall(self, vars_list: List[str]) -> 'TensorFluent': '''Returns the TensorFluent for the forall aggregation function. Args: vars_list: The list of variables to be aggregated over. Returns: A TensorFluent wrapping the forall aggregation function. ''' return self._aggregation_op(tf.reduce_all, self, vars_list)
[ "def", "forall", "(", "self", ",", "vars_list", ":", "List", "[", "str", "]", ")", "->", "'TensorFluent'", ":", "return", "self", ".", "_aggregation_op", "(", "tf", ".", "reduce_all", ",", "self", ",", "vars_list", ")" ]
37.1
29.1
def main(): """Program entry point. """ global cf_verbose, cf_show_comment, cf_charset global cf_extract, cf_test_read, cf_test_unrar global cf_test_memory psw = None # parse args try: opts, args = getopt.getopt(sys.argv[1:], 'p:C:hvcxtRM') except getopt.error as ex: print(str(ex), file=sys.stderr) sys.exit(1) for o, v in opts: if o == '-p': psw = v elif o == '-h': xprint(usage) return elif o == '-v': cf_verbose += 1 elif o == '-c': cf_show_comment = 1 elif o == '-x': cf_extract = 1 elif o == '-t': cf_test_read += 1 elif o == '-T': cf_test_unrar = 1 elif o == '-M': cf_test_memory = 1 elif o == '-C': cf_charset = v else: raise Exception("unhandled switch: " + o) args2 = [] for a in args: if a[0] == "@": for ln in open(a[1:], 'r'): fn = ln[:-1] args2.append(fn) else: args2.append(a) args = args2 if not args: xprint(usage) # pypy .readinto()+memoryview() is buggy #if cf_test_read > 1 and hasattr(sys, 'pypy_version_info'): # cf_test_read = 1 for fn in args: test(fn, psw)
[ "def", "main", "(", ")", ":", "global", "cf_verbose", ",", "cf_show_comment", ",", "cf_charset", "global", "cf_extract", ",", "cf_test_read", ",", "cf_test_unrar", "global", "cf_test_memory", "psw", "=", "None", "# parse args", "try", ":", "opts", ",", "args", ...
22.931034
19.137931
def get_element(self, tag_name, attribute, **attribute_filter): """ Return element in xml files which match with the tag name and the specific attribute :param tag_name: specify the tag name :type tag_name: string :param attribute: specify the attribute :type attribute: string :rtype: string """ for i in self.xml: if self.xml[i] is None : continue tag = self.xml[i].getElementsByTagName(tag_name) if tag is None: return None for item in tag: skip_this_item = False for attr, val in list(attribute_filter.items()): attr_val = item.getAttributeNS(NS_ANDROID_URI, attr) if attr_val != val: skip_this_item = True break if skip_this_item: continue value = item.getAttributeNS(NS_ANDROID_URI, attribute) if len(value) > 0: return value return None
[ "def", "get_element", "(", "self", ",", "tag_name", ",", "attribute", ",", "*", "*", "attribute_filter", ")", ":", "for", "i", "in", "self", ".", "xml", ":", "if", "self", ".", "xml", "[", "i", "]", "is", "None", ":", "continue", "tag", "=", "self"...
33.575758
17.393939
def get_key(self, key, bucket_name=None): """ Returns a boto3.s3.Object :param key: the path to the key :type key: str :param bucket_name: the name of the bucket :type bucket_name: str """ if not bucket_name: (bucket_name, key) = self.parse_s3_url(key) obj = self.get_resource_type('s3').Object(bucket_name, key) obj.load() return obj
[ "def", "get_key", "(", "self", ",", "key", ",", "bucket_name", "=", "None", ")", ":", "if", "not", "bucket_name", ":", "(", "bucket_name", ",", "key", ")", "=", "self", ".", "parse_s3_url", "(", "key", ")", "obj", "=", "self", ".", "get_resource_type",...
28.133333
15.066667
def send_job_and_wait(self, message, body_params=None, timeout=None, raises=False): """.. versionchanged:: 0.8.4 Send a message as a job and wait for the response. .. note:: Not all messages are jobs, you'll have to find out which are which :param message: a message instance :type message: :class:`.Msg`, :class:`.MsgProto` :param body_params: a dict with params to the body (only :class:`.MsgProto`) :type body_params: dict :param timeout: (optional) seconds to wait :type timeout: :class:`int` :param raises: (optional) On timeout if ``False`` return ``None``, else raise ``gevent.Timeout`` :type raises: :class:`bool` :return: response proto message :rtype: :class:`.Msg`, :class:`.MsgProto` :raises: ``gevent.Timeout`` """ job_id = self.send_job(message, body_params) response = self.wait_event(job_id, timeout, raises=raises) if response is None: return None return response[0].body
[ "def", "send_job_and_wait", "(", "self", ",", "message", ",", "body_params", "=", "None", ",", "timeout", "=", "None", ",", "raises", "=", "False", ")", ":", "job_id", "=", "self", ".", "send_job", "(", "message", ",", "body_params", ")", "response", "="...
43.458333
18.666667
def get_functionalHome(self, functionalHomeType: type) -> FunctionalHome: """ gets the specified functionalHome Args: functionalHome(type): the type of the functionalHome which should be returned Returns: the FunctionalHome or None if it couldn't be found """ for x in self.functionalHomes: if isinstance(x, functionalHomeType): return x return None
[ "def", "get_functionalHome", "(", "self", ",", "functionalHomeType", ":", "type", ")", "->", "FunctionalHome", ":", "for", "x", "in", "self", ".", "functionalHomes", ":", "if", "isinstance", "(", "x", ",", "functionalHomeType", ")", ":", "return", "x", "retu...
32.857143
22.285714
def mysql( self, tableName, filepath=None, createStatement=None ): """*Render the dataset as a series of mysql insert statements* **Key Arguments:** - ``tableName`` -- the name of the mysql db table to assign the insert statements to. - ``filepath`` -- path to the file to write the mysql inserts content to. Default *None* createStatement **Return:** - ``renderedData`` -- the data rendered mysql insert statements (string format) **Usage:** .. code-block:: python print dataSet.mysql("testing_table") this output the following: .. code-block:: plain INSERT INTO `testing_table` (address,dateCreated,owner,pet) VALUES ("belfast, uk" ,"2016-09-14T16:21:36" ,"daisy" ,"dog") ON DUPLICATE KEY UPDATE address="belfast, uk", dateCreated="2016-09-14T16:21:36", owner="daisy", pet="dog" ; INSERT INTO `testing_table` (address,dateCreated,owner,pet) VALUES ("the moon" ,"2016-09-14T16:21:36" ,"john" ,"snake") ON DUPLICATE KEY UPDATE address="the moon", dateCreated="2016-09-14T16:21:36", owner="john", pet="snake" ; INSERT INTO `testing_table` (address,dateCreated,owner,pet) VALUES ("larne" ,"2016-09-14T16:21:36" ,"susan" ,"crocodile") ON DUPLICATE KEY UPDATE address="larne", dateCreated="2016-09-14T16:21:36", owner="susan", pet="crocodile" ; To save this rendering to file use: .. 
code-block:: python dataSet.mysql("testing_table", "/path/to/myfile.sql") """ self.log.debug('starting the ``csv`` method') import re if createStatement and "create table if not exists" not in createStatement.lower(): regex = re.compile(r'^\s*CREATE TABLE ', re.I | re.S) createStatement = regex.sub( "CREATE TABLE IF NOT EXISTS ", createStatement) renderedData = self._list_of_dictionaries_to_mysql_inserts( tableName=tableName, createStatement=createStatement ) if filepath and len(self.listOfDictionaries): # RECURSIVELY CREATE MISSING DIRECTORIES if not os.path.exists(os.path.dirname(filepath)): os.makedirs(os.path.dirname(filepath)) writeFile = codecs.open(filepath, encoding='utf-8', mode='w') writeFile.write(renderedData) writeFile.close() self.log.debug('completed the ``csv`` method') return renderedData
[ "def", "mysql", "(", "self", ",", "tableName", ",", "filepath", "=", "None", ",", "createStatement", "=", "None", ")", ":", "self", ".", "log", ".", "debug", "(", "'starting the ``csv`` method'", ")", "import", "re", "if", "createStatement", "and", "\"create...
40.758065
34.290323
def install_modules(wip): """Install the plugin modules""" def install_module(hfos_module): """Install a single module via setuptools""" try: setup = Popen( [ sys.executable, 'setup.py', 'develop' ], cwd='modules/' + hfos_module + "/" ) setup.wait() except Exception as e: log("Problem during module installation: ", hfos_module, e, type(e), exc=True, lvl=error) return False return True # TODO: Sort module dependencies via topological sort or let pip do this in future. # # To get the module dependencies: # packages = {} # for provision_entrypoint in iter_entry_points(group='hfos.provisions', # name=None): # log("Found packages: ", provision_entrypoint.dist.project_name, lvl=warn) # # _package_name = provision_entrypoint.dist.project_name # _package = pkg_resources.working_set.by_key[_package_name] # # print([str(r) for r in _package.requires()]) # retrieve deps from setup.py modules_production = [ # TODO: Poor man's dependency management, as long as the modules are # installed from local sources and they're not available on pypi, # which would handle real dependency management for us: 'navdata', # Now all the rest: 'alert', 'automat', 'busrepeater', 'calendar', 'countables', 'dash', # 'dev', 'enrol', 'mail', 'maps', 'nmea', 'nodestate', 'project', 'webguides', 'wiki' ] modules_wip = [ 'calc', 'camera', 'chat', 'comms', 'contacts', 'crew', 'equipment', 'filemanager', 'garden', 'heroic', 'ldap', 'library', 'logbook', 'protocols', 'polls', 'mesh', 'robot', 'switchboard', 'shareables', ] installables = modules_production if wip: installables.extend(modules_wip) success = [] failed = [] for installable in installables: log('Installing module ', installable) if install_module(installable): success.append(installable) else: failed.append(installable) log('Installed modules: ', success) if len(failed) > 0: log('Failed modules: ', failed) log('Done: Install Modules')
[ "def", "install_modules", "(", "wip", ")", ":", "def", "install_module", "(", "hfos_module", ")", ":", "\"\"\"Install a single module via setuptools\"\"\"", "try", ":", "setup", "=", "Popen", "(", "[", "sys", ".", "executable", ",", "'setup.py'", ",", "'develop'",...
25.818182
22.141414
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
    """Extracts events from a Windows Registry key.

    Produces one event for the key itself (dumping all of its values) and,
    for each zone subkey, one event with that zone's decoded control values.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
    """
    values_dict = {}
    if registry_key.number_of_values > 0:
        for registry_value in registry_key.GetValues():
            # A value without a name is the key's default value.
            value_name = registry_value.name or '(default)'
            # Render each value as "[<type>] <data>"; unsupported data types
            # fall back to the type name only.
            if registry_value.DataIsString():
                value_string = '[{0:s}] {1:s}'.format(
                    registry_value.data_type_string,
                    registry_value.GetDataAsObject())
            elif registry_value.DataIsInteger():
                value_string = '[{0:s}] {1:d}'.format(
                    registry_value.data_type_string,
                    registry_value.GetDataAsObject())
            elif registry_value.DataIsMultiString():
                value_string = '[{0:s}] {1:s}'.format(
                    registry_value.data_type_string, ''.join(
                        registry_value.GetDataAsObject()))
            else:
                value_string = '[{0:s}]'.format(registry_value.data_type_string)
            values_dict[value_name] = value_string

    # Generate at least one event object for the key.
    event_data = windows_events.WindowsRegistryEventData()
    event_data.key_path = registry_key.path
    event_data.offset = registry_key.offset
    event_data.regvalue = values_dict
    event_data.urls = self.URLS

    event = time_events.DateTimeValuesEvent(
        registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
    parser_mediator.ProduceEventWithEventData(event, event_data)

    # Without zone subkeys there is nothing more to extract; flag it as a
    # warning since zone keys are expected here.
    if registry_key.number_of_subkeys == 0:
        error_string = 'Key: {0:s} missing subkeys.'.format(registry_key.path)
        parser_mediator.ProduceExtractionWarning(error_string)
        return

    for zone_key in registry_key.GetSubkeys():
        # TODO: these values are stored in the Description value of the
        # zone key. This solution will break on zone values that are larger
        # than 5.
        path = '{0:s}\\{1:s}'.format(
            registry_key.path, self._ZONE_NAMES[zone_key.name])

        values_dict = {}

        # TODO: this plugin currently just dumps the values and does not
        # distinguish between what is a feature control or not.
        for value in zone_key.GetValues():
            # Ignore the default value.
            if not value.name:
                continue

            if value.DataIsString():
                value_string = value.GetDataAsObject()
            elif value.DataIsInteger():
                value_integer = value.GetDataAsObject()
                # Map well-known control values to their human-readable
                # meaning; anything unrecognized is emitted as the raw int.
                if value.name in self._KNOWN_PERMISSIONS_VALUE_NAMES:
                    value_string = self._CONTROL_VALUES_PERMISSIONS.get(
                        value_integer, 'UNKNOWN')
                elif value.name == '1A00':
                    value_string = self._CONTROL_VALUES_1A00.get(
                        value_integer, 'UNKNOWN')
                elif value.name == '1C00':
                    value_string = self._CONTROL_VALUES_1C00.get(
                        value_integer, 'UNKNOWN')
                elif value.name == '1E05':
                    value_string = self._CONTROL_VALUES_SAFETY.get(
                        value_integer, 'UNKNOWN')
                else:
                    value_string = '{0:d}'.format(value_integer)
            else:
                value_string = '[{0:s}]'.format(value.data_type_string)

            # 4-character value names (other than 'Icon') are feature
            # controls; unknown ones get an 'UNKNOWN' description.
            if len(value.name) == 4 and value.name != 'Icon':
                value_description = self._FEATURE_CONTROLS.get(
                    value.name, 'UNKNOWN')
            else:
                value_description = self._FEATURE_CONTROLS.get(value.name, '')

            if value_description:
                feature_control = '[{0:s}] {1:s}'.format(
                    value.name, value_description)
            else:
                feature_control = '[{0:s}]'.format(value.name)

            values_dict[feature_control] = value_string

        event_data = windows_events.WindowsRegistryEventData()
        event_data.key_path = path
        event_data.offset = zone_key.offset
        event_data.regvalue = values_dict
        event_data.urls = self.URLS

        event = time_events.DateTimeValuesEvent(
            zone_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
        parser_mediator.ProduceEventWithEventData(event, event_data)
[ "def", "ExtractEvents", "(", "self", ",", "parser_mediator", ",", "registry_key", ",", "*", "*", "kwargs", ")", ":", "values_dict", "=", "{", "}", "if", "registry_key", ".", "number_of_values", ">", "0", ":", "for", "registry_value", "in", "registry_key", "....
37.926606
20.238532
def get_vt_xml(self, vt_id):
    """Gets a single vulnerability test information in XML format.

    Arguments:
        vt_id: OID of the vulnerability test. Falsy values yield an empty
            <vt> element.

    Returns:
        Element: <vt> element with the vulnerability test information.
    """
    if not vt_id:
        return Element('vt')

    vt = self.vts.get(vt_id)

    vt_xml = Element('vt')
    vt_xml.set('id', vt_id)

    # The name is always emitted, even when absent (rendered via str()).
    name_element = SubElement(vt_xml, 'name')
    name_element.text = str(vt.get('name'))

    # Optional sections as (vt dict key, XML-string builder) pairs. Each
    # section is appended only when the corresponding entry is present and
    # non-empty, in the same order the protocol expects.
    optional_sections = [
        ('vt_params',
         lambda value: self.get_params_vt_as_xml_str(vt_id, value)),
        ('vt_refs',
         lambda value: self.get_refs_vt_as_xml_str(vt_id, value)),
        ('vt_dependencies',
         lambda value: self.get_dependencies_vt_as_xml_str(vt_id, value)),
        ('creation_time',
         lambda value: self.get_creation_time_vt_as_xml_str(vt_id, value)),
        ('modification_time',
         lambda value: self.get_modification_time_vt_as_xml_str(vt_id, value)),
        ('summary',
         lambda value: self.get_summary_vt_as_xml_str(vt_id, value)),
        ('impact',
         lambda value: self.get_impact_vt_as_xml_str(vt_id, value)),
        ('affected',
         lambda value: self.get_affected_vt_as_xml_str(vt_id, value)),
        ('insight',
         lambda value: self.get_insight_vt_as_xml_str(vt_id, value)),
        # The solution builder additionally needs the solution type.
        ('solution',
         lambda value: self.get_solution_vt_as_xml_str(
             vt_id, value, vt.get('solution_type'))),
    ]
    for key, build_xml_str in optional_sections:
        value = vt.get(key)
        if value:
            vt_xml.append(secET.fromstring(build_xml_str(value)))

    # Detection is emitted when any of its three source fields is present.
    if vt.get('detection') or vt.get('qod_type') or vt.get('qod'):
        detection_xml_str = self.get_detection_vt_as_xml_str(
            vt_id, vt.get('detection'), vt.get('qod_type'), vt.get('qod'))
        vt_xml.append(secET.fromstring(detection_xml_str))

    if vt.get('severities'):
        severities_xml_str = self.get_severities_vt_as_xml_str(
            vt_id, vt.get('severities'))
        vt_xml.append(secET.fromstring(severities_xml_str))

    if vt.get('custom'):
        custom_xml_str = self.get_custom_vt_as_xml_str(
            vt_id, vt.get('custom'))
        vt_xml.append(secET.fromstring(custom_xml_str))

    return vt_xml
[ "def", "get_vt_xml", "(", "self", ",", "vt_id", ")", ":", "if", "not", "vt_id", ":", "return", "Element", "(", "'vt'", ")", "vt", "=", "self", ".", "vts", ".", "get", "(", "vt_id", ")", "name", "=", "vt", ".", "get", "(", "'name'", ")", "vt_xml",...
37.428571
19.428571
def _request(self, url, params, first_request_time=None, retry_counter=0,
             base_url=_DEFAULT_BASE_URL, accepts_clientid=True,
             extract_body=None, requests_kwargs=None, post_json=None):
    """Performs HTTP GET/POST with credentials, returning the body as JSON.

    :param url: URL path for the request. Should begin with a slash.
    :type url: string

    :param params: HTTP GET parameters.
    :type params: dict or list of key/value tuples

    :param first_request_time: The time of the first request (None if no
        retries have occurred).
    :type first_request_time: datetime.datetime

    :param retry_counter: The number of this retry, or zero for first attempt.
    :type retry_counter: int

    :param base_url: The base URL for the request. Defaults to the Maps API
        server. Should not have a trailing slash.
    :type base_url: string

    :param accepts_clientid: Whether this call supports the client/signature
        params. Some APIs require API keys (e.g. Roads).
    :type accepts_clientid: bool

    :param extract_body: A function that extracts the body from the request.
        If the request was not successful, the function should raise a
        googlemaps.HTTPError or googlemaps.ApiError as appropriate.
    :type extract_body: function

    :param requests_kwargs: Same extra keywords arg for requests as per
        __init__, but provided here to allow overriding internally on a
        per-request basis.
    :type requests_kwargs: dict

    :param post_json: Body for a POST request. When not None the request is
        sent as POST with this value as its JSON payload; otherwise GET.
    :type post_json: dict

    :raises ApiError: when the API returns an error.
    :raises Timeout: if the request timed out.
    :raises TransportError: when something went wrong while trying to
        execute a request.
    """

    if not first_request_time:
        first_request_time = datetime.now()

    # Give up entirely once the total elapsed time (across all retries)
    # exceeds the client's retry_timeout budget.
    elapsed = datetime.now() - first_request_time
    if elapsed > self.retry_timeout:
        raise googlemaps.exceptions.Timeout()

    if retry_counter > 0:
        # 0.5 * (1.5 ^ i) is an increased sleep time of 1.5x per iteration,
        # starting at 0.5s when retry_counter=0. The first retry will occur
        # at 1, so subtract that first.
        delay_seconds = 0.5 * 1.5 ** (retry_counter - 1)

        # Jitter this value by 50% and pause.
        time.sleep(delay_seconds * (random.random() + 0.5))

    authed_url = self._generate_auth_url(url, params, accepts_clientid)

    # Default to the client-level self.requests_kwargs, with method-level
    # requests_kwargs arg overriding.
    requests_kwargs = requests_kwargs or {}
    final_requests_kwargs = dict(self.requests_kwargs, **requests_kwargs)

    # Determine GET/POST.
    requests_method = self.session.get
    if post_json is not None:
        requests_method = self.session.post
        final_requests_kwargs["json"] = post_json

    try:
        response = requests_method(base_url + authed_url,
                                   **final_requests_kwargs)
    except requests.exceptions.Timeout:
        raise googlemaps.exceptions.Timeout()
    except Exception as e:
        # Any non-timeout failure at the transport layer is wrapped.
        raise googlemaps.exceptions.TransportError(e)

    if response.status_code in _RETRIABLE_STATUSES:
        # Retry request.
        return self._request(url, params, first_request_time,
                             retry_counter + 1, base_url, accepts_clientid,
                             extract_body, requests_kwargs, post_json)

    # Check if the time of the nth previous query (where n is
    # queries_per_second) is under a second ago - if so, sleep for
    # the difference.
    if self.sent_times and len(self.sent_times) == self.queries_per_second:
        elapsed_since_earliest = time.time() - self.sent_times[0]
        if elapsed_since_earliest < 1:
            time.sleep(1 - elapsed_since_earliest)

    try:
        if extract_body:
            result = extract_body(response)
        else:
            result = self._get_body(response)
        # Only record the send time on success so rate limiting tracks
        # completed requests.
        self.sent_times.append(time.time())
        return result
    except googlemaps.exceptions._RetriableRequest as e:
        if isinstance(e, googlemaps.exceptions._OverQueryLimit) and \
                not self.retry_over_query_limit:
            raise

        # Retry request.
        return self._request(url, params, first_request_time,
                             retry_counter + 1, base_url, accepts_clientid,
                             extract_body, requests_kwargs, post_json)
[ "def", "_request", "(", "self", ",", "url", ",", "params", ",", "first_request_time", "=", "None", ",", "retry_counter", "=", "0", ",", "base_url", "=", "_DEFAULT_BASE_URL", ",", "accepts_clientid", "=", "True", ",", "extract_body", "=", "None", ",", "reques...
42.366972
23.376147
def nunique(self, dropna=True):
    """
    Return number of unique elements in the group.

    Parameters
    ----------
    dropna : bool, default True
        Don't include NaN in the counts.

    Returns
    -------
    Series
        Number of unique values within each group.
    """
    # ids maps each row to its group number; val holds the raw values.
    ids, _, _ = self.grouper.group_info
    val = self.obj.get_values()

    try:
        sorter = np.lexsort((val, ids))
    except TypeError:  # catches object dtypes
        msg = 'val.dtype must be object, got {}'.format(val.dtype)
        assert val.dtype == object, msg
        # Object values may not be orderable; factorize to integer codes
        # first (missing values become -1) and sort on those instead.
        val, _ = algorithms.factorize(val, sort=False)
        sorter = np.lexsort((val, ids))
        _isna = lambda a: a == -1
    else:
        _isna = isna

    # Sort primarily by group id, secondarily by value, so equal values
    # within a group become adjacent.
    ids, val = ids[sorter], val[sorter]

    # group boundaries are where group ids change
    # unique observations are where sorted values change
    idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
    inc = np.r_[1, val[1:] != val[:-1]]

    # 1st item of each group is a new unique observation
    mask = _isna(val)
    if dropna:
        # Missing values never count as unique observations.
        inc[idx] = 1
        inc[mask] = 0
    else:
        # Count NaN once per group: zero out repeated adjacent NaNs, then
        # re-mark each group start as a new observation.
        inc[mask & np.r_[False, mask[:-1]]] = 0
        inc[idx] = 1

    # Sum the "new observation" flags per group segment.
    out = np.add.reduceat(inc, idx).astype('int64', copy=False)
    if len(ids):
        # NaN/NaT group exists if the head of ids is -1,
        # so remove it from res and exclude its index from idx
        if ids[0] == -1:
            res = out[1:]
            idx = idx[np.flatnonzero(idx)]
        else:
            res = out
    else:
        res = out[1:]

    ri = self.grouper.result_index

    # we might have duplications among the bins
    if len(res) != len(ri):
        res, out = np.zeros(len(ri), dtype=out.dtype), res
        res[ids[idx]] = out

    return Series(res, index=ri, name=self._selection_name)
[ "def", "nunique", "(", "self", ",", "dropna", "=", "True", ")", ":", "ids", ",", "_", ",", "_", "=", "self", ".", "grouper", ".", "group_info", "val", "=", "self", ".", "obj", ".", "get_values", "(", ")", "try", ":", "sorter", "=", "np", ".", "...
32.017857
17.517857