text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def MessageToJson(message, including_default_value_fields=False, preserving_proto_field_name=False):
    """Serialize a protobuf message to a JSON-formatted string.

    Args:
        message: The protocol buffers message instance to serialize.
        including_default_value_fields: If True, singular primitive fields,
            repeated fields, and map fields are always serialized even when
            empty. Singular message fields and oneof fields are unaffected.
        preserving_proto_field_name: If True, keep the original proto field
            names as defined in the .proto file; otherwise convert them to
            lowerCamelCase.

    Returns:
        A string containing the JSON formatted protocol buffer message.
    """
    json_printer = _Printer(including_default_value_fields, preserving_proto_field_name)
    return json_printer.ToJsonString(message)
[ "def", "MessageToJson", "(", "message", ",", "including_default_value_fields", "=", "False", ",", "preserving_proto_field_name", "=", "False", ")", ":", "printer", "=", "_Printer", "(", "including_default_value_fields", ",", "preserving_proto_field_name", ")", "return", ...
43.619048
20.904762
def tobytes(
    self,
    root=None,
    encoding='UTF-8',
    doctype=None,
    canonicalized=True,
    xml_declaration=True,
    pretty_print=True,
    with_comments=True,
):
    """return the content of the XML document as a byte string suitable for writing

    :param root: element to serialize; defaults to the document root.
    :param encoding: output encoding (falls back to the document's encoding).
    :param doctype: doctype to emit (falls back to the document's doctype).
    :param canonicalized: when true, return canonicalized (C14N) bytes and
        ignore the remaining formatting options.
    :param xml_declaration: include an XML declaration (non-canonical only).
    :param pretty_print: indent the output (non-canonical only).
    :param with_comments: keep comments in the output (non-canonical only).
    """
    if root is None:
        root = self.root
    # PEP 8: test booleans by truth value, not with "== True".
    if canonicalized:
        return self.canonicalized_bytes(root)
    return etree.tostring(
        root,
        encoding=encoding or self.info.encoding,
        doctype=doctype or self.info.doctype,
        xml_declaration=xml_declaration,
        pretty_print=pretty_print,
        with_comments=with_comments,
    )
[ "def", "tobytes", "(", "self", ",", "root", "=", "None", ",", "encoding", "=", "'UTF-8'", ",", "doctype", "=", "None", ",", "canonicalized", "=", "True", ",", "xml_declaration", "=", "True", ",", "pretty_print", "=", "True", ",", "with_comments", "=", "T...
32.291667
14.666667
def summarize_crud_mutation(method, model, isAsync=False):
    """
    This function provides the standard form for crud mutations.
    """
    # Input/output factories for each CRUD method, paired in one table.
    factories = {
        'create': (create_mutation_inputs, create_mutation_outputs),
        'update': (update_mutation_inputs, update_mutation_outputs),
        'delete': (delete_mutation_inputs, delete_mutation_outputs),
    }
    input_factory, output_factory = factories[method]

    # the action type fired when this mutation runs
    event = get_crud_action(method=method, model=model)
    # the public name of the mutation
    mutation_name = crud_mutation_name(model=model, action=method)

    # build the standard summary from the pieces above
    return summarize_mutation(
        mutation_name=mutation_name,
        event=event,
        isAsync=isAsync,
        inputs=input_factory(model),
        outputs=output_factory(model),
    )
[ "def", "summarize_crud_mutation", "(", "method", ",", "model", ",", "isAsync", "=", "False", ")", ":", "# create the approrpriate action type", "action_type", "=", "get_crud_action", "(", "method", "=", "method", ",", "model", "=", "model", ")", "# the name of the m...
30.676471
13.617647
def merge_global_options(self, needs_info):
    """Add all globally defined options to ``needs_info``.

    Keys already present in ``needs_info`` win: such a key was overwritten
    manually in the current need and must not be replaced by the global
    default.

    :param needs_info: dict of need options; updated in place.
    """
    global_options = getattr(self.env.app.config, 'needs_global_options', None)
    if global_options is None:
        return
    for key, value in global_options.items():
        # If key already exists in needs_info, this global_option got
        # overwritten manually in current need. (Idiom: "in dict", not
        # "in dict.keys()".)
        if key in needs_info:
            continue
        needs_info[key] = value
[ "def", "merge_global_options", "(", "self", ",", "needs_info", ")", ":", "global_options", "=", "getattr", "(", "self", ".", "env", ".", "app", ".", "config", ",", "'needs_global_options'", ",", "None", ")", "if", "global_options", "is", "None", ":", "return...
41.166667
21.166667
def build_authorization_endpoint(self, request, disable_sso=None):
    """
    This function returns the ADFS authorization URL.

    Args:
        request(django.http.request.HttpRequest): A django Request object
        disable_sso(bool): Whether to disable single sign-on and force the
            ADFS server to show a login prompt.

    Returns:
        str: The redirect URI
    """
    self.load_config()

    # Determine where to send the user afterwards and base64-encode it so
    # it survives the round trip through the ADFS "state" parameter.
    redirect_to = request.GET.get(REDIRECT_FIELD_NAME, None) or django_settings.LOGIN_REDIRECT_URL
    state = base64.urlsafe_b64encode(redirect_to.encode()).decode()

    query = QueryDict(mutable=True)
    query.update({
        "response_type": "code",
        "client_id": settings.CLIENT_ID,
        "resource": settings.RELYING_PARTY_ID,
        "redirect_uri": self.redirect_uri(request),
        "state": state,
    })
    if self._mode == "openid_connect":
        query["scope"] = "openid"

    # Force a login prompt either on explicit request or by configuration.
    force_prompt = disable_sso is True or (disable_sso is None and settings.DISABLE_SSO)
    if force_prompt:
        query["prompt"] = "login"

    return "{0}?{1}".format(self.authorization_endpoint, query.urlencode())
[ "def", "build_authorization_endpoint", "(", "self", ",", "request", ",", "disable_sso", "=", "None", ")", ":", "self", ".", "load_config", "(", ")", "redirect_to", "=", "request", ".", "GET", ".", "get", "(", "REDIRECT_FIELD_NAME", ",", "None", ")", "if", ...
39.516129
21.258065
def prt_gos_flat(self, prt):
    """Print flat GO list."""
    fmt = self.datobj.kws['fmtgo']
    # Select only the namedtuples for the GO IDs tracked by this object.
    all_nts = self.sortobj.grprobj.go2nt
    selected = {goid: all_nts[goid] for goid in self.go2nt}
    prt.write("\n{N} GO IDs:\n".format(N=len(selected)))
    sort_key = self._get_sortgo()
    for nt_go in sorted(selected.values(), key=sort_key):
        prt.write(fmt.format(**nt_go._asdict()))
[ "def", "prt_gos_flat", "(", "self", ",", "prt", ")", ":", "prtfmt", "=", "self", ".", "datobj", ".", "kws", "[", "'fmtgo'", "]", "_go2nt", "=", "self", ".", "sortobj", ".", "grprobj", ".", "go2nt", "go2nt", "=", "{", "go", ":", "_go2nt", "[", "go",...
44.444444
8.777778
def configure_discord_logger( self, discord_webhook=None, discord_recipient=None, log_level='ERROR', log_format=ReportingFormats.PRETTY_PRINT.value, custom_args='' ): """logger for sending messages to Discord. Easy way to alert humans of issues Note: Will try to overwrite minimum log level to enable requested log_level Will warn and not attach hipchat logger if missing webhook key Learn more about webhooks: https://support.discordapp.com/hc/en-us/articles/228383668-Intro-to-Webhooks Args: discord_webhook (str): discord room webhook (full URL) discord_recipient (`str`:<@int>, optional): user/group to notify log_level (str): desired log level for handle https://docs.python.org/3/library/logging.html#logging-levels log_format (str): format for logging messages https://docs.python.org/3/library/logging.html#logrecord-attributes custom_args (str): special ID to include in messages """ # Override defaults if required # discord_webhook = self.config.get_option( 'LOGGING', 'discord_webhook', None, discord_webhook ) discord_recipient = self.config.get_option( 'LOGGING', 'discord_recipient', None, discord_recipient ) log_level = self.config.get_option( 'LOGGING', 'discord_level', None, log_level ) # Actually build discord logging handler # discord_obj = DiscordWebhook() discord_obj.webhook(discord_webhook) # vv TODO vv: Test review # if discord_obj.can_query: discord_handler = HackyDiscordHandler( discord_obj, discord_recipient ) self._configure_common( 'discord_', log_level, log_format, 'Discord', discord_handler, custom_args=custom_args ) else: warnings.warn( 'Unable to execute webhook', exceptions.WebhookCreateFailed )
[ "def", "configure_discord_logger", "(", "self", ",", "discord_webhook", "=", "None", ",", "discord_recipient", "=", "None", ",", "log_level", "=", "'ERROR'", ",", "log_format", "=", "ReportingFormats", ".", "PRETTY_PRINT", ".", "value", ",", "custom_args", "=", ...
36.633333
19.683333
def writeXYCatalog(self, filename):
    """Write out the X,Y catalog to a file.

    Emits a commented header (source image, column names, units) followed
    by one whitespace-separated row per detected source. If no catalog has
    been computed yet (``self.xypos is None``), a warning is logged and
    printed and nothing is written.

    :param filename: path of the output catalog file.
    """
    if self.xypos is None:
        warnstr = textutil.textbox(
            'WARNING: \n No X,Y source catalog to write to file. ')
        for line in warnstr.split('\n'):
            log.warning(line)
        print(warnstr)
        return

    # Context manager guarantees the file is closed even if a write fails
    # (the original left the handle open on error).
    with open(filename, 'w') as f:
        f.write("# Source catalog derived for %s\n" % self.wcs.filename)
        f.write("# Columns: \n")
        if self.use_sharp_round:
            f.write('# X Y Flux ID Sharp Round1 Round2\n')
        else:
            f.write('# X Y Flux ID\n')
        f.write('# (%s) (%s)\n' % (self.in_units, self.in_units))

        # self.xypos is column-major: xypos[i] holds column i's values.
        for row in range(len(self.xypos[0])):
            for col in self.xypos:
                f.write("%g " % col[row])
            f.write("\n")
[ "def", "writeXYCatalog", "(", "self", ",", "filename", ")", ":", "if", "self", ".", "xypos", "is", "None", ":", "warnstr", "=", "textutil", ".", "textbox", "(", "'WARNING: \\n No X,Y source catalog to write to file. '", ")", "for", "line", "in", "warnstr", "....
36.115385
17.807692
def resolve_post(self, post):
    """Mark post as resolved

    :type  post: dict|str|int
    :param post: Either the post dict returned by another API method, or
        the `cid` field of that post.
    :returns: True if it is successful. False otherwise
    """
    try:
        cid = post["id"]
    except (KeyError, TypeError):
        # Not a post dict (or a dict without "id"): the caller passed the
        # cid itself. str/int inputs raise TypeError on subscripting with
        # a string key, which the original KeyError-only handler missed.
        cid = post
    params = {
        "cid": cid,
        "resolved": "true"
    }
    return self._rpc.content_mark_resolved(params)
[ "def", "resolve_post", "(", "self", ",", "post", ")", ":", "try", ":", "cid", "=", "post", "[", "\"id\"", "]", "except", "KeyError", ":", "cid", "=", "post", "params", "=", "{", "\"cid\"", ":", "cid", ",", "\"resolved\"", ":", "\"true\"", "}", "retur...
26.368421
19.052632
def _get_uncolored_output(self, script, value):
    """
    Creates an uncolored output.

    :param bytes script: The output script.
    :param int value: The satoshi value of the output.
    :return: An object representing the uncolored output.
    :rtype: TransactionOutput
    """
    # Refuse to create outputs below the dust threshold.
    if value < self._dust_amount:
        raise DustOutputError
    output_script = bitcoin.core.CScript(script)
    return bitcoin.core.CTxOut(value, output_script)
[ "def", "_get_uncolored_output", "(", "self", ",", "script", ",", "value", ")", ":", "if", "value", "<", "self", ".", "_dust_amount", ":", "raise", "DustOutputError", "return", "bitcoin", ".", "core", ".", "CTxOut", "(", "value", ",", "bitcoin", ".", "core"...
34.230769
14.230769
def _check_auth_handler(self, request: Dict[str, Any]):
    """Verify that the client is authorized to call the service.

    If the server has credentials configured, the request is checked
    against them:

    + on a match, a response message acknowledging the login is written
    + on a mismatch, an authentication error is raised

    If the server has no credentials configured:

    + if the request carries no credentials either, it is accepted
    + if the request carries any credentials, an error is raised

    Parameters:
        request (Dict[str, Any]): the request as a Python dict.

    Return:
        (bool): True when the request passed authentication.

    Raise:
        (LoginError): raised when authentication fails.
    """
    a_username = request.get("AUTH").get("USERNAME")
    a_password = request.get("AUTH").get("PASSWORD")
    auth_len = len(self.auth)
    if auth_len == 0:
        # Server has no credentials: only anonymous requests are allowed.
        if any([a_username, a_password]):
            if self.debug:
                access_logger.info("login failed", extra=self._extra)
            raise LoginError("login error ,unknown username/password")
        else:
            return True
    else:
        # Check the supplied credentials against every configured pair;
        # the for/else fires only when no pair matched.
        for username, password in self.auth:
            if all([a_username == username, a_password == password]):
                # Matched: send the handshake/acknowledgement response.
                response = {
                    "MPRPC": self.VERSION,
                    "CODE": 100,
                    "VERSION": self.method_wrapper.version,
                    "DESC": self.method_wrapper.__doc__,
                    "DEBUG": self.debug,
                    "COMPRESER": self.compreser.__name__ if (
                        self.compreser) else None,
                    "TIMEOUT": self.timeout,
                }
                self.writer(response)
                if self.debug:
                    access_logger.info("login succeed", extra=self._extra)
                break
        else:
            if self.debug:
                access_logger.info("login failed", extra=self._extra)
            raise LoginError("login error ,unknown username/password")
        return True
[ "def", "_check_auth_handler", "(", "self", ",", "request", ":", "Dict", "[", "str", ",", "Any", "]", ")", ":", "a_username", "=", "request", ".", "get", "(", "\"AUTH\"", ")", ".", "get", "(", "\"USERNAME\"", ")", "a_password", "=", "request", ".", "get...
35.188679
18.358491
def walk_regularity_symmetry(self, data_frame):
    """
    This method extracts the step and stride regularity and also walk symmetry.

    :param data_frame: The data frame. It should have x, y, and z columns.
    :type data_frame: pandas.DataFrame
    :return step_regularity: Regularity of steps on [x, y, z] coordinates,
        defined as the consistency of the step-to-step pattern.
    :rtype step_regularity: numpy.ndarray
    :return stride_regularity: Regularity of stride on [x, y, z] coordinates,
        defined as the consistency of the stride-to-stride pattern.
    :rtype stride_regularity: numpy.ndarray
    :return walk_symmetry: Symmetry of walk on [x, y, z] coordinates, defined
        as the difference between step and stride regularity.
    :rtype walk_symmetry: numpy.ndarray
    """
    def _regularities(series):
        # First and second autocorrelation peaks give step/stride regularity.
        maxtab, _ = peakdet(autocorrelation(series), self.delta)
        return maxtab[1][1], maxtab[2][1]

    per_axis = [_regularities(getattr(data_frame, axis)) for axis in ('x', 'y', 'z')]

    step_regularity = np.array([pair[0] for pair in per_axis])
    stride_regularity = np.array([pair[1] for pair in per_axis])
    walk_symmetry = np.array([pair[0] - pair[1] for pair in per_axis])

    return step_regularity, stride_regularity, walk_symmetry
[ "def", "walk_regularity_symmetry", "(", "self", ",", "data_frame", ")", ":", "def", "_symmetry", "(", "v", ")", ":", "maxtab", ",", "_", "=", "peakdet", "(", "v", ",", "self", ".", "delta", ")", "return", "maxtab", "[", "1", "]", "[", "1", "]", ","...
56.548387
34.322581
def spkltc(targ, et, ref, abcorr, stobs):
    """
    Return the state (position and velocity) of a target body
    relative to an observer, optionally corrected for light time,
    expressed relative to an inertial reference frame.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkltc_c.html

    :param targ: Target body.
    :type targ: int
    :param et: Observer epoch.
    :type et: float
    :param ref: Inertial reference frame of output state.
    :type ref: str
    :param abcorr: Aberration correction flag.
    :type abcorr: str
    :param stobs: State of the observer relative to the SSB.
    :type stobs: 6-Element Array of floats
    :return:
            One way light time between observer and target,
            Derivative of light time with respect to time
    :rtype: tuple
    """
    assert len(stobs) == 6
    # Marshal inputs into ctypes-compatible values for the C call.
    targ_c = stypes.c_int(targ)
    et_c = ctypes.c_double(et)
    ref_c = stypes.stringToCharP(ref)
    abcorr_c = stypes.stringToCharP(abcorr)
    stobs_c = stypes.toDoubleVector(stobs)
    # Output buffers filled by the CSPICE routine.
    starg = stypes.emptyDoubleVector(6)
    lt = ctypes.c_double()
    dlt = ctypes.c_double()
    libspice.spkltc_c(targ_c, et_c, ref_c, abcorr_c, stobs_c,
                      starg, ctypes.byref(lt), ctypes.byref(dlt))
    return stypes.cVectorToPython(starg), lt.value, dlt.value
[ "def", "spkltc", "(", "targ", ",", "et", ",", "ref", ",", "abcorr", ",", "stobs", ")", ":", "assert", "len", "(", "stobs", ")", "==", "6", "targ", "=", "stypes", ".", "c_int", "(", "targ", ")", "et", "=", "ctypes", ".", "c_double", "(", "et", "...
36.057143
15.371429
def Ping(self, request, context):
    """
    Invoke the Server health endpoint

    :param request: Empty
    :param context: the request context
    :return: Status message 'alive'
    """
    reply = processor_pb2.Status()
    reply.message = 'alive'
    return reply
[ "def", "Ping", "(", "self", ",", "request", ",", "context", ")", ":", "status", "=", "processor_pb2", ".", "Status", "(", ")", "status", ".", "message", "=", "'alive'", "return", "status" ]
29.8
5.2
def under_variable_scope():
    """
    Returns:
        A decorator which makes the function happen under a variable scope,
        which is named by the function itself.

    Example:

    .. code-block:: python

        @under_variable_scope()
        def mid_level(x):
            with argscope(Conv2D, kernel_shape=3, nl=BNReLU):
                x = Conv2D('conv1', x, 512, stride=1)
                x = Conv2D('conv2', x, 256, stride=1)
            return x
    """
    def _decorator(func):
        @functools.wraps(func)
        def _scoped(*args, **kwargs):
            # The variable scope is named after the decorated function.
            with tf.variable_scope(func.__name__):
                return func(*args, **kwargs)
        return _scoped
    return _decorator
[ "def", "under_variable_scope", "(", ")", ":", "def", "_impl", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "name", "=", "func", ".", "__name__", "wit...
25.814815
18.62963
def description(self):
    """
    Cursor description, see
    http://legacy.python.org/dev/peps/pep-0249/#description
    """
    if self._session is None:
        return None
    res_info = self._session.res_info
    return res_info.description if res_info else None
[ "def", "description", "(", "self", ")", ":", "if", "self", ".", "_session", "is", "None", ":", "return", "None", "res", "=", "self", ".", "_session", ".", "res_info", "if", "res", ":", "return", "res", ".", "description", "else", ":", "return", "None" ...
30.1
12.1
def dispatch_on(*dispatch_args):
    """
    Factory of decorators turning a function into a generic function
    dispatching on the given arguments.
    """
    assert dispatch_args, 'No dispatch args passed'
    dispatch_str = '(%s,)' % ', '.join(dispatch_args)

    def check(arguments, wrong=operator.ne, msg=''):
        """Make sure one passes the expected number of arguments"""
        if wrong(len(arguments), len(dispatch_args)):
            raise TypeError('Expected %d arguments, got %d%s' %
                            (len(dispatch_args), len(arguments), msg))

    def gen_func_dec(func):
        """Decorator turning a function into a generic function"""

        # first check the dispatch arguments: every dispatch arg must be a
        # real parameter of the decorated function
        argset = set(getfullargspec(func).args)
        if not set(dispatch_args) <= argset:
            raise NameError('Unknown dispatch arguments %s' % dispatch_str)

        # registry mapping a tuple of types -> registered implementation
        typemap = {}

        def vancestors(*types):
            """
            Get a list of sets of virtual ancestors for the given types
            """
            check(types)
            ras = [[] for _ in range(len(dispatch_args))]
            for types_ in typemap:
                for t, type_, ra in zip(types, types_, ras):
                    # a registered type that is a superclass of t but not in
                    # t's MRO is a "virtual" ancestor (e.g. via ABC register)
                    if issubclass(t, type_) and type_ not in t.mro():
                        append(type_, ra)
            return [set(ra) for ra in ras]

        def ancestors(*types):
            """
            Get a list of virtual MROs, one for each type
            """
            check(types)
            lists = []
            for t, vas in zip(types, vancestors(*types)):
                n_vas = len(vas)
                if n_vas > 1:
                    raise RuntimeError(
                        'Ambiguous dispatch for %s: %s' % (t, vas))
                elif n_vas == 1:
                    # splice the single virtual ancestor into the MRO by
                    # building a throwaway subclass of both
                    va, = vas
                    mro = type('t', (t, va), {}).mro()[1:]
                else:
                    mro = t.mro()
                lists.append(mro[:-1])  # discard t and object
            return lists

        def register(*types):
            """
            Decorator to register an implementation for the given types
            """
            check(types)

            def dec(f):
                # the implementation may take extra args, never fewer
                check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__)
                typemap[types] = f
                return f
            return dec

        def dispatch_info(*types):
            """
            An utility to introspect the dispatch algorithm
            """
            check(types)
            lst = []
            for anc in itertools.product(*ancestors(*types)):
                lst.append(tuple(a.__name__ for a in anc))
            return lst

        def _dispatch(dispatch_args, *args, **kw):
            # the runtime dispatcher: pick the implementation registered for
            # the exact types, then walk the (virtual) MRO combinations
            types = tuple(type(arg) for arg in dispatch_args)
            try:  # fast path
                f = typemap[types]
            except KeyError:
                pass
            else:
                return f(*args, **kw)
            combinations = itertools.product(*ancestors(*types))
            next(combinations)  # the first one has been already tried
            for types_ in combinations:
                f = typemap.get(types_)
                if f is not None:
                    return f(*args, **kw)

            # else call the default implementation
            return func(*args, **kw)

        # build a wrapper with the same signature as func that delegates to
        # _dispatch, and expose the introspection helpers as attributes
        return FunctionMaker.create(
            func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str,
            dict(_f_=_dispatch), register=register, default=func,
            typemap=typemap, vancestors=vancestors, ancestors=ancestors,
            dispatch_info=dispatch_info, __wrapped__=func)

    gen_func_dec.__name__ = 'dispatch_on' + dispatch_str
    return gen_func_dec
[ "def", "dispatch_on", "(", "*", "dispatch_args", ")", ":", "assert", "dispatch_args", ",", "'No dispatch args passed'", "dispatch_str", "=", "'(%s,)'", "%", "', '", ".", "join", "(", "dispatch_args", ")", "def", "check", "(", "arguments", ",", "wrong", "=", "o...
35.563107
16.485437
def _get_instance_attributes(self): """Return a generator for instance attributes' name and value. .. code-block:: python3 for _name, _value in self._get_instance_attributes(): print("attribute name: {}".format(_name)) print("attribute value: {}".format(_value)) Returns: generator: tuples with attribute name and value. """ for name, value in self.__dict__.items(): if name in map((lambda x: x[0]), self.get_class_attributes()): yield (name, value)
[ "def", "_get_instance_attributes", "(", "self", ")", ":", "for", "name", ",", "value", "in", "self", ".", "__dict__", ".", "items", "(", ")", ":", "if", "name", "in", "map", "(", "(", "lambda", "x", ":", "x", "[", "0", "]", ")", ",", "self", ".",...
35.125
20.4375
def onlineTable(self, login, tableName):
    """
    Parameters:
     - login
     - tableName
    """
    # Thrift-style RPC stub: send the request, then block on the reply.
    self.send_onlineTable(login, tableName)
    self.recv_onlineTable()
[ "def", "onlineTable", "(", "self", ",", "login", ",", "tableName", ")", ":", "self", ".", "send_onlineTable", "(", "login", ",", "tableName", ")", "self", ".", "recv_onlineTable", "(", ")" ]
20.875
11.625
def oracle_grover(oracle: Program, qubits: List[int], num_iter: int = None) -> Program:
    r"""
    Implementation of Grover's Algorithm for a given oracle.

    (Raw docstring: the original non-raw string turned ``\f`` in
    ``\frac`` into a literal form-feed character and left ``\k`` as an
    invalid escape sequence.)

    :param oracle: An oracle defined as a Program. It should send
        :math:`\ket{x}` to :math:`(-1)^{f(x)}\ket{x}`, where the range of
        f is {0, 1}.
    :param qubits: List of qubits for Grover's Algorithm.
    :param num_iter: The number of iterations to repeat the algorithm for.
        The default is the integer closest to :math:`\frac{\pi}{4}\sqrt{N}`,
        where :math:`N` is the size of the domain.
    :return: A program corresponding to the desired instance of Grover's
        Algorithm.
    """
    if num_iter is None:
        # pi/4 * sqrt(2**n), rounded to the nearest integer.
        num_iter = int(round(np.pi * 2 ** (len(qubits) / 2.0 - 2.0)))
    uniform_superimposer = Program().inst([H(qubit) for qubit in qubits])
    amp_prog = amplification_circuit(uniform_superimposer, oracle, qubits, num_iter)
    return amp_prog
[ "def", "oracle_grover", "(", "oracle", ":", "Program", ",", "qubits", ":", "List", "[", "int", "]", ",", "num_iter", ":", "int", "=", "None", ")", "->", "Program", ":", "if", "num_iter", "is", "None", ":", "num_iter", "=", "int", "(", "round", "(", ...
59.235294
30.764706
def _build_app(args, extra_args):
    """Builds an app or applet and returns the resulting executable ID
    (unless it was a dry-run, in which case None is returned).

    TODO: remote app builds still return None, but we should fix this.
    """
    if not args.remote:
        # LOCAL BUILD
        try:
            output = build_and_upload_locally(
                args.src_dir,
                args.mode,
                overwrite=args.overwrite,
                archive=args.archive,
                publish=args.publish,
                destination_override=args.destination,
                version_override=args.version_override,
                bill_to_override=args.bill_to,
                use_temp_build_project=args.use_temp_build_project,
                do_parallel_build=args.parallel_build,
                do_version_autonumbering=args.version_autonumbering,
                do_try_update=args.update,
                dx_toolkit_autodep=args.dx_toolkit_autodep,
                do_check_syntax=args.check_syntax,
                ensure_upload=args.ensure_upload,
                force_symlinks=args.force_symlinks,
                dry_run=args.dry_run,
                confirm=args.confirm,
                return_object_dump=args.json,
                region=args.region,
                **extra_args
            )
            if output is not None and args.run is None:
                print(json.dumps(output))
        except dxpy.app_builder.AppBuilderException as e:
            # AppBuilderException represents errors during app or applet building
            # that could reasonably have been anticipated by the user.
            print("Error: %s" % (e.args,), file=sys.stderr)
            sys.exit(3)
        except dxpy.exceptions.DXAPIError as e:
            print("Error: %s" % (e,), file=sys.stderr)
            sys.exit(3)

        if args.dry_run:
            return None

        return output['id']
    else:
        # REMOTE BUILD
        try:
            app_json = _parse_app_spec(args.src_dir)
            _check_suggestions(app_json, publish=args.publish)
            _verify_app_source_dir(args.src_dir, args.mode)
            if args.mode == "app" and not args.dry_run:
                dxpy.executable_builder.verify_developer_rights('app-' + app_json['name'])
        except dxpy.app_builder.AppBuilderException as e:
            print("Error: %s" % (e.args,), file=sys.stderr)
            sys.exit(3)

        # The following flags might be useful in conjunction with
        # --remote. To enable these, we need to learn how to pass these
        # options through to the interior call of dx_build_app(let).
        if args.dry_run:
            parser.error('--remote cannot be combined with --dry-run')
        if args.overwrite:
            parser.error('--remote cannot be combined with --overwrite/-f')
        if args.archive:
            parser.error('--remote cannot be combined with --archive/-a')

        # The following flags are probably not useful in conjunction
        # with --remote.
        if args.json:
            parser.error('--remote cannot be combined with --json')
        if not args.use_temp_build_project:
            parser.error('--remote cannot be combined with --no-temp-build-project')

        # Remote builds accept at most one region.
        if isinstance(args.region, list) and len(args.region) > 1:
            parser.error('--region can only be specified once for remote builds')
        region = args.region[0] if args.region is not None else None

        # Translate the enabled-by-default CLI flags into keyword overrides
        # only when the user turned them off (or supplied a value).
        more_kwargs = {}
        if args.version_override:
            more_kwargs['version_override'] = args.version_override
        if args.bill_to:
            more_kwargs['bill_to_override'] = args.bill_to
        if not args.version_autonumbering:
            more_kwargs['do_version_autonumbering'] = False
        if not args.update:
            more_kwargs['do_try_update'] = False
        if not args.parallel_build:
            more_kwargs['do_parallel_build'] = False
        if not args.check_syntax:
            more_kwargs['do_check_syntax'] = False

        return _build_app_remote(args.mode, args.src_dir,
                                 destination_override=args.destination,
                                 publish=args.publish,
                                 dx_toolkit_autodep=args.dx_toolkit_autodep,
                                 region=region, watch=args.watch,
                                 **more_kwargs)
[ "def", "_build_app", "(", "args", ",", "extra_args", ")", ":", "if", "not", "args", ".", "remote", ":", "# LOCAL BUILD", "try", ":", "output", "=", "build_and_upload_locally", "(", "args", ".", "src_dir", ",", "args", ".", "mode", ",", "overwrite", "=", ...
41.126214
21.097087
def is_complete(self):
    """Whether the transfer has finished.

    Refreshes the cached flag from ``transfer_info`` when a status is
    present, then returns the cached value.

    :rtype: ``bool``
    """
    info = self.transfer_info
    if 'status' in info:
        self._complete = info['status'] == 'STATUS_COMPLETE'
    return self._complete
[ "def", "is_complete", "(", "self", ")", ":", "if", "'status'", "in", "self", ".", "transfer_info", ":", "self", ".", "_complete", "=", "self", ".", "transfer_info", "[", "'status'", "]", "==", "'STATUS_COMPLETE'", "return", "self", ".", "_complete" ]
33.428571
21.285714
def get_maximum_range(self, hmm):
    '''
    If no maximum range has been specified, and if using a hmm search, a
    maximum range can be determined by using the length of the HMM

    Parameters
    ----------
    hmm : str
        path to hmm profile

    Returns
    -------
    Length to search to when linking hits on a single contig
    '''
    # Use a context manager so the profile file is closed (the original
    # bare open() leaked the handle).
    with open(hmm) as profile:
        length = int([x for x in profile if x.startswith("LENG")][0].split()[1])
    max_length = round(length * 1.5, 0)
    return max_length
[ "def", "get_maximum_range", "(", "self", ",", "hmm", ")", ":", "length", "=", "int", "(", "[", "x", "for", "x", "in", "open", "(", "hmm", ")", "if", "x", ".", "startswith", "(", "\"LENG\"", ")", "]", "[", "0", "]", ".", "split", "(", ")", "[", ...
29.055556
25.388889
def istriangular(am):
    r"""
    Returns ``True`` if the sparse adjacency matrix is either upper or
    lower triangular
    """
    # Conversion helpers below require COO format.
    coo = am if am.format == 'coo' else am.tocoo(copy=False)
    return istril(coo) or istriu(coo)
[ "def", "istriangular", "(", "am", ")", ":", "if", "am", ".", "format", "!=", "'coo'", ":", "am", "=", "am", ".", "tocoo", "(", "copy", "=", "False", ")", "return", "istril", "(", "am", ")", "or", "istriu", "(", "am", ")" ]
27.5
13.375
def normalize_slice(s, total):
    """
    Return a "canonical" version of slice ``s``.

    :param slice s: the original slice expression
    :param total int: total number of elements in the collection sliced
        by ``s``
    :return slice: a slice equivalent to ``s`` but not containing any
        negative indices or Nones.
    """
    if s.start is None:
        start = 0
    elif s.start < 0:
        start = max(0, s.start + total)
    else:
        start = min(s.start, total)

    if s.stop is None:
        stop = total
    elif s.stop < 0:
        stop = max(0, s.stop + total)
    else:
        stop = min(s.stop, total)

    step = 1 if s.step is None else s.step
    return slice(start, stop, step)
[ "def", "normalize_slice", "(", "s", ",", "total", ")", ":", "newstart", "=", "0", "if", "s", ".", "start", "is", "None", "else", "max", "(", "0", ",", "s", ".", "start", "+", "total", ")", "if", "s", ".", "start", "<", "0", "else", "min", "(", ...
51.083333
24.916667
def CallUDFUNS(f, x):
    """
    We are given a UDF CFUNCTYPE and want to call it in python

    :param f: SpiceUDFUNS
    :type f: CFUNCTYPE
    :param x: some scalar
    :type x: float
    :return: value
    :rtype: float
    """
    # The UDF writes its result through an out-parameter.
    result = c_double()
    f(x, byref(result))
    return result.value
[ "def", "CallUDFUNS", "(", "f", ",", "x", ")", ":", "value", "=", "c_double", "(", ")", "f", "(", "x", ",", "byref", "(", "value", ")", ")", "return", "value", ".", "value" ]
20.571429
17.857143
def _load(pathtovector, wordlist, num_to_load=None, truncate_embeddings=None, sep=" "):
    """Load a matrix and wordlist from a .vec file.

    :param pathtovector: path to the .vec file.
    :param wordlist: iterable of words to restrict loading to (empty = all).
    :param num_to_load: stop after this many items have been loaded.
    :param truncate_embeddings: keep only the first N dimensions.
    :param sep: field separator used in the file.
    :return: ``(vectors, words)`` where vectors is a float32 numpy array.
    :raises ValueError: on duplicate words or rows of the wrong width.
    """
    vectors = []
    addedwords = set()
    words = []

    try:
        wordlist = set(wordlist)
    except ValueError:
        wordlist = set()

    logger.info("Loading {0}".format(pathtovector))

    # Peek at the first line: word2vec-style files start with "<num> <dim>".
    # Context managers replace the original leaked file handles.
    with open(pathtovector) as fhandle:
        firstline = fhandle.readline().strip()
    try:
        num, size = firstline.split(sep)
        num, size = int(num), int(size)
        logger.info("Vector space: {} by {}".format(num, size))
        header = True
    except ValueError:
        size = len(firstline.split(sep)) - 1
        logger.info("Vector space: {} dim, # items unknown".format(size))
        word, rest = firstline.split(sep, 1)
        # If the first line is correctly parseable, set header to False.
        header = False

    if truncate_embeddings is None or truncate_embeddings == 0:
        truncate_embeddings = size

    with open(pathtovector, encoding='utf-8') as fhandle:
        for idx, line in enumerate(fhandle):
            if header and idx == 0:
                continue

            word, rest = line.rstrip(" \n").split(sep, 1)

            if wordlist and word not in wordlist:
                continue

            if word in addedwords:
                raise ValueError("Duplicate: {} on line {} was in the "
                                 "vector space twice".format(word, idx))

            # Split once and reuse (the original split twice per line).
            fields = rest.split(sep)
            if len(fields) != size:
                raise ValueError("Incorrect input at index {}, size "
                                 "is {}, expected "
                                 "{}".format(idx + 1, len(fields), size))

            words.append(word)
            addedwords.add(word)
            # np.fromstring is deprecated; parse the already-split fields.
            vectors.append(np.array(fields, dtype=np.float64)[:truncate_embeddings])

            if num_to_load is not None and len(addedwords) >= num_to_load:
                break

    vectors = np.array(vectors).astype(np.float32)

    logger.info("Loading finished")
    if wordlist:
        diff = wordlist - addedwords
        if diff:
            logger.info("Not all items from your wordlist were in your "
                        "vector space: {}.".format(diff))

    return vectors, words
[ "def", "_load", "(", "pathtovector", ",", "wordlist", ",", "num_to_load", "=", "None", ",", "truncate_embeddings", "=", "None", ",", "sep", "=", "\" \"", ")", ":", "vectors", "=", "[", "]", "addedwords", "=", "set", "(", ")", "words", "=", "[", "]", ...
33.9
21.642857
def do_ls(self, params):
    """
\x1b[1mNAME\x1b[0m
        ls - Lists the znodes for the given <path>

\x1b[1mSYNOPSIS\x1b[0m
        ls <path> [watch] [sep]

\x1b[1mOPTIONS\x1b[0m
        * watch: set a (child) watch on the path (default: false)
        * sep: separator to be used (default: '\\n')

\x1b[1mEXAMPLES\x1b[0m
        > ls /
        configs
        zookeeper

        Setting a watch:

        > ls / true
        configs
        zookeeper

        > create /foo 'bar'
        WatchedEvent(type='CHILD', state='CONNECTED', path=u'/')

        > ls / false ,
        configs,zookeeper

    """
    def watcher(evt):
        self.show_output(str(evt))

    # Only attach the child watch when explicitly requested.
    if params.watch:
        kwargs = {"watch": watcher}
    else:
        kwargs = {}

    children = self._zk.get_children(params.path, **kwargs)
    self.show_output(params.sep.join(sorted(children)))
[ "def", "do_ls", "(", "self", ",", "params", ")", ":", "watcher", "=", "lambda", "evt", ":", "self", ".", "show_output", "(", "str", "(", "evt", ")", ")", "kwargs", "=", "{", "\"watch\"", ":", "watcher", "}", "if", "params", ".", "watch", "else", "{...
24.088235
22.676471
def post(self, request): """ POST /consent/api/v1/data_sharing_consent Requires a JSON object of the following format: >>> { >>> "username": "bob", >>> "course_id": "course-v1:edX+DemoX+Demo_Course", >>> "enterprise_customer_uuid": "enterprise-uuid-goes-right-here" >>> } Keys: *username* The edX username from whom to get consent. *course_id* The course for which consent is granted. *enterprise_customer_uuid* The UUID of the enterprise customer that requires consent. """ try: consent_record = self.get_consent_record(request) if consent_record is None: return self.get_no_record_response(request) if consent_record.consent_required(): # If and only if the given EnterpriseCustomer requires data sharing consent # for the given course, then, since we've received a POST request, set the # consent state for the EC/user/course combo. consent_record.granted = True # Models don't have return values when saving, but ProxyDataSharingConsent # objects do - they should return either a model instance, or another instance # of ProxyDataSharingConsent if representing a multi-course consent record. consent_record = consent_record.save() or consent_record except ConsentAPIRequestError as invalid_request: return Response({'error': str(invalid_request)}, status=HTTP_400_BAD_REQUEST) return Response(consent_record.serialize())
[ "def", "post", "(", "self", ",", "request", ")", ":", "try", ":", "consent_record", "=", "self", ".", "get_consent_record", "(", "request", ")", "if", "consent_record", "is", "None", ":", "return", "self", ".", "get_no_record_response", "(", "request", ")", ...
43.684211
25.315789
def stop(self, timeout=None): """ Initiates a graceful stop of the processes """ self.stopping = True for process in list(self.processes): self.stop_process(process, timeout=timeout)
[ "def", "stop", "(", "self", ",", "timeout", "=", "None", ")", ":", "self", ".", "stopping", "=", "True", "for", "process", "in", "list", "(", "self", ".", "processes", ")", ":", "self", ".", "stop_process", "(", "process", ",", "timeout", "=", "timeo...
30.571429
17.428571
def mass_3d(self, r, rho0, gamma): """ mass enclosed a 3d sphere or radius r :param r: :param a: :param s: :return: """ mass_3d = 4 * np.pi * rho0 /(-gamma + 3) * r ** (-gamma + 3) return mass_3d
[ "def", "mass_3d", "(", "self", ",", "r", ",", "rho0", ",", "gamma", ")", ":", "mass_3d", "=", "4", "*", "np", ".", "pi", "*", "rho0", "/", "(", "-", "gamma", "+", "3", ")", "*", "r", "**", "(", "-", "gamma", "+", "3", ")", "return", "mass_3...
25.8
15
def word_groups_for_language(language_code): """ Return the math word groups for a language code. The language_code should be an ISO 639-2 language code. https://www.loc.gov/standards/iso639-2/php/code_list.php """ if language_code not in LANGUAGE_CODES: message = '{} is not an available language code'.format(language_code) raise InvalidLanguageCodeException(message) return MATH_WORDS[language_code]
[ "def", "word_groups_for_language", "(", "language_code", ")", ":", "if", "language_code", "not", "in", "LANGUAGE_CODES", ":", "message", "=", "'{} is not an available language code'", ".", "format", "(", "language_code", ")", "raise", "InvalidLanguageCodeException", "(", ...
36.416667
15.916667
def entries_view(self, request, form_id): """ Displays the form entries in a HTML table with option to export as CSV file. """ if request.POST.get("back"): change_url = admin_url(Form, "change", form_id) return HttpResponseRedirect(change_url) form = get_object_or_404(Form, id=form_id) entries_form = EntriesForm(form, request, request.POST or None) delete_entries_perm = "%s.delete_formentry" % FormEntry._meta.app_label can_delete_entries = request.user.has_perm(delete_entries_perm) submitted = entries_form.is_valid() if submitted: if request.POST.get("export"): response = HttpResponse(content_type="text/csv") timestamp = slugify(datetime.now().ctime()) fname = "%s-%s.csv" % (form.slug, timestamp) header = "attachment; filename=%s" % fname response["Content-Disposition"] = header queue = StringIO() delimiter = settings.FORMS_CSV_DELIMITER try: csv = writer(queue, delimiter=delimiter) writerow = csv.writerow except TypeError: queue = BytesIO() delimiter = bytes(delimiter, encoding="utf-8") csv = writer(queue, delimiter=delimiter) writerow = lambda row: csv.writerow([c.encode("utf-8") if hasattr(c, "encode") else c for c in row]) writerow(entries_form.columns()) for row in entries_form.rows(csv=True): writerow(row) data = queue.getvalue() response.write(data) return response elif request.POST.get("delete") and can_delete_entries: selected = request.POST.getlist("selected") if selected: entries = FormEntry.objects.filter(id__in=selected) count = entries.count() if count > 0: entries.delete() message = ungettext("1 entry deleted", "%(count)s entries deleted", count) info(request, message % {"count": count}) template = "admin/forms/entries.html" context = {"title": _("View Entries"), "entries_form": entries_form, "opts": self.model._meta, "original": form, "can_delete_entries": can_delete_entries, "submitted": submitted} return render(request, template, context)
[ "def", "entries_view", "(", "self", ",", "request", ",", "form_id", ")", ":", "if", "request", ".", "POST", ".", "get", "(", "\"back\"", ")", ":", "change_url", "=", "admin_url", "(", "Form", ",", "\"change\"", ",", "form_id", ")", "return", "HttpRespons...
49.981132
15.301887
def deep_del(data, fn): """Create dict copy with removed items. Recursively remove items where fn(value) is True. Returns: dict: New dict with matching items removed. """ result = {} for k, v in data.iteritems(): if not fn(v): if isinstance(v, dict): result[k] = deep_del(v, fn) else: result[k] = v return result
[ "def", "deep_del", "(", "data", ",", "fn", ")", ":", "result", "=", "{", "}", "for", "k", ",", "v", "in", "data", ".", "iteritems", "(", ")", ":", "if", "not", "fn", "(", "v", ")", ":", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "...
22.166667
19.166667
def _compute_rtfilter_map(self): """Returns neighbor's RT filter (permit/allow filter based on RT). Walks RT filter tree and computes current RT filters for each peer that have advertised RT NLRIs. Returns: dict of peer, and `set` of rts that a particular neighbor is interested in. """ rtfilter_map = {} def get_neigh_filter(neigh): neigh_filter = rtfilter_map.get(neigh) # Lazy creation of neighbor RT filter if neigh_filter is None: neigh_filter = set() rtfilter_map[neigh] = neigh_filter return neigh_filter # Check if we have to use all paths or just best path if self._common_config.max_path_ext_rtfilter_all: # We have to look at all paths for a RtDest for rtcdest in self._table_manager.get_rtc_table().values(): known_path_list = rtcdest.known_path_list for path in known_path_list: neigh = path.source # We ignore NC if neigh is None: continue neigh_filter = get_neigh_filter(neigh) neigh_filter.add(path.nlri.route_target) else: # We iterate over all destination of the RTC table and for iBGP # peers we use all known paths' RTs for RT filter and for eBGP # peers we only consider best-paths' RTs for RT filter for rtcdest in self._table_manager.get_rtc_table().values(): path = rtcdest.best_path # If this destination does not have any path, we continue if not path: continue neigh = path.source # Consider only eBGP peers and ignore NC if neigh and neigh.is_ebgp_peer(): # For eBGP peers we use only best-path to learn RT filter neigh_filter = get_neigh_filter(neigh) neigh_filter.add(path.nlri.route_target) else: # For iBGP peers we use all known paths to learn RT filter known_path_list = rtcdest.known_path_list for path in known_path_list: neigh = path.source # We ignore NC, and eBGP peers if neigh and not neigh.is_ebgp_peer(): neigh_filter = get_neigh_filter(neigh) neigh_filter.add(path.nlri.route_target) return rtfilter_map
[ "def", "_compute_rtfilter_map", "(", "self", ")", ":", "rtfilter_map", "=", "{", "}", "def", "get_neigh_filter", "(", "neigh", ")", ":", "neigh_filter", "=", "rtfilter_map", ".", "get", "(", "neigh", ")", "# Lazy creation of neighbor RT filter", "if", "neigh_filte...
43.25
19
def _set_data(self): """ This method will be called to set Series data """ if getattr(self, 'data', False) and not getattr(self, '_x', False) and not getattr(self, '_y', False): _x = XVariable() _y = YVariable() _x.contribute_to_class(self, 'X', self.data) _y.contribute_to_class(self, 'Y', self.data) self['data'] = zip(self._x.points, self._y.points) else: for axis in ('_x', '_y'): axis_obj = getattr(self, axis, False) if not axis_obj: raise exception.MissingAxisException("%s missing" % axis) if not getattr(axis_obj, 'points', False): raise exception.MissingDataException() self['data'] = zip(self._x.points, self._y.points)
[ "def", "_set_data", "(", "self", ")", ":", "if", "getattr", "(", "self", ",", "'data'", ",", "False", ")", "and", "not", "getattr", "(", "self", ",", "'_x'", ",", "False", ")", "and", "not", "getattr", "(", "self", ",", "'_y'", ",", "False", ")", ...
43.473684
19.263158
def unflatten(master): """ :param dict master: a multilevel dictionary :return: a unflattened dictionary :rtype: dict Unflattens a single-level dictionary a multilevel into one so that:: {'foo.bar.a': 1, 'foo.bar.b': True, 'foo.bar.a': 1, } would become:: {'foo': {'bar': { 'a': 1, 'b': True, 'c': 'hello', }, }, } """ result = {} for k, v in master.items(): *first, last = k.split('.') r = result for i in first: r = r.setdefault(i, {}) r[last] = v return result
[ "def", "unflatten", "(", "master", ")", ":", "result", "=", "{", "}", "for", "k", ",", "v", "in", "master", ".", "items", "(", ")", ":", "*", "first", ",", "last", "=", "k", ".", "split", "(", "'.'", ")", "r", "=", "result", "for", "i", "in",...
19.485714
20.857143
def _operation_speak_as_spell_out(self, content, index, children): """ The operation method of _speak_as method for spell-out. :param content: The text content of element. :type content: str :param index: The index of pattern in text content of element. :type index: int :param children: The children of element. :type children: list(hatemile.util.html.htmldomelement.HTMLDOMElement) """ children.append(self._create_content_element( content[0:(index + 1)], 'spell-out' )) children.append(self._create_aural_content_element(' ', 'spell-out')) return children
[ "def", "_operation_speak_as_spell_out", "(", "self", ",", "content", ",", "index", ",", "children", ")", ":", "children", ".", "append", "(", "self", ".", "_create_content_element", "(", "content", "[", "0", ":", "(", "index", "+", "1", ")", "]", ",", "'...
33.55
22.35
def set_background(self, color, loc='all'): """ Sets background color Parameters ---------- color : string or 3 item list, optional, defaults to white Either a string, rgb list, or hex color string. For example: color='white' color='w' color=[1, 1, 1] color='#FFFFFF' loc : int, tuple, list, or str, optional Index of the renderer to add the actor to. For example, ``loc=2`` or ``loc=(1, 1)``. If ``loc='all'`` then all render windows will have their background set. """ if color is None: color = rcParams['background'] if isinstance(color, str): if color.lower() in 'paraview' or color.lower() in 'pv': # Use the default ParaView background color color = PV_BACKGROUND else: color = vtki.string_to_rgb(color) if loc =='all': for renderer in self.renderers: renderer.SetBackground(color) else: renderer = self.renderers[self.loc_to_index(loc)] renderer.SetBackground(color)
[ "def", "set_background", "(", "self", ",", "color", ",", "loc", "=", "'all'", ")", ":", "if", "color", "is", "None", ":", "color", "=", "rcParams", "[", "'background'", "]", "if", "isinstance", "(", "color", ",", "str", ")", ":", "if", "color", ".", ...
34.794118
17.088235
def set_many(self, mapping, timeout=None): """Sets multiple keys and values from a mapping. :param mapping: a mapping with the keys/values to set. :param timeout: the cache timeout for the key (if not specified, it uses the default timeout). """ for key, value in _items(mapping): self.set(key, value, timeout)
[ "def", "set_many", "(", "self", ",", "mapping", ",", "timeout", "=", "None", ")", ":", "for", "key", ",", "value", "in", "_items", "(", "mapping", ")", ":", "self", ".", "set", "(", "key", ",", "value", ",", "timeout", ")" ]
42.111111
12.444444
def set_value(self, *args, **kwargs): """ Quickly set single value at (item, major, minor) location. .. deprecated:: 0.21.0 Please use .at[] or .iat[] accessors. Parameters ---------- item : item label (panel item) major : major axis label (panel item row) minor : minor axis label (panel item column) value : scalar takeable : interpret the passed labels as indexers, default False Returns ------- panel : Panel If label combo is contained, will be reference to calling Panel, otherwise a new object. """ warnings.warn("set_value is deprecated and will be removed " "in a future release. Please use " ".at[] or .iat[] accessors instead", FutureWarning, stacklevel=2) return self._set_value(*args, **kwargs)
[ "def", "set_value", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "\"set_value is deprecated and will be removed \"", "\"in a future release. Please use \"", "\".at[] or .iat[] accessors instead\"", ",", "FutureWarning", "...
33.740741
19.296296
def _all_number_groups_are_exactly_present(numobj, normalized_candidate, formatted_number_groups): """Returns True if the groups of digits found in our candidate phone number match our expectations. Arguments: numobj -- the original number we found when parsing normalized_candidate -- the candidate number, normalized to only contain ASCII digits, but with non-digits (spaces etc) retained expected_number_groups -- the groups of digits that we would expect to see if we formatted this number Returns True if expectations matched. """ candidate_groups = re.split(NON_DIGITS_PATTERN, normalized_candidate) # Set this to the last group, skipping it if the number has an extension. if numobj.extension is not None: candidate_number_group_index = len(candidate_groups) - 2 else: candidate_number_group_index = len(candidate_groups) - 1 # First we check if the national significant number is formatted as a # block. We use contains and not equals, since the national significant # number may be present with a prefix such as a national number prefix, or # the country code itself. if (len(candidate_groups) == 1 or candidate_groups[candidate_number_group_index].find(national_significant_number(numobj)) != -1): return True # Starting from the end, go through in reverse, excluding the first group, # and check the candidate and number groups are the same. formatted_number_group_index = len(formatted_number_groups) - 1 while (formatted_number_group_index > 0 and candidate_number_group_index >= 0): if (candidate_groups[candidate_number_group_index] != formatted_number_groups[formatted_number_group_index]): return False formatted_number_group_index -= 1 candidate_number_group_index -= 1 # Now check the first group. There may be a national prefix at the start, so we only check # that the candidate group ends with the formatted number group. return (candidate_number_group_index >= 0 and candidate_groups[candidate_number_group_index].endswith(formatted_number_groups[0]))
[ "def", "_all_number_groups_are_exactly_present", "(", "numobj", ",", "normalized_candidate", ",", "formatted_number_groups", ")", ":", "candidate_groups", "=", "re", ".", "split", "(", "NON_DIGITS_PATTERN", ",", "normalized_candidate", ")", "# Set this to the last group, skip...
56.5
25.710526
def delete_file(self, path): """Delete the file or directory at path. """ self.log.debug("S3contents.GenericManager: delete_file '%s'", path) if self.file_exists(path) or self.dir_exists(path): self.fs.rm(path) else: self.no_such_entity(path)
[ "def", "delete_file", "(", "self", ",", "path", ")", ":", "self", ".", "log", ".", "debug", "(", "\"S3contents.GenericManager: delete_file '%s'\"", ",", "path", ")", "if", "self", ".", "file_exists", "(", "path", ")", "or", "self", ".", "dir_exists", "(", ...
37.375
13.5
def open_resource(self, path: FilePath, mode: str='rb') -> IO[AnyStr]: """Open a file for reading. Use as .. code-block:: python with app.open_resouce(path) as file_: file_.read() """ if mode not in {'r', 'rb'}: raise ValueError('Files can only be opened for reading') return open(self.root_path / file_path_to_path(path), mode)
[ "def", "open_resource", "(", "self", ",", "path", ":", "FilePath", ",", "mode", ":", "str", "=", "'rb'", ")", "->", "IO", "[", "AnyStr", "]", ":", "if", "mode", "not", "in", "{", "'r'", ",", "'rb'", "}", ":", "raise", "ValueError", "(", "'Files can...
31.307692
20.538462
def get_chaotic_pairs(graph: BELGraph) -> SetOfNodePairs: """Find pairs of nodes that have mutual causal edges that are increasing each other such that ``A -> B`` and ``B -> A``. :return: A set of pairs of nodes with mutual causal edges """ cg = get_causal_subgraph(graph) results = set() for u, v, d in cg.edges(data=True): if d[RELATION] not in CAUSAL_INCREASE_RELATIONS: continue if cg.has_edge(v, u) and any(dd[RELATION] in CAUSAL_INCREASE_RELATIONS for dd in cg[v][u].values()): results.add(tuple(sorted([u, v], key=str))) return results
[ "def", "get_chaotic_pairs", "(", "graph", ":", "BELGraph", ")", "->", "SetOfNodePairs", ":", "cg", "=", "get_causal_subgraph", "(", "graph", ")", "results", "=", "set", "(", ")", "for", "u", ",", "v", ",", "d", "in", "cg", ".", "edges", "(", "data", ...
33.444444
23.944444
async def mod(self, iden, query): ''' Change the query of an appointment ''' appt = self.appts.get(iden) if appt is None: raise s_exc.NoSuchIden() if not query: raise ValueError('empty query') if self.enabled: self.core.getStormQuery(query) appt.query = query appt.enabled = True # in case it was disabled for a bad query await self._storeAppt(appt)
[ "async", "def", "mod", "(", "self", ",", "iden", ",", "query", ")", ":", "appt", "=", "self", ".", "appts", ".", "get", "(", "iden", ")", "if", "appt", "is", "None", ":", "raise", "s_exc", ".", "NoSuchIden", "(", ")", "if", "not", "query", ":", ...
25.166667
18.944444
def _debug_line(linenum: int, line: str, extramsg: str = "") -> None: """ Writes a debugging report on a line. """ log.critical("{}Line {}: {!r}", extramsg, linenum, line)
[ "def", "_debug_line", "(", "linenum", ":", "int", ",", "line", ":", "str", ",", "extramsg", ":", "str", "=", "\"\"", ")", "->", "None", ":", "log", ".", "critical", "(", "\"{}Line {}: {!r}\"", ",", "extramsg", ",", "linenum", ",", "line", ")" ]
39.8
11.4
def format_system_message(errno): """ Call FormatMessage with a system error number to retrieve the descriptive error message. """ # first some flags used by FormatMessageW ALLOCATE_BUFFER = 0x100 FROM_SYSTEM = 0x1000 # Let FormatMessageW allocate the buffer (we'll free it below) # Also, let it know we want a system error message. flags = ALLOCATE_BUFFER | FROM_SYSTEM source = None message_id = errno language_id = 0 result_buffer = ctypes.wintypes.LPWSTR() buffer_size = 0 arguments = None bytes = ctypes.windll.kernel32.FormatMessageW( flags, source, message_id, language_id, ctypes.byref(result_buffer), buffer_size, arguments, ) # note the following will cause an infinite loop if GetLastError # repeatedly returns an error that cannot be formatted, although # this should not happen. handle_nonzero_success(bytes) message = result_buffer.value ctypes.windll.kernel32.LocalFree(result_buffer) return message
[ "def", "format_system_message", "(", "errno", ")", ":", "# first some flags used by FormatMessageW", "ALLOCATE_BUFFER", "=", "0x100", "FROM_SYSTEM", "=", "0x1000", "# Let FormatMessageW allocate the buffer (we'll free it below)", "# Also, let it know we want a system error message.", "...
30.588235
16.294118
def _init_parameters_random(self, X_bin): """Initialise parameters for unsupervised learning. """ _, n_features = X_bin.shape # The parameter class_log_prior_ has shape (2,). The values represent # 'match' and 'non-match'. rand_vals = np.random.rand(2) class_prior = rand_vals / np.sum(rand_vals) # make empty array of feature log probs # dimensions 2xn_features feature_prob = np.zeros((2, n_features)) feat_i = 0 for i, bin in enumerate(self._binarizers): bin_len = bin.classes_.shape[0] rand_vals_0 = np.random.rand(bin_len) feature_prob[0, feat_i:feat_i + bin_len] = \ rand_vals_0 / np.sum(rand_vals_0) rand_vals_1 = np.random.rand(bin_len) feature_prob[1, feat_i:feat_i + bin_len] = \ rand_vals_1 / np.sum(rand_vals_1) feat_i += bin_len return np.log(class_prior), np.log(feature_prob)
[ "def", "_init_parameters_random", "(", "self", ",", "X_bin", ")", ":", "_", ",", "n_features", "=", "X_bin", ".", "shape", "# The parameter class_log_prior_ has shape (2,). The values represent", "# 'match' and 'non-match'.", "rand_vals", "=", "np", ".", "random", ".", ...
29.606061
19.848485
def from_stub(cls, data, udas=None): """ Create a Task from an already deserialized dict. """ udas = udas or {} fields = cls.FIELDS.copy() fields.update(udas) processed = {} for k, v in six.iteritems(data): processed[k] = cls._serialize(k, v, fields) return cls(processed, udas)
[ "def", "from_stub", "(", "cls", ",", "data", ",", "udas", "=", "None", ")", ":", "udas", "=", "udas", "or", "{", "}", "fields", "=", "cls", ".", "FIELDS", ".", "copy", "(", ")", "fields", ".", "update", "(", "udas", ")", "processed", "=", "{", ...
28.166667
16.333333
def simBirth(self,which_agents): ''' Makes new consumers for the given indices. Initialized variables include aNrm and pLvl, as well as time variables t_age and t_cycle. Normalized assets and permanent income levels are drawn from lognormal distributions given by aNrmInitMean and aNrmInitStd (etc). Parameters ---------- which_agents : np.array(Bool) Boolean array of size self.AgentCount indicating which agents should be "born". Returns ------- None ''' # Get and store states for newly born agents N = np.sum(which_agents) # Number of new consumers to make self.aNrmNow[which_agents] = drawLognormal(N,mu=self.aNrmInitMean,sigma=self.aNrmInitStd,seed=self.RNG.randint(0,2**31-1)) pLvlInitMeanNow = self.pLvlInitMean + np.log(self.PlvlAggNow) # Account for newer cohorts having higher permanent income self.pLvlNow[which_agents] = drawLognormal(N,mu=pLvlInitMeanNow,sigma=self.pLvlInitStd,seed=self.RNG.randint(0,2**31-1)) self.t_age[which_agents] = 0 # How many periods since each agent was born self.t_cycle[which_agents] = 0 # Which period of the cycle each agent is currently in return None
[ "def", "simBirth", "(", "self", ",", "which_agents", ")", ":", "# Get and store states for newly born agents", "N", "=", "np", ".", "sum", "(", "which_agents", ")", "# Number of new consumers to make", "self", ".", "aNrmNow", "[", "which_agents", "]", "=", "drawLogn...
54.130435
39.521739
def affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True): """Return affine transform matrix to register two point sets. v0 and v1 are shape (ndims, \*) arrays of at least ndims non-homogeneous coordinates, where ndims is the dimensionality of the coordinate space. If shear is False, a similarity transformation matrix is returned. If also scale is False, a rigid/Eucledian transformation matrix is returned. By default the algorithm by Hartley and Zissermann [15] is used. If usesvd is True, similarity and Eucledian transformation matrices are calculated by minimizing the weighted sum of squared deviations (RMSD) according to the algorithm by Kabsch [8]. Otherwise, and if ndims is 3, the quaternion based algorithm by Horn [9] is used, which is slower when using this Python implementation. The returned matrix performs rotation, translation and uniform scaling (if specified). >>> v0 = [[0, 1031, 1031, 0], [0, 0, 1600, 1600]] >>> v1 = [[675, 826, 826, 677], [55, 52, 281, 277]] >>> affine_matrix_from_points(v0, v1) array([[ 0.14549, 0.00062, 675.50008], [ 0.00048, 0.14094, 53.24971], [ 0. , 0. , 1. 
]]) >>> T = translation_matrix(numpy.random.random(3)-0.5) >>> R = random_rotation_matrix(numpy.random.random(3)) >>> S = scale_matrix(random.random()) >>> M = concatenate_matrices(T, R, S) >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20 >>> v0[3] = 1 >>> v1 = numpy.dot(M, v0) >>> v0[:3] += numpy.random.normal(0, 1e-8, 300).reshape(3, -1) >>> M = affine_matrix_from_points(v0[:3], v1[:3]) >>> numpy.allclose(v1, numpy.dot(M, v0)) True More examples in superimposition_matrix() """ v0 = numpy.array(v0, dtype=numpy.float64, copy=True) v1 = numpy.array(v1, dtype=numpy.float64, copy=True) ndims = v0.shape[0] if ndims < 2 or v0.shape[1] < ndims or v0.shape != v1.shape: raise ValueError("input arrays are of wrong shape or type") # move centroids to origin t0 = -numpy.mean(v0, axis=1) M0 = numpy.identity(ndims+1) M0[:ndims, ndims] = t0 v0 += t0.reshape(ndims, 1) t1 = -numpy.mean(v1, axis=1) M1 = numpy.identity(ndims+1) M1[:ndims, ndims] = t1 v1 += t1.reshape(ndims, 1) if shear: # Affine transformation A = numpy.concatenate((v0, v1), axis=0) u, s, vh = numpy.linalg.svd(A.T) vh = vh[:ndims].T B = vh[:ndims] C = vh[ndims:2*ndims] t = numpy.dot(C, numpy.linalg.pinv(B)) t = numpy.concatenate((t, numpy.zeros((ndims, 1))), axis=1) M = numpy.vstack((t, ((0.0,)*ndims) + (1.0,))) elif usesvd or ndims != 3: # Rigid transformation via SVD of covariance matrix u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T)) # rotation matrix from SVD orthonormal bases R = numpy.dot(u, vh) if numpy.linalg.det(R) < 0.0: # R does not constitute right handed system R -= numpy.outer(u[:, ndims-1], vh[ndims-1, :]*2.0) s[-1] *= -1.0 # homogeneous transformation matrix M = numpy.identity(ndims+1) M[:ndims, :ndims] = R else: # Rigid transformation matrix via quaternion # compute symmetric matrix N xx, yy, zz = numpy.sum(v0 * v1, axis=1) xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1) xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1) N = [[xx+yy+zz, 0.0, 0.0, 0.0], [yz-zy, xx-yy-zz, 
0.0, 0.0], [zx-xz, xy+yx, yy-xx-zz, 0.0], [xy-yx, zx+xz, yz+zy, zz-xx-yy]] # quaternion: eigenvector corresponding to most positive eigenvalue w, V = numpy.linalg.eigh(N) q = V[:, numpy.argmax(w)] q /= vector_norm(q) # unit quaternion # homogeneous transformation matrix M = quaternion_matrix(q) if scale and not shear: # Affine transformation; scale is ratio of RMS deviations from centroid v0 *= v0 v1 *= v1 M[:ndims, :ndims] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0)) # move centroids back M = numpy.dot(numpy.linalg.inv(M1), numpy.dot(M, M0)) M /= M[ndims, ndims] return M
[ "def", "affine_matrix_from_points", "(", "v0", ",", "v1", ",", "shear", "=", "True", ",", "scale", "=", "True", ",", "usesvd", "=", "True", ")", ":", "v0", "=", "numpy", ".", "array", "(", "v0", ",", "dtype", "=", "numpy", ".", "float64", ",", "cop...
39.738318
18.672897
def process_request(self, request): """Adds data necessary for Horizon to function to the request.""" # Activate timezone handling tz = request.session.get('django_timezone') if tz: timezone.activate(tz) # Check for session timeout try: timeout = settings.SESSION_TIMEOUT except AttributeError: timeout = 1800 last_activity = request.session.get('last_activity', None) timestamp = int(time.time()) request.horizon = {'dashboard': None, 'panel': None, 'async_messages': []} if not hasattr(request, "user") or not request.user.is_authenticated(): # proceed no further if the current request is already known # not to be authenticated return None if request.is_ajax(): # if the request is Ajax we do not want to proceed, as clients can # 1) create pages with constant polling, which can create race # conditions when a page navigation occurs # 2) might leave a user seemingly left logged in forever # 3) thrashes db backed session engines with tons of changes return None # If we use cookie-based sessions, check that the cookie size does not # reach the max size accepted by common web browsers. if ( settings.SESSION_ENGINE == 'django.contrib.sessions.backends.signed_cookies' ): max_cookie_size = getattr( settings, 'SESSION_COOKIE_MAX_SIZE', None) session_cookie_name = getattr( settings, 'SESSION_COOKIE_NAME', None) session_key = request.COOKIES.get(session_cookie_name) if max_cookie_size is not None and session_key is not None: cookie_size = sum(( len(key) + len(value) for key, value in six.iteritems(request.COOKIES) )) if cookie_size >= max_cookie_size: LOG.error( 'Total Cookie size for user_id: %(user_id)s is ' '%(cookie_size)sB >= %(max_cookie_size)sB. 
' 'You need to configure file-based or database-backed ' 'sessions instead of cookie-based sessions: ' 'http://docs.openstack.org/developer/horizon/topics/' 'deployment.html#session-storage' % { 'user_id': request.session.get( 'user_id', 'Unknown'), 'cookie_size': cookie_size, 'max_cookie_size': max_cookie_size, } ) request.session['last_activity'] = timestamp
[ "def", "process_request", "(", "self", ",", "request", ")", ":", "# Activate timezone handling", "tz", "=", "request", ".", "session", ".", "get", "(", "'django_timezone'", ")", "if", "tz", ":", "timezone", ".", "activate", "(", "tz", ")", "# Check for session...
45.253968
19.238095
def resolve_colors(n_colors=None, colormap=None, colors=None): """ Generates a list of colors based on common color arguments, for example the name of a colormap or palette or another iterable of colors. The list is then truncated (or multiplied) to the specific number of requested colors. Parameters ---------- n_colors : int, default: None Specify the length of the list of returned colors, which will either truncate or multiple the colors available. If None the length of the colors will not be modified. colormap : str, default: None The name of the matplotlib color map with which to generate colors. colors : iterable, default: None A collection of colors to use specifically with the plot. Returns ------- colors : list A list of colors that can be used in matplotlib plots. Notes ----- This function was originally based on a similar function in the pandas plotting library that has been removed in the new version of the library. """ # Work with the colormap if specified and colors is not if colormap is not None and colors is None: if isinstance(colormap, str): try: colormap = cm.get_cmap(colormap) except ValueError as e: raise YellowbrickValueError(e) n_colors = n_colors or len(get_color_cycle()) _colors = list(map(colormap, np.linspace(0, 1, num=n_colors))) # Work with the color list elif colors is not None: # Warn if both colormap and colors is specified. if colormap is not None: warnings.warn( "both colormap and colors specified; using colors" ) _colors = list(colors) # Ensure colors is a list # Get the default colors else: _colors = get_color_cycle() # Truncate or multiple the color list according to the number of colors if n_colors is not None and len(_colors) != n_colors: _colors = [ _colors[idx % len(_colors)] for idx in np.arange(n_colors) ] return _colors
[ "def", "resolve_colors", "(", "n_colors", "=", "None", ",", "colormap", "=", "None", ",", "colors", "=", "None", ")", ":", "# Work with the colormap if specified and colors is not", "if", "colormap", "is", "not", "None", "and", "colors", "is", "None", ":", "if",...
31.969231
24.707692
def tree(path, depth=2, topdown=True, followlinks=False, showhidden=False):
    """Walk *path* and collect ``(root, dirs, files)`` tuples up to *depth* levels.

    Each entry of ``dirs`` and ``files`` is wrapped in a ``File`` object.
    Hidden entries (names starting with ``.``) are skipped unless
    *showhidden* is True. *topdown* and *followlinks* are forwarded to
    :func:`os.walk`.

    Note: despite the historical description, this returns a **list** of
    tuples, not a generator.
    """
    rt = []
    for root, dirs, files in os.walk(path, topdown=topdown, followlinks=followlinks):
        if not showhidden and File.is_hidden(root):
            continue
        # Depth is the number of path components of root below *path*
        current_depth = len(os.path.relpath(root, path).split(os.sep))
        if current_depth > depth:
            continue
        if showhidden:
            visible_dirs, visible_files = dirs, files
        else:
            visible_dirs = [d for d in dirs if d[0] != '.']
            visible_files = [f for f in files if f[0] != '.']
        rt.append((
            root,
            [File(os.path.join(root, d)) for d in visible_dirs],
            [File(os.path.join(root, f)) for f in visible_files],
        ))
    return rt
[ "def", "tree", "(", "path", ",", "depth", "=", "2", ",", "topdown", "=", "True", ",", "followlinks", "=", "False", ",", "showhidden", "=", "False", ")", ":", "rt", "=", "[", "]", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", ...
35.846154
26.038462
def _get_to_many_relationship_value(self, obj, column):
    """
    Collect the formatted values of a One-To-Many / Many-To-Many relationship

    :param obj obj: The instance we manage
    :param dict column: The column description dictionnary
    :returns: dict mapping several index aliases ('item_N', 'N', '_N') and
        'first'/'last' markers to the compiled value of each related object
    """
    related_key = column.get('related_key', None)
    related_items = getattr(obj, column['__col__'].key)
    result = {}
    if not related_items:
        return result
    count = len(related_items)
    for position, rel_obj in enumerate(related_items):
        # Either format through the related key or compile the whole object
        if related_key:
            formatted = self._get_formatted_val(rel_obj, related_key, column)
        else:
            formatted = column['__prop__'].compile_obj(rel_obj)
        # Expose the same value under several alias keys for templates
        result['item_%d' % position] = formatted
        result[str(position)] = formatted
        result["_" + str(position)] = formatted
        if position == 0:
            result['first'] = formatted
        if position == count - 1:
            result['last'] = formatted
    return result
[ "def", "_get_to_many_relationship_value", "(", "self", ",", "obj", ",", "column", ")", ":", "related_key", "=", "column", ".", "get", "(", "'related_key'", ",", "None", ")", "related", "=", "getattr", "(", "obj", ",", "column", "[", "'__col__'", "]", ".", ...
34.558824
17.441176
def load_texture(self, texture_version):
    '''
    Transfer texture coordinates of the given version onto this mesh.

    ``texture_version`` is an integer; versions [0, 1, 2, 3] are available
    under /is/ps/shared/data/body/template/texture_coordinates/. The
    low-resolution template is tried first; when its face array shape does
    not match this mesh, the high-resolution template is used instead.
    '''
    import numpy as np
    from lace.mesh import Mesh
    from lace.cache import sc
    lowres_tex_template = 's3://bodylabs-korper-assets/is/ps/shared/data/body/template/texture_coordinates/textured_template_low_v%d.obj' % texture_version
    highres_tex_template = 's3://bodylabs-korper-assets/is/ps/shared/data/body/template/texture_coordinates/textured_template_high_v%d.obj' % texture_version
    candidate = Mesh(filename=sc(lowres_tex_template))
    # Fall back to the high-res template when face counts do not line up
    if not np.all(candidate.f.shape == self.f.shape):
        candidate = Mesh(filename=sc(highres_tex_template))
    self.transfer_texture(candidate)
[ "def", "load_texture", "(", "self", ",", "texture_version", ")", ":", "import", "numpy", "as", "np", "lowres_tex_template", "=", "'s3://bodylabs-korper-assets/is/ps/shared/data/body/template/texture_coordinates/textured_template_low_v%d.obj'", "%", "texture_version", "highres_tex_t...
66.071429
38.214286
def _unicode(ctx, text):
    """ Returns a numeric code for the first character in a text string """
    text = conversions.to_string(text, ctx)
    if not text:
        raise ValueError("Text can't be empty")
    return ord(text[0])
[ "def", "_unicode", "(", "ctx", ",", "text", ")", ":", "text", "=", "conversions", ".", "to_string", "(", "text", ",", "ctx", ")", "if", "len", "(", "text", ")", "==", "0", ":", "raise", "ValueError", "(", "\"Text can't be empty\"", ")", "return", "ord"...
30
11
def QA_util_get_trade_gap(start, end):
    """Return the number of trading days from start to end, inclusive.

    Returns 0 when the range cannot be resolved to real trading dates.
    """
    real_start, real_end = QA_util_get_real_datelist(start, end)
    if real_start is None:
        return 0
    # Inclusive count of SSE trading-calendar positions
    return trade_date_sse.index(real_end) + 1 - trade_date_sse.index(real_start)
[ "def", "QA_util_get_trade_gap", "(", "start", ",", "end", ")", ":", "start", ",", "end", "=", "QA_util_get_real_datelist", "(", "start", ",", "end", ")", "if", "start", "is", "not", "None", ":", "return", "trade_date_sse", ".", "index", "(", "end", ")", ...
36.285714
17.428571
def insert(self, name, value, timestamp=None, intervals=0, **kwargs):
    '''
    Insert a value for the timeseries "name". For each interval in the
    configuration, will insert the value into a bucket for the interval
    "timestamp". If time is not supplied, will default to time.time(), else
    it should be a floating point value.

    If "intervals" is less than 0, inserts the value into timestamps
    "abs(intervals)" preceeding "timestamp" (i.e. "-1" inserts one extra
    value). If "intervals" is greater than 0, inserts the value into that
    many more intervals after "timestamp". The default behavior is to insert
    for a single timestamp.

    This supports the public methods of the same name in the subclasses. The
    value is expected to already be converted.
    '''
    # Falsy timestamp (None or 0) is replaced with "now"
    if not timestamp:
      timestamp = time.time()
    # Collections are batched: convert each element, then hand the whole
    # {timestamp: {name: values}} payload to the batch path.
    if isinstance(value, (list,tuple,set)):
      if self._write_func:
        value = [ self._write_func(v) for v in value ]
      return self._batch_insert({timestamp:{name:value}}, intervals, **kwargs)
    # Scalar path: convert once, then delegate to the subclass hook
    if self._write_func:
      value = self._write_func(value)

    # TODO: document acceptable names
    # TODO: document what types values are supported
    # TODO: document behavior when time is outside the bounds of TTLed config
    # TODO: document how the data is stored.
    # TODO: better abstraction for "intervals" processing rather than in each implementation
    self._insert( name, value, timestamp, intervals, **kwargs )
[ "def", "insert", "(", "self", ",", "name", ",", "value", ",", "timestamp", "=", "None", ",", "intervals", "=", "0", ",", "*", "*", "kwargs", ")", ":", "if", "not", "timestamp", ":", "timestamp", "=", "time", ".", "time", "(", ")", "if", "isinstance...
42.764706
26.352941
def makeArg(segID, N, CA, C, O, geo):
    '''Creates an Arginine residue from backbone atoms N, CA, C, O and the
    internal-coordinate geometry object ``geo``; returns a Biopython
    ``Residue`` named "ARG" at sequence position ``segID``.'''
    ##R-Group
    # Bond lengths, angles and dihedrals for each side-chain atom, taken
    # from the geometry parameter object.
    CA_CB_length=geo.CA_CB_length
    C_CA_CB_angle=geo.C_CA_CB_angle
    N_C_CA_CB_diangle=geo.N_C_CA_CB_diangle

    CB_CG_length=geo.CB_CG_length
    CA_CB_CG_angle= geo.CA_CB_CG_angle
    N_CA_CB_CG_diangle=geo.N_CA_CB_CG_diangle

    CG_CD_length=geo.CG_CD_length
    CB_CG_CD_angle=geo.CB_CG_CD_angle
    CA_CB_CG_CD_diangle=geo.CA_CB_CG_CD_diangle

    CD_NE_length=geo.CD_NE_length
    CG_CD_NE_angle=geo.CG_CD_NE_angle
    CB_CG_CD_NE_diangle=geo.CB_CG_CD_NE_diangle

    NE_CZ_length=geo.NE_CZ_length
    CD_NE_CZ_angle=geo.CD_NE_CZ_angle
    CG_CD_NE_CZ_diangle=geo.CG_CD_NE_CZ_diangle

    CZ_NH1_length=geo.CZ_NH1_length
    NE_CZ_NH1_angle=geo.NE_CZ_NH1_angle
    CD_NE_CZ_NH1_diangle=geo.CD_NE_CZ_NH1_diangle

    CZ_NH2_length=geo.CZ_NH2_length
    NE_CZ_NH2_angle=geo.NE_CZ_NH2_angle
    CD_NE_CZ_NH2_diangle=geo.CD_NE_CZ_NH2_diangle

    # Build each side-chain atom from the three previously placed atoms
    # using internal coordinates (length, angle, dihedral).
    carbon_b= calculateCoordinates(N, C, CA, CA_CB_length, C_CA_CB_angle, N_C_CA_CB_diangle)
    CB= Atom("CB", carbon_b, 0.0 , 1.0, " "," CB", 0,"C")
    carbon_g= calculateCoordinates(N, CA, CB, CB_CG_length, CA_CB_CG_angle, N_CA_CB_CG_diangle)
    CG= Atom("CG", carbon_g, 0.0, 1.0, " ", " CG", 0, "C")
    carbon_d= calculateCoordinates(CA, CB, CG, CG_CD_length, CB_CG_CD_angle, CA_CB_CG_CD_diangle)
    CD= Atom("CD", carbon_d, 0.0, 1.0, " ", " CD", 0, "C")
    nitrogen_e= calculateCoordinates(CB, CG, CD, CD_NE_length, CG_CD_NE_angle, CB_CG_CD_NE_diangle)
    NE= Atom("NE", nitrogen_e, 0.0, 1.0, " ", " NE", 0, "N")
    carbon_z= calculateCoordinates(CG, CD, NE, NE_CZ_length, CD_NE_CZ_angle, CG_CD_NE_CZ_diangle)
    CZ= Atom("CZ", carbon_z, 0.0, 1.0, " ", " CZ", 0, "C")
    nitrogen_h1= calculateCoordinates(CD, NE, CZ, CZ_NH1_length, NE_CZ_NH1_angle, CD_NE_CZ_NH1_diangle)
    NH1= Atom("NH1", nitrogen_h1, 0.0, 1.0, " ", " NH1", 0, "N")
    nitrogen_h2= calculateCoordinates(CD, NE, CZ, CZ_NH2_length, NE_CZ_NH2_angle, CD_NE_CZ_NH2_diangle)
    NH2= Atom("NH2", nitrogen_h2, 0.0, 1.0, " ", " NH2", 0, "N")

    ##Create Residue Data Structure
    res= Residue((' ', segID, ' '), "ARG", '    ')
    # Backbone first, then the side chain in bonded order
    res.add(N)
    res.add(CA)
    res.add(C)
    res.add(O)
    res.add(CB)
    res.add(CG)
    res.add(CD)
    res.add(NE)
    res.add(CZ)
    res.add(NH1)
    res.add(NH2)
    return res
[ "def", "makeArg", "(", "segID", ",", "N", ",", "CA", ",", "C", ",", "O", ",", "geo", ")", ":", "##R-Group", "CA_CB_length", "=", "geo", ".", "CA_CB_length", "C_CA_CB_angle", "=", "geo", ".", "C_CA_CB_angle", "N_C_CA_CB_diangle", "=", "geo", ".", "N_C_CA_...
38.166667
21.9
def send(self, send_email=True):
    """Marks the invoice as sent in Holvi

    If send_email is False then the invoice is *not* automatically
    emailed to the recipient and you must take care of sending the
    invoice yourself.

    :param send_email: whether Holvi should email the invoice itself
    :returns: the decoded response from the Holvi status endpoint
    """
    url = str(self.api.base_url + '{code}/status/').format(code=self.code)  # six.u messes this up
    payload = {
        'mark_as_sent': True,
        'send_email': send_email,
    }
    # The original bound the response to an unused local and dropped it;
    # return it so callers can inspect the API result.
    return self.api.connection.make_put(url, payload)
[ "def", "send", "(", "self", ",", "send_email", "=", "True", ")", ":", "url", "=", "str", "(", "self", ".", "api", ".", "base_url", "+", "'{code}/status/'", ")", ".", "format", "(", "code", "=", "self", ".", "code", ")", "# six.u messes this up", "paylo...
42
22.333333
def normalized_distance(self, image):
    """Calculates the distance of a given image to the original image.

    Parameters
    ----------
    image : `numpy.ndarray`
        The image that should be compared to the original image.

    Returns
    -------
    :class:`Distance`
        The distance between the given image and the original image.

    """
    # Delegates to the (name-mangled) distance measure configured on this
    # instance, using the stored reference image and the model's input
    # bounds. NOTE(review): __original_image_for_distance is presumably a
    # preprocessed copy of the original image -- confirm in __init__.
    return self.__distance(
        self.__original_image_for_distance,
        image,
        bounds=self.bounds())
[ "def", "normalized_distance", "(", "self", ",", "image", ")", ":", "return", "self", ".", "__distance", "(", "self", ".", "__original_image_for_distance", ",", "image", ",", "bounds", "=", "self", ".", "bounds", "(", ")", ")" ]
27.368421
19.105263
def prt_summary_code(self, prt=sys.stdout):
    """Print a summary of evidence groups and the codes accepted by get_evcodes."""
    prt.write('EVIDENCE GROUP AND CODES:\n')
    # One line per group: left-aligned group name, then its codes
    for group_name, code2nt in self.grp2code2nt.items():
        codes_str = ' '.join(code2nt.keys())
        prt.write('    {GRP:19}: {CODES}\n'.format(GRP=group_name, CODES=codes_str))
[ "def", "prt_summary_code", "(", "self", ",", "prt", "=", "sys", ".", "stdout", ")", ":", "prt", ".", "write", "(", "'EVIDENCE GROUP AND CODES:\\n'", ")", "for", "grp", ",", "c2nt", "in", "self", ".", "grp2code2nt", ".", "items", "(", ")", ":", "prt", "...
63.2
14.8
def neighbor_add(self, address, remote_as,
                 remote_port=DEFAULT_BGP_PORT,
                 enable_ipv4=DEFAULT_CAP_MBGP_IPV4,
                 enable_ipv6=DEFAULT_CAP_MBGP_IPV6,
                 enable_vpnv4=DEFAULT_CAP_MBGP_VPNV4,
                 enable_vpnv6=DEFAULT_CAP_MBGP_VPNV6,
                 enable_evpn=DEFAULT_CAP_MBGP_EVPN,
                 enable_ipv4fs=DEFAULT_CAP_MBGP_IPV4FS,
                 enable_ipv6fs=DEFAULT_CAP_MBGP_IPV6FS,
                 enable_vpnv4fs=DEFAULT_CAP_MBGP_VPNV4FS,
                 enable_vpnv6fs=DEFAULT_CAP_MBGP_VPNV6FS,
                 enable_l2vpnfs=DEFAULT_CAP_MBGP_L2VPNFS,
                 enable_enhanced_refresh=DEFAULT_CAP_ENHANCED_REFRESH,
                 enable_four_octet_as_number=DEFAULT_CAP_FOUR_OCTET_AS_NUMBER,
                 next_hop=None, password=None, multi_exit_disc=None,
                 site_of_origins=None,
                 is_route_server_client=DEFAULT_IS_ROUTE_SERVER_CLIENT,
                 is_route_reflector_client=DEFAULT_IS_ROUTE_REFLECTOR_CLIENT,
                 is_next_hop_self=DEFAULT_IS_NEXT_HOP_SELF,
                 local_address=None,
                 local_port=None, local_as=None,
                 connect_mode=DEFAULT_CONNECT_MODE):
    """ This method registers a new neighbor. The BGP speaker tries to
    establish a bgp session with the peer (accepts a connection
    from the peer and also tries to connect to it).

    ``address`` specifies the IP address of the peer. It must be
    the string representation of an IP address. Only IPv4 is
    supported now.

    ``remote_as`` specifies the AS number of the peer. It must be
    an integer between 1 and 65535.

    ``remote_port`` specifies the TCP port number of the peer.

    ``enable_ipv4`` enables IPv4 address family for this
    neighbor.

    ``enable_ipv6`` enables IPv6 address family for this
    neighbor.

    ``enable_vpnv4`` enables VPNv4 address family for this
    neighbor.

    ``enable_vpnv6`` enables VPNv6 address family for this
    neighbor.

    ``enable_evpn`` enables Ethernet VPN address family for this
    neighbor.

    ``enable_ipv4fs`` enables IPv4 Flow Specification address family
    for this neighbor.

    ``enable_ipv6fs`` enables IPv6 Flow Specification address family
    for this neighbor.

    ``enable_vpnv4fs`` enables VPNv4 Flow Specification address family
    for this neighbor.

    ``enable_vpnv6fs`` enables VPNv6 Flow Specification address family
    for this neighbor.

    ``enable_l2vpnfs`` enables L2VPN Flow Specification address family
    for this neighbor.

    ``enable_enhanced_refresh`` enables Enhanced Route Refresh for this
    neighbor.

    ``enable_four_octet_as_number`` enables Four-Octet AS Number
    capability for this neighbor.

    ``next_hop`` specifies the next hop IP address. If not
    specified, host's ip address to access to a peer is used.

    ``password`` is used for the MD5 authentication if it's
    specified. By default, the MD5 authentication is disabled.

    ``multi_exit_disc`` specifies multi exit discriminator (MED) value
    as an int type value.
    If omitted, MED is not sent to the neighbor.

    ``site_of_origins`` specifies site_of_origin values.
    This parameter must be a list of string.

    ``is_route_server_client`` specifies whether this neighbor is a
    router server's client or not.

    ``is_route_reflector_client`` specifies whether this neighbor is a
    router reflector's client or not.

    ``is_next_hop_self`` specifies whether the BGP speaker announces
    its own ip address to iBGP neighbor or not as path's next_hop address.

    ``local_address`` specifies Loopback interface address for
    iBGP peering.

    ``local_port`` specifies source TCP port for iBGP peering.

    ``local_as`` specifies local AS number per-peer.
    If omitted, the AS number of BGPSpeaker instance is used.

    ``connect_mode`` specifies how to connect to this neighbor.
    This parameter must be one of the following.

    - CONNECT_MODE_ACTIVE         = 'active'
    - CONNECT_MODE_PASSIVE        = 'passive'
    - CONNECT_MODE_BOTH (default) = 'both'
    """
    # Assemble the neighbor configuration understood by the internal
    # 'neighbor.create' API call; keys are module-level setting constants.
    bgp_neighbor = {
        neighbors.IP_ADDRESS: address,
        neighbors.REMOTE_AS: remote_as,
        REMOTE_PORT: remote_port,
        PEER_NEXT_HOP: next_hop,
        PASSWORD: password,
        IS_ROUTE_SERVER_CLIENT: is_route_server_client,
        IS_ROUTE_REFLECTOR_CLIENT: is_route_reflector_client,
        IS_NEXT_HOP_SELF: is_next_hop_self,
        CONNECT_MODE: connect_mode,
        CAP_ENHANCED_REFRESH: enable_enhanced_refresh,
        CAP_FOUR_OCTET_AS_NUMBER: enable_four_octet_as_number,
        CAP_MBGP_IPV4: enable_ipv4,
        CAP_MBGP_IPV6: enable_ipv6,
        CAP_MBGP_VPNV4: enable_vpnv4,
        CAP_MBGP_VPNV6: enable_vpnv6,
        CAP_MBGP_EVPN: enable_evpn,
        CAP_MBGP_IPV4FS: enable_ipv4fs,
        CAP_MBGP_IPV6FS: enable_ipv6fs,
        CAP_MBGP_VPNV4FS: enable_vpnv4fs,
        CAP_MBGP_VPNV6FS: enable_vpnv6fs,
        CAP_MBGP_L2VPNFS: enable_l2vpnfs,
    }

    # Optional settings are only included when explicitly provided, so the
    # core can fall back to its own defaults otherwise.
    if multi_exit_disc:
        bgp_neighbor[MULTI_EXIT_DISC] = multi_exit_disc

    if site_of_origins:
        bgp_neighbor[SITE_OF_ORIGINS] = site_of_origins

    if local_address:
        bgp_neighbor[LOCAL_ADDRESS] = local_address

    if local_port:
        bgp_neighbor[LOCAL_PORT] = local_port

    if local_as:
        bgp_neighbor[LOCAL_AS] = local_as

    call('neighbor.create', **bgp_neighbor)
[ "def", "neighbor_add", "(", "self", ",", "address", ",", "remote_as", ",", "remote_port", "=", "DEFAULT_BGP_PORT", ",", "enable_ipv4", "=", "DEFAULT_CAP_MBGP_IPV4", ",", "enable_ipv6", "=", "DEFAULT_CAP_MBGP_IPV6", ",", "enable_vpnv4", "=", "DEFAULT_CAP_MBGP_VPNV4", "...
39.013514
22.331081
def isel(self, indexers=None, drop=False, **indexers_kwargs):
    """Return a new DataArray selected by integer position along the
    specified dimension(s).

    See Also
    --------
    Dataset.isel
    DataArray.sel
    """
    merged_indexers = either_dict_or_kwargs(indexers, indexers_kwargs, 'isel')
    # Index through the temporary Dataset representation, then convert back
    indexed = self._to_temp_dataset().isel(drop=drop, indexers=merged_indexers)
    return self._from_temp_dataset(indexed)
[ "def", "isel", "(", "self", ",", "indexers", "=", "None", ",", "drop", "=", "False", ",", "*", "*", "indexers_kwargs", ")", ":", "indexers", "=", "either_dict_or_kwargs", "(", "indexers", ",", "indexers_kwargs", ",", "'isel'", ")", "ds", "=", "self", "."...
37.5
18.083333
def router_fabric_virtual_gateway_address_family_ipv4_gratuitous_arp_timer(self, **kwargs):
    """Auto Generated Code

    Build the NETCONF payload for the fabric-virtual-gateway IPv4
    gratuitous-arp timer and submit it through the callback.

    Required kwarg: ``timer`` (value for the timer leaf).
    Optional kwarg: ``callback`` overrides ``self._callback``.
    """
    config = ET.Element("config")
    router = ET.SubElement(config, "router", xmlns="urn:brocade.com:mgmt:brocade-common-def")
    fabric_virtual_gateway = ET.SubElement(router, "fabric-virtual-gateway", xmlns="urn:brocade.com:mgmt:brocade-anycast-gateway")
    address_family = ET.SubElement(fabric_virtual_gateway, "address-family")
    ipv4 = ET.SubElement(address_family, "ipv4")
    gratuitous_arp = ET.SubElement(ipv4, "gratuitous-arp")
    timer = ET.SubElement(gratuitous_arp, "timer")
    timer.text = kwargs.pop('timer')

    # dict.pop's default argument is evaluated eagerly, so the original
    # `kwargs.pop('callback', self._callback)` touched self._callback even
    # when an explicit callback was supplied. Only fall back when needed.
    if 'callback' in kwargs:
        callback = kwargs.pop('callback')
    else:
        callback = self._callback
    return callback(config)
[ "def", "router_fabric_virtual_gateway_address_family_ipv4_gratuitous_arp_timer", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "router", "=", "ET", ".", "SubElement", "(", "config", ",", "\"router\"", ...
55.428571
25.642857
def dict_diff(old, new):
    """
    Return a dict representing the differences between the dicts `old` and
    `new`. Deleted keys appear as a key with the value :data:`None`, added
    and changed keys appear as a key with the new value.
    """
    old_keys = viewkeys(old)
    new_keys = viewkeys(dict(new))
    out = {}
    # Added keys and changed values both take their value from `new`
    for key in new_keys:
        if key not in old_keys or old[key] != new[key]:
            out[key] = new[key]
    # Keys removed in `new` are flagged with None
    for key in old_keys - new_keys:
        out[key] = None
    return out
[ "def", "dict_diff", "(", "old", ",", "new", ")", ":", "old_keys", "=", "viewkeys", "(", "old", ")", "new_keys", "=", "viewkeys", "(", "dict", "(", "new", ")", ")", "out", "=", "{", "}", "for", "key", "in", "new_keys", "-", "old_keys", ":", "out", ...
32.235294
13.764706
def add_mpl_colorscale(fig, heatmap_gs, ax_map, params, title=None):
    """Add colour scale to heatmap."""
    # Five evenly spaced ticks across the configured value range
    cbticks = [params.vmin + frac * params.vdiff for frac in (0, 0.25, 0.5, 0.75, 1)]
    if params.vmax > 10:
        # Round large tick values to two significant figures
        exponent = int(floor(log10(params.vmax))) - 1
        cbticks = [int(round(tick, -exponent)) for tick in cbticks]

    # Place the colour bar in the middle cell of a 1x3 sub-grid
    scale_subplot = gridspec.GridSpecFromSubplotSpec(
        1, 3, subplot_spec=heatmap_gs[0, 0], wspace=0.0, hspace=0.0
    )
    scale_ax = fig.add_subplot(scale_subplot[0, 1])
    cbar = fig.colorbar(ax_map, scale_ax, ticks=cbticks)
    if title:
        cbar.set_label(title, fontsize=6)

    # Labels and ticks go on the left; no outline box
    cbar.ax.yaxis.set_ticks_position("left")
    cbar.ax.yaxis.set_label_position("left")
    cbar.ax.tick_params(labelsize=6)
    cbar.outline.set_linewidth(0)
    return cbar
[ "def", "add_mpl_colorscale", "(", "fig", ",", "heatmap_gs", ",", "ax_map", ",", "params", ",", "title", "=", "None", ")", ":", "# Set tick intervals", "cbticks", "=", "[", "params", ".", "vmin", "+", "e", "*", "params", ".", "vdiff", "for", "e", "in", ...
40.25
17.35
def _parse_lists(self, match):
    '''Parse lists.'''
    # Group 4 missing means this is a plain username, not a list reference
    if match.group(4) is None:
        return match.group(0)

    pre, at_char, user, list_name = match.groups()
    list_name = list_name[1:]  # strip the leading separator character

    if self._include_spans:
        self._lists.append((user, list_name, match.span(0)))
    else:
        self._lists.append((user, list_name))

    # Only produce replacement markup in HTML mode (otherwise returns None,
    # matching the original behaviour)
    if self._html:
        return '%s%s' % (pre, self.format_list(at_char, user, list_name))
[ "def", "_parse_lists", "(", "self", ",", "match", ")", ":", "# Don't parse usernames here", "if", "match", ".", "group", "(", "4", ")", "is", "None", ":", "return", "match", ".", "group", "(", "0", ")", "pre", ",", "at_char", ",", "user", ",", "list_na...
31.375
19.125
def open(filename,
         mode='r',
         content_type=None,
         options=None,
         read_buffer_size=storage_api.ReadBuffer.DEFAULT_BUFFER_SIZE,
         retry_params=None,
         _account_id=None,
         offset=0):
  """Opens a Google Cloud Storage file and returns it as a File-like object.

  Args:
    filename: A Google Cloud Storage filename of form '/bucket/filename'.
    mode: 'r' for reading mode. 'w' for writing mode.
      In reading mode, the file must exist. In writing mode, a file will
      be created or be overrode.
    content_type: The MIME type of the file. str. Only valid in writing mode.
    options: A str->basestring dict to specify additional headers to pass to
      GCS e.g. {'x-goog-acl': 'private', 'x-goog-meta-foo': 'foo'}.
      Supported options are x-goog-acl, x-goog-meta-, cache-control,
      content-disposition, and content-encoding.
      Only valid in writing mode.
      See https://developers.google.com/storage/docs/reference-headers
      for details.
    read_buffer_size: The buffer size for read. Read keeps a buffer
      and prefetches another one. To minimize blocking for large files,
      always read by buffer size. To minimize number of RPC requests for
      small files, set a large buffer size. Max is 30MB.
    retry_params: An instance of api_utils.RetryParams for subsequent calls
      to GCS from this file handle. If None, the default one is used.
    _account_id: Internal-use only.
    offset: Number of bytes to skip at the start of the file. If None, 0 is
      used.

  Returns:
    A reading or writing buffer that supports File-like interface. Buffer
    must be closed after operations are done.

  Raises:
    errors.AuthorizationError: if authorization failed.
    errors.NotFoundError: if an object that's expected to exist doesn't.
    ValueError: invalid open mode or if content_type or options are specified
      in reading mode.
  """
  common.validate_file_path(filename)
  api = storage_api._get_storage_api(retry_params=retry_params,
                                     account_id=_account_id)
  filename = api_utils._quote_filename(filename)

  # Dispatch on the requested mode; anything other than 'r'/'w' is an error.
  if mode == 'w':
    common.validate_options(options)
    return storage_api.StreamingBuffer(api, filename, content_type, options)
  if mode == 'r':
    if content_type or options:
      raise ValueError('Options and content_type can only be specified '
                       'for writing mode.')
    return storage_api.ReadBuffer(api,
                                  filename,
                                  buffer_size=read_buffer_size,
                                  offset=offset)
  raise ValueError('Invalid mode %s.' % mode)
[ "def", "open", "(", "filename", ",", "mode", "=", "'r'", ",", "content_type", "=", "None", ",", "options", "=", "None", ",", "read_buffer_size", "=", "storage_api", ".", "ReadBuffer", ".", "DEFAULT_BUFFER_SIZE", ",", "retry_params", "=", "None", ",", "_accou...
43.327869
21.704918
def unpack(packed, object_hook=decode,
           list_hook=None, use_list=False, encoding='utf-8',
           unicode_errors='strict', object_pairs_hook=None,
           max_buffer_size=0, ext_hook=ExtType):
    """
    Unpack a packed object and return an iterator over its contents.

    Note: packed lists will be returned as tuples.
    """
    # Simply forward every option to an Unpacker instance
    unpacker = Unpacker(
        packed,
        object_hook=object_hook,
        list_hook=list_hook,
        use_list=use_list,
        encoding=encoding,
        unicode_errors=unicode_errors,
        object_pairs_hook=object_pairs_hook,
        max_buffer_size=max_buffer_size,
        ext_hook=ext_hook,
    )
    return unpacker
[ "def", "unpack", "(", "packed", ",", "object_hook", "=", "decode", ",", "list_hook", "=", "None", ",", "use_list", "=", "False", ",", "encoding", "=", "'utf-8'", ",", "unicode_errors", "=", "'strict'", ",", "object_pairs_hook", "=", "None", ",", "max_buffer_...
41.1875
10.8125
def from_url(cls, url, **kwargs):
    """
    Build a KubeConfig that points at a single API server URL (useful for
    interacting with kubectl proxy).
    """
    # Minimal kubeconfig document: one cluster, one context, both "self"
    doc = {
        "current-context": "self",
        "clusters": [
            {"name": "self", "cluster": {"server": url}},
        ],
        "contexts": [
            {"name": "self", "context": {"cluster": "self"}},
        ],
    }
    return cls(doc, **kwargs)
[ "def", "from_url", "(", "cls", ",", "url", ",", "*", "*", "kwargs", ")", ":", "doc", "=", "{", "\"clusters\"", ":", "[", "{", "\"name\"", ":", "\"self\"", ",", "\"cluster\"", ":", "{", "\"server\"", ":", "url", ",", "}", ",", "}", ",", "]", ",", ...
26.730769
14.346154
def labels(self, *labelvalues, **labelkwargs):
    """Return the child for the given labelset.

    All metrics can have labels, allowing grouping of related time series.
    Taking a counter as an example:

        from prometheus_client import Counter

        c = Counter('my_requests_total', 'HTTP Failures', ['method', 'endpoint'])
        c.labels('get', '/').inc()
        c.labels('post', '/submit').inc()

    Labels can also be provided as keyword arguments:

        from prometheus_client import Counter

        c = Counter('my_requests_total', 'HTTP Failures', ['method', 'endpoint'])
        c.labels(method='get', endpoint='/').inc()
        c.labels(method='post', endpoint='/submit').inc()

    See the best practices on [naming](http://prometheus.io/docs/practices/naming/)
    and [labels](http://prometheus.io/docs/practices/instrumentation/#use-labels).
    """
    # A metric without declared label names cannot produce children.
    if not self._labelnames:
        raise ValueError('No label names were set when constructing %s' % self)

    # A child (already-labelled) metric must not be labelled again.
    if self._labelvalues:
        raise ValueError('%s already has labels set (%s); can not chain calls to .labels()' % (
            self,
            dict(zip(self._labelnames, self._labelvalues))
        ))

    if labelvalues and labelkwargs:
        raise ValueError("Can't pass both *args and **kwargs")

    if labelkwargs:
        # Keyword form: the provided names must match the declared names
        # exactly; values are ordered by the declared label order.
        if sorted(labelkwargs) != sorted(self._labelnames):
            raise ValueError('Incorrect label names')
        labelvalues = tuple(unicode(labelkwargs[l]) for l in self._labelnames)
    else:
        # Positional form: one value per declared label, in order.
        if len(labelvalues) != len(self._labelnames):
            raise ValueError('Incorrect label count')
        labelvalues = tuple(unicode(l) for l in labelvalues)
    # Create-on-first-use of the child metric; the lock guards the child
    # cache against concurrent .labels() calls.
    with self._lock:
        if labelvalues not in self._metrics:
            self._metrics[labelvalues] = self.__class__(
                self._name,
                documentation=self._documentation,
                labelnames=self._labelnames,
                unit=self._unit,
                labelvalues=labelvalues,
                **self._kwargs
            )
        return self._metrics[labelvalues]
[ "def", "labels", "(", "self", ",", "*", "labelvalues", ",", "*", "*", "labelkwargs", ")", ":", "if", "not", "self", ".", "_labelnames", ":", "raise", "ValueError", "(", "'No label names were set when constructing %s'", "%", "self", ")", "if", "self", ".", "_...
41.166667
22.611111
def _get_lutfiles_version(self):
    """Read the atmospheric-correction LUT version for the active aerosol type.

    The version string is taken from the version file inside the aerosol
    specific correction directory; "v0.0.0" is returned when that file is
    missing.
    """
    basedir = RAYLEIGH_LUT_DIRS[self._aerosol_type]
    version_filename = ATM_CORRECTION_LUT_VERSION[self._aerosol_type]['filename']
    version_path = os.path.join(basedir, version_filename)
    if not os.path.exists(version_path):
        return "v0.0.0"
    # The version is the first line of the file
    with open(version_path, 'r') as fpt:
        return fpt.readline().strip()
[ "def", "_get_lutfiles_version", "(", "self", ")", ":", "basedir", "=", "RAYLEIGH_LUT_DIRS", "[", "self", ".", "_aerosol_type", "]", "lutfiles_version_path", "=", "os", ".", "path", ".", "join", "(", "basedir", ",", "ATM_CORRECTION_LUT_VERSION", "[", "self", ".",...
40.266667
18.066667
def recompress_archive(archive, verbosity=0, interactive=True):
    """Recompress an archive to hopefully smaller size.

    Returns 0 when done; progress is logged unless verbosity is negative.
    """
    util.check_existing_filename(archive)
    util.check_writable_filename(archive)
    verbose = verbosity >= 0
    if verbose:
        util.log_info("Recompressing %s ..." % (archive,))
    result = _recompress_archive(archive, verbosity=verbosity, interactive=interactive)
    if result and verbose:
        util.log_info(result)
    return 0
[ "def", "recompress_archive", "(", "archive", ",", "verbosity", "=", "0", ",", "interactive", "=", "True", ")", ":", "util", ".", "check_existing_filename", "(", "archive", ")", "util", ".", "check_writable_filename", "(", "archive", ")", "if", "verbosity", ">=...
43.5
15.7
def _ps(self, sys_output): ''' a helper method for parsing docker ps output ''' import re gap_pattern = re.compile('\t|\s{2,}') container_list = [] output_lines = sys_output.split('\n') column_headers = gap_pattern.split(output_lines[0]) for i in range(1,len(output_lines)): columns = gap_pattern.split(output_lines[i]) container_details = {} if len(columns) > 1: for j in range(len(column_headers)): container_details[column_headers[j]] = '' if j <= len(columns) - 1: container_details[column_headers[j]] = columns[j] # stupid hack for possible empty port column if container_details['PORTS'] and not container_details['NAMES']: from copy import deepcopy container_details['NAMES'] = deepcopy(container_details['PORTS']) container_details['PORTS'] = '' container_list.append(container_details) return container_list
[ "def", "_ps", "(", "self", ",", "sys_output", ")", ":", "import", "re", "gap_pattern", "=", "re", ".", "compile", "(", "'\\t|\\s{2,}'", ")", "container_list", "=", "[", "]", "output_lines", "=", "sys_output", ".", "split", "(", "'\\n'", ")", "column_header...
44.76
18.12
def parse_css(url_data):
    """
    Parse a CSS file for url() patterns.
    """
    find_urls = linkparse.css_url_re.finditer
    # Strip C-style comments before scanning for links
    content = linkparse.strip_c_comments(url_data.get_content())
    for lineno, line in enumerate(content.splitlines(), start=1):
        for match in find_urls(line):
            column = match.start("url")
            url = strformat.unquote(match.group("url").strip())
            url_data.add_url(url, line=lineno, column=column)
[ "def", "parse_css", "(", "url_data", ")", ":", "lineno", "=", "0", "linkfinder", "=", "linkparse", ".", "css_url_re", ".", "finditer", "strip_comments", "=", "linkparse", ".", "strip_c_comments", "for", "line", "in", "strip_comments", "(", "url_data", ".", "ge...
35.769231
11.769231
def ccs_normalize(compIM, ccsnorm):
    """
    normalize the ccs representation

    Parameters
    ----------
    compIM: 2d array
        The CCS image in CCS representation
    ccsnorm: 2d array
        The normalization matrix in ccs representation

    Returns
    -------
    compIM: 2d array
        The normalized CCS image

    Notes
    -----
    (basically an element wise division for CCS)
    Should probably not be used from outside.
    NOTE(review): mutates ``ccsnorm`` in place when it is already a numpy
    array (``np.asarray`` does not copy).
    """
    compIM = np.asarray(compIM)
    ccsnorm = np.asarray(ccsnorm)
    ys = ccsnorm.shape[0]
    xs = ccsnorm.shape[1]
    # start with first column
    ccsnorm[2::2, 0] = ccsnorm[1:ys - 1:2, 0]
    # continue with middle columns
    ccsnorm[:, 2::2] = ccsnorm[:, 1:xs - 1:2]
    # finish with last column if width is even.
    # Fixed: the original used `xs % 2 is 0`, an identity comparison that
    # only works through CPython's small-int caching (SyntaxWarning on 3.8+).
    if xs % 2 == 0:
        ccsnorm[2::2, xs - 1] = ccsnorm[1:ys - 1:2, xs - 1]
    # avoid 0/0 by nudging zero denominators to the smallest positive value
    ccsnorm[ccsnorm == 0] = np.nextafter(0., 1., dtype=ccsnorm.dtype)
    res = compIM / ccsnorm
    return res
[ "def", "ccs_normalize", "(", "compIM", ",", "ccsnorm", ")", ":", "compIM", "=", "np", ".", "asarray", "(", "compIM", ")", "ccsnorm", "=", "np", ".", "asarray", "(", "ccsnorm", ")", "ys", "=", "ccsnorm", ".", "shape", "[", "0", "]", "xs", "=", "ccsn...
26.416667
17.611111
def do_b0(self, line):
    """Send the Master a BinaryInput (group 2) value of False at index 6. Command syntax is: b0"""
    # Push the point update through the outstation application layer;
    # index 6 identifies the target BinaryInput point in the point database.
    self.application.apply_update(opendnp3.Binary(False), index=6)
[ "def", "do_b0", "(", "self", ",", "line", ")", ":", "self", ".", "application", ".", "apply_update", "(", "opendnp3", ".", "Binary", "(", "False", ")", ",", "index", "=", "6", ")" ]
64.666667
16
def computePreRec(cm, class_names):
    '''
    Compute per-class precision, recall and F1 from a confusion matrix.

    Args:
        cm: square confusion matrix (rows = true class, cols = predicted)
        class_names: list of class names, one per matrix row

    Returns:
        (recall, precision, f1) lists, one entry per class -- note the
        recall-first return order (the original docstring implied
        precision first, which did not match the code). Returns None
        (after printing an error) when class_names does not match the
        matrix size.
    '''
    n_classes = cm.shape[0]
    if len(class_names) != n_classes:
        print("Error in computePreRec! Confusion matrix and class_names "
              "list must be of the same size!")
        return
    precision, recall, f1 = [], [], []
    for i in range(n_classes):
        # precision = TP / column sum, recall = TP / row sum
        p = cm[i, i] / numpy.sum(cm[:, i])
        r = cm[i, i] / numpy.sum(cm[i, :])
        precision.append(p)
        recall.append(r)
        f1.append(2 * p * r / (p + r))
    return recall, precision, f1
[ "def", "computePreRec", "(", "cm", ",", "class_names", ")", ":", "n_classes", "=", "cm", ".", "shape", "[", "0", "]", "if", "len", "(", "class_names", ")", "!=", "n_classes", ":", "print", "(", "\"Error in computePreRec! Confusion matrix and class_names \"", "\"...
35.833333
18.722222
def k_closest(points, k, origin=(0, 0)):
    # Time:  O(k + (n-k) log k)
    # Space: O(k)
    """Return the k points closest to origin using a bounded max-heap.

    Python's heapq is a min-heap, so distances are negated to emulate a
    max-heap of size k.
    """
    # Seed the heap with the first k points
    heap = [(-distance(pt, origin), pt) for pt in points[:k]]
    heapify(heap)

    # For each remaining point, push it and evict the current farthest;
    # heappushpop is cheaper than a separate push followed by pop
    # (each call is O(log k)).
    for pt in points[k:]:
        heappushpop(heap, (-distance(pt, origin), pt))

    return [pt for _, pt in heap]
[ "def", "k_closest", "(", "points", ",", "k", ",", "origin", "=", "(", "0", ",", "0", ")", ")", ":", "# Time: O(k+(n-k)logk)", "# Space: O(k)", "heap", "=", "[", "(", "-", "distance", "(", "p", ",", "origin", ")", ",", "p", ")", "for", "p", "in", ...
32.178571
17.178571
def create_entity(self): """Create a new entity. The entity will have a higher UID than any previously associated with this world. :return: the new entity :rtype: :class:`essence.Entity`""" self._highest_id_seen += 1 entity = Entity(self._highest_id_seen, self) self._entities.append(entity) return entity
[ "def", "create_entity", "(", "self", ")", ":", "self", ".", "_highest_id_seen", "+=", "1", "entity", "=", "Entity", "(", "self", ".", "_highest_id_seen", ",", "self", ")", "self", ".", "_entities", ".", "append", "(", "entity", ")", "return", "entity" ]
30.666667
16.083333
def set_itunes_closed_captioned(self): """Parses isClosedCaptioned from itunes tags and sets value""" try: self.itunes_closed_captioned = self.soup.find( 'itunes:isclosedcaptioned').string self.itunes_closed_captioned = self.itunes_closed_captioned.lower() except AttributeError: self.itunes_closed_captioned = None
[ "def", "set_itunes_closed_captioned", "(", "self", ")", ":", "try", ":", "self", ".", "itunes_closed_captioned", "=", "self", ".", "soup", ".", "find", "(", "'itunes:isclosedcaptioned'", ")", ".", "string", "self", ".", "itunes_closed_captioned", "=", "self", "....
48
14.25
def verify(password, encoded): """ Verify a Password :param password: :param encoded: :return: True or False """ algorithm, iterations, salt, h = split(encoded) to_verify = encode(password, algorithm, salt, int(iterations)) return hmac.compare_digest(to_verify.encode(), encoded.encode())
[ "def", "verify", "(", "password", ",", "encoded", ")", ":", "algorithm", ",", "iterations", ",", "salt", ",", "h", "=", "split", "(", "encoded", ")", "to_verify", "=", "encode", "(", "password", ",", "algorithm", ",", "salt", ",", "int", "(", "iteratio...
28.636364
17.181818
def update(cls): """ Update rows to include known network interfaces """ ifaddrs = getifaddrs() # Create new interfaces for ifname in ifaddrs.keys(): if filter(ifname.startswith, cls.NAME_FILTER): cls.objects.get_or_create(name=ifname) # Delete no longer existing ones cls.objects.exclude(name__in=ifaddrs.keys()).delete()
[ "def", "update", "(", "cls", ")", ":", "ifaddrs", "=", "getifaddrs", "(", ")", "# Create new interfaces", "for", "ifname", "in", "ifaddrs", ".", "keys", "(", ")", ":", "if", "filter", "(", "ifname", ".", "startswith", ",", "cls", ".", "NAME_FILTER", ")",...
33.666667
12.833333
def plot_sgls(mask_exp, depths, mask_tag_filt, sgls, mask_sgls_filt, Az_g_hf, idx_start=None, idx_end=None, path_plot=None, linewidth=0.5, leg_bbox=(1.23,1), clip_x=False): '''Plot sub-glides over depth and high-pass filtered accelerometer signal Args ---- mask_exp: ndarray Boolean mask array to slice tag data to experimtal period depths: ndarray Depth values at each sensor sampling mask_tag_filt: ndarray Boolean mask to slice filtered sub-glides from tag data sgls: pandas.DataFrame Sub-glide summary information defined by `SGL` start/stop indices mask_sgls_filt: ndarray Boolean mask to slice filtered sub-glides from sgls data Az_g_hf: ndarray High-pass filtered, calibrated z-axis accelerometer data idx_start: int Sample index position where plot should begin idx_stop: int Sample index position where plot should stop path_plot: str Path and filename for figure to be saved linewidth: float Width of plot lines (Default: 0.5) clip_x: bool Swith to clip x-axis to the experimental period ''' import matplotlib.pyplot as plt from matplotlib.ticker import FuncFormatter, ScalarFormatter import numpy from . import plotutils from .. 
import utils # Create experiment mask from specified start/end indices if passed if idx_start or idx_end: mask_exp = numpy.zeros(len(depths), dtype=bool) if idx_start and idx_end: mask_exp[idx_start:idx_end] = True elif idx_start: mask_exp[idx_start:ind_exp[-1]] = True elif idx_end: mask_exp[ind_exp[0]:idx_end] = True # Filter passed data to experimental period depths = depths[mask_exp] Az_g_hf = Az_g_hf[mask_exp] # Create subglide indice groups for plotting sgl_ind = numpy.where(mask_tag_filt & mask_exp)[0] notsgl_ind = numpy.where((~mask_tag_filt) & mask_exp)[0] # Create experiment indices from `mask_exp` ind_exp = numpy.where(mask_exp)[0] offset = 0 plt_offset = ind_exp[0] # Clip values to within experimental period if clip_x: offset = ind_exp[0] ind_exp = ind_exp - offset sgl_ind = sgl_ind - offset notsgl_ind = notsgl_ind - offset plt_offset = 0 fig, (ax1, ax2) = plt.subplots(2, 1) # Plot glides c0, c1 = _colors[0:2] ax1 = plotutils.plot_noncontiguous(ax1, depths, sgl_ind, c0, 'Glides', offset=plt_offset, linewidth=linewidth*2) ax1 = plotutils.plot_noncontiguous(ax1, depths, notsgl_ind, c1, 'Stroking', offset=plt_offset, linewidth=linewidth, linestyle='--') # Plot HF Z-axis c0 = _colors[2] ax2.plot(ind_exp, Az_g_hf, color=c0, label='Z-axis HF Acc.', linewidth=linewidth) # Get dives within mask gg = sgls[mask_sgls_filt] # Get midpoint of dive occurance x = (gg['start_idx'] + (gg['stop_idx'] - gg['start_idx'])/2) x = x.values.astype(float) x_mask = (x-offset > ind_exp[0]) & (x-offset< ind_exp[-1]) x = x[x_mask] # Get depth at midpoint if clip_x: x = x - offset ind_x = numpy.round(x).astype(int) else: ind_x = numpy.round(x - plt_offset).astype(int) y = depths[ind_x] # For each dive_id, sgl_id pair, create annotation string, apply dids = gg['dive_id'].values.astype(int) sids = numpy.array(gg.index) dids = dids[x_mask] sids = sids[x_mask] n = ['Dive:{}, SGL:{}'.format(did, sid) for did, sid in zip(dids, sids)] diff = ind_exp[1] - ind_exp[0] for i, txt in 
enumerate(n): # TODO semi-hardcoded dist for annotation ax1.annotate(txt, (x[i]+int(diff*16),y[i])) # Plot shaded areas where not sub-glides ax1 = plotutils.plot_shade_mask(ax1, ind_exp, ~mask_tag_filt[mask_exp], facecolor='#d9d9d9') ax2 = plotutils.plot_shade_mask(ax2, ind_exp, ~mask_tag_filt[mask_exp], facecolor='#d9d9d9') # Set x-axes limits for ax in [ax1, ax2]: ticks = ax.get_yticks() ax.set_ylim((ticks[0], ticks[-1])) if idx_start: xmin = idx_start else: xmin = ax.get_xlim()[0] if idx_end: xmax = idx_end else: xmax = ax.get_xlim()[1] if clip_x: xmin, xmax = xmin-offset, xmax-offset ax.set_xlim(xmin, xmax) for tick in ax.get_xticklabels(): tick.set_rotation(45) tick.set_ha('right') # Update Depth subplot y-axis labels, limits, invert depth ax1.set_ylabel('Depth ($m$)') ymin = depths.min() - (depths.max()*0.01) ymax = depths.max() + (depths.max()*0.01) ax1.set_ylim((ymin, ymax)) ax1.invert_yaxis() ax1.get_yaxis().set_label_coords(-0.09,0.5) # Update PRH subplot y labels, limits ax2.set_ylabel('Z-axis acceleration ($g$)') ax2.set_ylim((Az_g_hf.min(), Az_g_hf.max())) ax2.get_yaxis().set_label_coords(-0.09,0.5) # Scientific notation for ax1 `n_samples` ax1.set_xlabel('No. sensor samples') mf1 = ScalarFormatter(useMathText=True) mf1.set_powerlimits((-2,2)) ax1.xaxis.set_major_formatter(mf1) # Convert n_samples to hourmin labels ax2.set_xlabel('Experiment duration ($min \, sec$)') mf2 = FuncFormatter(plotutils.nsamples_to_minsec) ax2.xaxis.set_major_formatter(mf2) # Create legends outside plot area leg1 = ax1.legend(bbox_to_anchor=leg_bbox) plt.tight_layout(rect=[0,0,0.8,1]) # Save plot if `path_plot` passed if path_plot: import os fname = 'subglide_highlight' if idx_start: fname += '_start{}'.format(idx_start) if idx_end: fname+= '_stop{}'.format(idx_end) ext = '.eps' file_fig = os.path.join(path_plot, fname+ext) plt.savefig(file_fig, box='tight') plt.show() return None
[ "def", "plot_sgls", "(", "mask_exp", ",", "depths", ",", "mask_tag_filt", ",", "sgls", ",", "mask_sgls_filt", ",", "Az_g_hf", ",", "idx_start", "=", "None", ",", "idx_end", "=", "None", ",", "path_plot", "=", "None", ",", "linewidth", "=", "0.5", ",", "l...
32.966667
18.177778
def _infer_spaces(s): """ Uses dynamic programming to infer the location of spaces in a string without spaces. """ s = s.lower() # Find the best match for the i first characters, assuming cost has # been built for the i-1 first characters. # Returns a pair (match_cost, match_length). def best_match(i): candidates = enumerate(reversed(cost[max(0, i - MAXWORD):i])) return min((c + WORDCOST.get(s[i-k-1: i], 9e999), k + 1) for k, c in candidates) # Build the cost array. cost = [0] for i in range(1, len(s) + 1): c, k = best_match(i) cost.append(c) # Backtrack to recover the minimal-cost string. out = [] i = len(s) while i > 0: c, k = best_match(i) assert c == cost[i] out.append(s[i-k:i]) i -= k return u" ".join(reversed(out))
[ "def", "_infer_spaces", "(", "s", ")", ":", "s", "=", "s", ".", "lower", "(", ")", "# Find the best match for the i first characters, assuming cost has", "# been built for the i-1 first characters.", "# Returns a pair (match_cost, match_length).", "def", "best_match", "(", "i",...
27.612903
19.548387
def previousSibling(self) -> Optional[AbstractNode]: """Return the previous sibling of this node. If there is no previous sibling, return ``None``. """ parent = self.parentNode if parent is None: return None return parent.childNodes.item(parent.childNodes.index(self) - 1)
[ "def", "previousSibling", "(", "self", ")", "->", "Optional", "[", "AbstractNode", "]", ":", "parent", "=", "self", ".", "parentNode", "if", "parent", "is", "None", ":", "return", "None", "return", "parent", ".", "childNodes", ".", "item", "(", "parent", ...
36.111111
15.555556
def get_annotated_list_qs(cls, qs): """ Gets an annotated list from a queryset. """ result, info = [], {} start_depth, prev_depth = (None, None) for node in qs: depth = node.get_depth() if start_depth is None: start_depth = depth open = (depth and (prev_depth is None or depth > prev_depth)) if prev_depth is not None and depth < prev_depth: info['close'] = list(range(0, prev_depth - depth)) info = {'open': open, 'close': [], 'level': depth - start_depth} result.append((node, info,)) prev_depth = depth if start_depth and start_depth > 0: info['close'] = list(range(0, prev_depth - start_depth + 1)) return result
[ "def", "get_annotated_list_qs", "(", "cls", ",", "qs", ")", ":", "result", ",", "info", "=", "[", "]", ",", "{", "}", "start_depth", ",", "prev_depth", "=", "(", "None", ",", "None", ")", "for", "node", "in", "qs", ":", "depth", "=", "node", ".", ...
41.578947
12.631579
def estimateBIsochrone(pot,R,z,phi=None): """ NAME: estimateBIsochrone PURPOSE: Estimate a good value for the scale of the isochrone potential by matching the slope of the rotation curve INPUT: pot- Potential instance or list thereof R,z - coordinates (if these are arrays, the median estimated delta is returned, i.e., if this is an orbit) phi= (None) azimuth to use for non-axisymmetric potentials (array if R and z are arrays) OUTPUT: b if 1 R,Z given bmin,bmedian,bmax if multiple R given HISTORY: 2013-09-12 - Written - Bovy (IAS) 2016-02-20 - Changed input order to allow physical conversions - Bovy (UofT) 2016-06-28 - Added phi= keyword for non-axisymmetric potential - Bovy (UofT) """ if pot is None: #pragma: no cover raise IOError("pot= needs to be set to a Potential instance or list thereof") if isinstance(R,nu.ndarray): if phi is None: phi= [None for r in R] bs= nu.array([estimateBIsochrone(pot,R[ii],z[ii],phi=phi[ii], use_physical=False) for ii in range(len(R))]) return nu.array([nu.amin(bs[True^nu.isnan(bs)]), nu.median(bs[True^nu.isnan(bs)]), nu.amax(bs[True^nu.isnan(bs)])]) else: r2= R**2.+z**2 r= math.sqrt(r2) dlvcdlr= dvcircdR(pot,r,phi=phi,use_physical=False)/vcirc(pot,r,phi=phi,use_physical=False)*r try: b= optimize.brentq(lambda x: dlvcdlr-(x/math.sqrt(r2+x**2.)-0.5*r2/(r2+x**2.)), 0.01,100.) except: #pragma: no cover b= nu.nan return b
[ "def", "estimateBIsochrone", "(", "pot", ",", "R", ",", "z", ",", "phi", "=", "None", ")", ":", "if", "pot", "is", "None", ":", "#pragma: no cover", "raise", "IOError", "(", "\"pot= needs to be set to a Potential instance or list thereof\"", ")", "if", "isinstance...
32.037736
28.45283
def mkstemp(self, suffix, prefix, directory=None): """ Generate temp file name in artifacts base dir and close temp file handle """ if not directory: directory = self.artifacts_dir fd, fname = tempfile.mkstemp(suffix, prefix, directory) os.close(fd) os.chmod(fname, 0o644) # FIXME: chmod to parent dir's mode? return fname
[ "def", "mkstemp", "(", "self", ",", "suffix", ",", "prefix", ",", "directory", "=", "None", ")", ":", "if", "not", "directory", ":", "directory", "=", "self", ".", "artifacts_dir", "fd", ",", "fname", "=", "tempfile", ".", "mkstemp", "(", "suffix", ","...
36.090909
12.454545
def _adjust_n_years(other, n, month, reference_day): """Adjust the number of times an annual offset is applied based on another date, and the reference day provided""" if n > 0: if other.month < month or (other.month == month and other.day < reference_day): n -= 1 else: if other.month > month or (other.month == month and other.day > reference_day): n += 1 return n
[ "def", "_adjust_n_years", "(", "other", ",", "n", ",", "month", ",", "reference_day", ")", ":", "if", "n", ">", "0", ":", "if", "other", ".", "month", "<", "month", "or", "(", "other", ".", "month", "==", "month", "and", "other", ".", "day", "<", ...
40.416667
18.666667
def get_tools(whitelist, known_plugins): """ Filter all known plugins by a whitelist specified. If the whitelist is empty, default to all plugins. """ def getpath(c): return "%s:%s" % (c.__module__, c.__class__.__name__) tools = [x for x in known_plugins if getpath(x) in whitelist] if not tools: if whitelist: raise UnknownTools(map(getpath, known_plugins)) tools = known_plugins return tools
[ "def", "get_tools", "(", "whitelist", ",", "known_plugins", ")", ":", "def", "getpath", "(", "c", ")", ":", "return", "\"%s:%s\"", "%", "(", "c", ".", "__module__", ",", "c", ".", "__class__", ".", "__name__", ")", "tools", "=", "[", "x", "for", "x",...
29.933333
18.866667
def timeit_profile(stmt, number, repeat, setup, profiler_factory, pickle_protocol, dump_filename, mono, **_ignored): """Profile a Python statement like timeit.""" del _ignored globals_ = {} exec_(setup, globals_) if number is None: # determine number so that 0.2 <= total time < 2.0 like timeit. dummy_profiler = profiler_factory() dummy_profiler.start() for x in range(1, 10): number = 10 ** x t = time.time() for y in range(number): exec_(stmt, globals_) if time.time() - t >= 0.2: break dummy_profiler.stop() del dummy_profiler code = compile('for _ in range(%d): %s' % (number, stmt), 'STATEMENT', 'exec') __profile__(stmt, code, globals_, profiler_factory, pickle_protocol=pickle_protocol, dump_filename=dump_filename, mono=mono)
[ "def", "timeit_profile", "(", "stmt", ",", "number", ",", "repeat", ",", "setup", ",", "profiler_factory", ",", "pickle_protocol", ",", "dump_filename", ",", "mono", ",", "*", "*", "_ignored", ")", ":", "del", "_ignored", "globals_", "=", "{", "}", "exec_"...
38.2
14
def trigger_installed(connection: connection, table: str, schema: str='public'): """Test whether or not a psycopg2-pgevents trigger is installed for a table. Parameters ---------- connection: psycopg2.extensions.connection Active connection to a PostGreSQL database. table: str Table whose trigger-existence will be checked. schema: str Schema to which the table belongs. Returns ------- bool True if the trigger is installed, otherwise False. """ installed = False log('Checking if {}.{} trigger installed...'.format(schema, table), logger_name=_LOGGER_NAME) statement = SELECT_TRIGGER_STATEMENT.format( table=table, schema=schema ) result = execute(connection, statement) if result: installed = True log('...{}installed'.format('' if installed else 'NOT '), logger_name=_LOGGER_NAME) return installed
[ "def", "trigger_installed", "(", "connection", ":", "connection", ",", "table", ":", "str", ",", "schema", ":", "str", "=", "'public'", ")", ":", "installed", "=", "False", "log", "(", "'Checking if {}.{} trigger installed...'", ".", "format", "(", "schema", "...
26.617647
25.705882
def read_proto_object(fobj, klass): """Read a block of data and parse using the given protobuf object.""" log.debug('%s chunk', klass.__name__) obj = klass() obj.ParseFromString(read_block(fobj)) log.debug('Header: %s', str(obj)) return obj
[ "def", "read_proto_object", "(", "fobj", ",", "klass", ")", ":", "log", ".", "debug", "(", "'%s chunk'", ",", "klass", ".", "__name__", ")", "obj", "=", "klass", "(", ")", "obj", ".", "ParseFromString", "(", "read_block", "(", "fobj", ")", ")", "log", ...
36.857143
8.428571
def exceptions(self): """ Returns a list of ParamDoc objects (with empty names) of the exception tags for the function. >>> comments = parse_comments_for_file('examples/module_closure.js') >>> fn1 = FunctionDoc(comments[1]) >>> fn1.exceptions[0].doc 'Another exception' >>> fn1.exceptions[1].doc 'A fake exception' >>> fn1.exceptions[1].type 'String' """ def make_param(text): if '{' in text and '}' in text: # Make sure param name is blank: word_split = list(split_delimited('{}', ' ', text)) if word_split[1] != '': text = ' '.join([word_split[0], ''] + word_split[1:]) else: # Handle old JSDoc format word_split = text.split() text = '{%s} %s' % (word_split[0], ' '.join(word_split[1:])) return ParamDoc(text) return [make_param(text) for text in self.get_as_list('throws') + self.get_as_list('exception')]
[ "def", "exceptions", "(", "self", ")", ":", "def", "make_param", "(", "text", ")", ":", "if", "'{'", "in", "text", "and", "'}'", "in", "text", ":", "# Make sure param name is blank:", "word_split", "=", "list", "(", "split_delimited", "(", "'{}'", ",", "' ...
38.071429
15.285714
def dir_is_glotk(path): """check that the current directory is a glotk project folder""" test_set = set(["gloTK_info", "gloTK_assemblies", "gloTK_configs", "gloTK_reads", "gloTK_fastqc", "gloTK_kmer", "gloTK_reports"]) #http://stackoverflow.com/questions/11968976/ files = set([f for f in os.listdir('.') if os.path.isdir(f)]) intersection = test_set.intersection(files) if len(intersection) > 0: return True else: return False
[ "def", "dir_is_glotk", "(", "path", ")", ":", "test_set", "=", "set", "(", "[", "\"gloTK_info\"", ",", "\"gloTK_assemblies\"", ",", "\"gloTK_configs\"", ",", "\"gloTK_reads\"", ",", "\"gloTK_fastqc\"", ",", "\"gloTK_kmer\"", ",", "\"gloTK_reports\"", "]", ")", "#h...
39.923077
13.615385
def LatLngsToGoogleLink(source, destination): """Return a string "<a ..." for a trip at a random time.""" dt = GetRandomDatetime() return "<a href='%s'>from:%s to:%s on %s</a>" % ( LatLngsToGoogleUrl(source, destination, dt), FormatLatLng(source), FormatLatLng(destination), dt.ctime())
[ "def", "LatLngsToGoogleLink", "(", "source", ",", "destination", ")", ":", "dt", "=", "GetRandomDatetime", "(", ")", "return", "\"<a href='%s'>from:%s to:%s on %s</a>\"", "%", "(", "LatLngsToGoogleUrl", "(", "source", ",", "destination", ",", "dt", ")", ",", "Form...
43.428571
11