text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def _get_content_range(start: Optional[int], end: Optional[int], total: int) -> str: """Returns a suitable Content-Range header: >>> print(_get_content_range(None, 1, 4)) bytes 0-0/4 >>> print(_get_content_range(1, 3, 4)) bytes 1-2/4 >>> print(_get_content_range(None, None, 4)) bytes 0-3/4 """ start = start or 0 end = (end or total) - 1 return "bytes %s-%s/%s" % (start, end, total)
[ "def", "_get_content_range", "(", "start", ":", "Optional", "[", "int", "]", ",", "end", ":", "Optional", "[", "int", "]", ",", "total", ":", "int", ")", "->", "str", ":", "start", "=", "start", "or", "0", "end", "=", "(", "end", "or", "total", "...
32.076923
16.384615
def zone_absent(domain, profile): ''' Ensures a record is absent. :param domain: Zone name, i.e. the domain name :type domain: ``str`` :param profile: The profile key :type profile: ``str`` ''' zones = __salt__['libcloud_dns.list_zones'](profile) matching_zone = [z for z in zones if z['domain'] == domain] if not matching_zone: return state_result(True, 'Zone already absent', domain) else: result = __salt__['libcloud_dns.delete_zone'](matching_zone[0]['id'], profile) return state_result(result, 'Deleted zone', domain)
[ "def", "zone_absent", "(", "domain", ",", "profile", ")", ":", "zones", "=", "__salt__", "[", "'libcloud_dns.list_zones'", "]", "(", "profile", ")", "matching_zone", "=", "[", "z", "for", "z", "in", "zones", "if", "z", "[", "'domain'", "]", "==", "domain...
34
22.235294
def dt_dayofyear(x): """The ordinal day of the year. :returns: an expression containing the ordinal day of the year. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.dayofyear Expression = dt_dayofyear(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 285 1 42 2 316 """ import pandas as pd return pd.Series(x).dt.dayofyear.values
[ "def", "dt_dayofyear", "(", "x", ")", ":", "import", "pandas", "as", "pd", "return", "pd", ".", "Series", "(", "x", ")", ".", "dt", ".", "dayofyear", ".", "values" ]
25.37037
21
def jtag_configure(self, instr_regs=0, data_bits=0): """Configures the JTAG scan chain to determine which CPU to address. Must be called if the J-Link is connected to a JTAG scan chain with multiple devices. Args: self (JLink): the ``JLink`` instance instr_regs (int): length of instruction registers of all devices closer to TD1 then the addressed CPU data_bits (int): total number of data bits closer to TD1 than the addressed CPU Returns: ``None`` Raises: ValueError: if ``instr_regs`` or ``data_bits`` are not natural numbers """ if not util.is_natural(instr_regs): raise ValueError('IR value is not a natural number.') if not util.is_natural(data_bits): raise ValueError('Data bits is not a natural number.') self._dll.JLINKARM_ConfigJTAG(instr_regs, data_bits) return None
[ "def", "jtag_configure", "(", "self", ",", "instr_regs", "=", "0", ",", "data_bits", "=", "0", ")", ":", "if", "not", "util", ".", "is_natural", "(", "instr_regs", ")", ":", "raise", "ValueError", "(", "'IR value is not a natural number.'", ")", "if", "not",...
34.962963
23.518519
def inject_coordinates(self, x_coords, y_coords, rescale_x=None, rescale_y=None, original_x=None, original_y=None): ''' Inject custom x and y coordinates for each term into chart. Parameters ---------- x_coords: array-like positions on x-axis \in [0,1] y_coords: array-like positions on y-axis \in [0,1] rescale_x: lambda list[0,1]: list[0,1], default identity Rescales x-axis after filtering rescale_y: lambda list[0,1]: list[0,1], default identity Rescales y-axis after filtering original_x : array-like, optional Original, unscaled x-values. Defaults to x_coords original_y : array-like, optional Original, unscaled y-values. Defaults to y_coords Returns ------- self: ScatterChart ''' self._verify_coordinates(x_coords, 'x') self._verify_coordinates(y_coords, 'y') self.x_coords = x_coords self.y_coords = y_coords self._rescale_x = rescale_x self._rescale_y = rescale_y self.original_x = x_coords if original_x is None else original_x self.original_y = y_coords if original_y is None else original_y
[ "def", "inject_coordinates", "(", "self", ",", "x_coords", ",", "y_coords", ",", "rescale_x", "=", "None", ",", "rescale_y", "=", "None", ",", "original_x", "=", "None", ",", "original_y", "=", "None", ")", ":", "self", ".", "_verify_coordinates", "(", "x_...
37.351351
14.432432
def register(self): """ Register via the method configured :return: """ if self.register_method == "twine": self.register_by_twine() if self.register_method == "setup": self.register_by_setup() if self.register_method == "upload": self.upload()
[ "def", "register", "(", "self", ")", ":", "if", "self", ".", "register_method", "==", "\"twine\"", ":", "self", ".", "register_by_twine", "(", ")", "if", "self", ".", "register_method", "==", "\"setup\"", ":", "self", ".", "register_by_setup", "(", ")", "i...
29.636364
7.272727
def group_show(self, id, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/groups#show-group" api_path = "/api/v2/groups/{id}.json" api_path = api_path.format(id=id) return self.call(api_path, **kwargs)
[ "def", "group_show", "(", "self", ",", "id", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/groups/{id}.json\"", "api_path", "=", "api_path", ".", "format", "(", "id", "=", "id", ")", "return", "self", ".", "call", "(", "api_path", ",", ...
48.2
10.2
def info(endpoint): """Show metric info from a Prometheus endpoint. \b Example: $ ddev meta prom info :8080/_status/vars """ endpoint = sanitize_endpoint(endpoint) metrics = parse_metrics(endpoint) num_metrics = len(metrics) num_gauge = 0 num_counter = 0 num_histogram = 0 for data in itervalues(metrics): metric_type = data.get('type') if metric_type == 'gauge': num_gauge += 1 elif metric_type == 'counter': num_counter += 1 elif metric_type == 'histogram': num_histogram += 1 if num_metrics: echo_success('Number of metrics: {}'.format(num_metrics)) else: echo_warning('No metrics!') return if num_gauge: echo_info('Type `gauge`: {}'.format(num_gauge)) if num_counter: echo_info('Type `counter`: {}'.format(num_counter)) if num_histogram: echo_info('Type `histogram`: {}'.format(num_histogram))
[ "def", "info", "(", "endpoint", ")", ":", "endpoint", "=", "sanitize_endpoint", "(", "endpoint", ")", "metrics", "=", "parse_metrics", "(", "endpoint", ")", "num_metrics", "=", "len", "(", "metrics", ")", "num_gauge", "=", "0", "num_counter", "=", "0", "nu...
24.384615
19.564103
def arg(*args, **kwargs): """ Declares an argument for given function. Does not register the function anywhere, nor does it modify the function in any way. The signature of the decorator matches that of :meth:`argparse.ArgumentParser.add_argument`, only some keywords are not required if they can be easily guessed (e.g. you don't have to specify type or action when an `int` or `bool` default value is supplied). Typical use cases: - In combination with :func:`expects_obj` (which is not recommended); - in combination with ordinary function signatures to add details that cannot be expressed with that syntax (e.g. help message). Usage:: from argh import arg @arg('path', help='path to the file to load') @arg('--format', choices=['yaml','json']) @arg('-v', '--verbosity', choices=range(0,3), default=2) def load(path, something=None, format='json', dry_run=False, verbosity=1): loaders = {'json': json.load, 'yaml': yaml.load} loader = loaders[args.format] data = loader(args.path) if not args.dry_run: if verbosity < 1: print('saving to the database') put_to_database(data) In this example: - `path` declaration is extended with `help`; - `format` declaration is extended with `choices`; - `dry_run` declaration is not duplicated; - `verbosity` is extended with `choices` and the default value is overridden. (If both function signature and `@arg` define a default value for an argument, `@arg` wins.) .. note:: It is recommended to avoid using this decorator unless there's no way to tune the argument's behaviour or presentation using ordinary function signatures. Readability counts, don't repeat yourself. """ def wrapper(func): declared_args = getattr(func, ATTR_ARGS, []) # The innermost decorator is called first but appears last in the code. 
# We need to preserve the expected order of positional arguments, so # the outermost decorator inserts its value before the innermost's: declared_args.insert(0, dict(option_strings=args, **kwargs)) setattr(func, ATTR_ARGS, declared_args) return func return wrapper
[ "def", "arg", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "wrapper", "(", "func", ")", ":", "declared_args", "=", "getattr", "(", "func", ",", "ATTR_ARGS", ",", "[", "]", ")", "# The innermost decorator is called first but appears last in the c...
40.22807
23.982456
def load_allconfig(kconf, filename): """ Helper for all*config. Loads (merges) the configuration file specified by KCONFIG_ALLCONFIG, if any. See Documentation/kbuild/kconfig.txt in the Linux kernel. Disables warnings for duplicated assignments within configuration files for the duration of the call (disable_override_warnings() + disable_redun_warnings()), and enables them at the end. The KCONFIG_ALLCONFIG configuration file is expected to override symbols. Exits with sys.exit() (which raises a SystemExit exception) and prints an error to stderr if KCONFIG_ALLCONFIG is set but the configuration file can't be opened. kconf: Kconfig instance to load the configuration in. filename: Command-specific configuration filename - "allyes.config", "allno.config", etc. """ def std_msg(e): # "Upcasts" a _KconfigIOError to an IOError, removing the custom # __str__() message. The standard message is better here. return IOError(e.errno, e.strerror, e.filename) kconf.disable_override_warnings() kconf.disable_redun_warnings() allconfig = os.environ.get("KCONFIG_ALLCONFIG") if allconfig is not None: if allconfig in ("", "1"): try: kconf.load_config(filename, False) except IOError as e1: try: kconf.load_config("all.config", False) except IOError as e2: sys.exit("error: KCONFIG_ALLCONFIG is set, but neither {} " "nor all.config could be opened: {}, {}" .format(filename, std_msg(e1), std_msg(e2))) else: try: kconf.load_config(allconfig, False) except IOError as e: sys.exit("error: KCONFIG_ALLCONFIG is set to '{}', which " "could not be opened: {}" .format(allconfig, std_msg(e))) # API wart: It would be nice if there was a way to query and/or push/pop # warning settings kconf.enable_override_warnings() kconf.enable_redun_warnings()
[ "def", "load_allconfig", "(", "kconf", ",", "filename", ")", ":", "def", "std_msg", "(", "e", ")", ":", "# \"Upcasts\" a _KconfigIOError to an IOError, removing the custom", "# __str__() message. The standard message is better here.", "return", "IOError", "(", "e", ".", "er...
39.222222
22.185185
def extract_frames(self, bpf_buffer): """Extract all frames from the buffer and stored them in the received list.""" # noqa: E501 # Ensure that the BPF buffer contains at least the header len_bb = len(bpf_buffer) if len_bb < 20: # Note: 20 == sizeof(struct bfp_hdr) return # Extract useful information from the BPF header if FREEBSD or NETBSD: # struct bpf_xhdr or struct bpf_hdr32 bh_tstamp_offset = 16 else: # struct bpf_hdr bh_tstamp_offset = 8 # Parse the BPF header bh_caplen = struct.unpack('I', bpf_buffer[bh_tstamp_offset:bh_tstamp_offset + 4])[0] # noqa: E501 next_offset = bh_tstamp_offset + 4 bh_datalen = struct.unpack('I', bpf_buffer[next_offset:next_offset + 4])[0] # noqa: E501 next_offset += 4 bh_hdrlen = struct.unpack('H', bpf_buffer[next_offset:next_offset + 2])[0] # noqa: E501 if bh_datalen == 0: return # Get and store the Scapy object frame_str = bpf_buffer[bh_hdrlen:bh_hdrlen + bh_caplen] try: pkt = self.guessed_cls(frame_str) except Exception: if conf.debug_dissector: raise pkt = conf.raw_layer(frame_str) self.received_frames.append(pkt) # Extract the next frame end = self.bpf_align(bh_hdrlen, bh_caplen) if (len_bb - end) >= 20: self.extract_frames(bpf_buffer[end:])
[ "def", "extract_frames", "(", "self", ",", "bpf_buffer", ")", ":", "# noqa: E501", "# Ensure that the BPF buffer contains at least the header", "len_bb", "=", "len", "(", "bpf_buffer", ")", "if", "len_bb", "<", "20", ":", "# Note: 20 == sizeof(struct bfp_hdr)", "return", ...
37.974359
19.051282
def post_collect(self, obj): """ We want to manage the side-effect of not collecting other items of the same type as root model. If for example, you run the collect on a specific user that is linked to a model "A" linked (ForeignKey) to ANOTHER user. Then the collect won't collect this other user, but the collected model "A" will keep the ForeignKey value of a user we are not collecting. For now, we set the ForeignKey of ANOTHER user to the root one, to be sure that model "A" will always be linked to an existing user (of course, the meaning is changing, but only if this field is not unique. Before: user1 -> modelA -> user2843 After collection: user1 -> modelA -> user1 """ if not self.ALLOWS_SAME_TYPE_AS_ROOT_COLLECT: for field in self.get_local_fields(obj): if isinstance(field, ForeignKey) and not field.unique: # Relative field's API has been changed Django 2.0 # See https://docs.djangoproject.com/en/2.0/releases/1.9/#field-rel-changes for details if django.VERSION[0] == 2: remote_model = field.remote_field.model else: remote_model = field.rel.to if isinstance(self.root_obj, remote_model): setattr(obj, field.name, self.root_obj)
[ "def", "post_collect", "(", "self", ",", "obj", ")", ":", "if", "not", "self", ".", "ALLOWS_SAME_TYPE_AS_ROOT_COLLECT", ":", "for", "field", "in", "self", ".", "get_local_fields", "(", "obj", ")", ":", "if", "isinstance", "(", "field", ",", "ForeignKey", "...
53.111111
28.518519
def processFlat(self): """Main process. Returns ------- est_idxs : np.array(N) Estimated indeces the segment boundaries in frame indeces. est_labels : np.array(N-1) Estimated labels for the segments. """ # Preprocess to obtain features (array(n_frames, n_features)) F = self._preprocess() F = librosa.util.normalize(F, axis=0) F = librosa.feature.stack_memory(F.T).T self.config["hier"] = False my_bounds, my_labels, _ = main.scluster_segment(F, self.config, self.in_bound_idxs) # Post process estimations est_idxs, est_labels = self._postprocess(my_bounds, my_labels) assert est_idxs[0] == 0 and est_idxs[-1] == F.shape[0] - 1 # We're done! return est_idxs, est_labels
[ "def", "processFlat", "(", "self", ")", ":", "# Preprocess to obtain features (array(n_frames, n_features))", "F", "=", "self", ".", "_preprocess", "(", ")", "F", "=", "librosa", ".", "util", ".", "normalize", "(", "F", ",", "axis", "=", "0", ")", "F", "=", ...
33.75
19.708333
def union(inputtiles, parsenames): """ Returns the unioned shape of a steeam of [<x>, <y>, <z>] tiles in GeoJSON. """ try: inputtiles = click.open_file(inputtiles).readlines() except IOError: inputtiles = [inputtiles] unioned = uniontiles.union(inputtiles, parsenames) for u in unioned: click.echo(json.dumps(u))
[ "def", "union", "(", "inputtiles", ",", "parsenames", ")", ":", "try", ":", "inputtiles", "=", "click", ".", "open_file", "(", "inputtiles", ")", ".", "readlines", "(", ")", "except", "IOError", ":", "inputtiles", "=", "[", "inputtiles", "]", "unioned", ...
32.181818
14.909091
def pitch_hz_to_contour(annotation): '''Convert a pitch_hz annotation to a contour''' annotation.namespace = 'pitch_contour' data = annotation.pop_data() for obs in data: annotation.append(time=obs.time, duration=obs.duration, confidence=obs.confidence, value=dict(index=0, frequency=np.abs(obs.value), voiced=obs.value > 0)) return annotation
[ "def", "pitch_hz_to_contour", "(", "annotation", ")", ":", "annotation", ".", "namespace", "=", "'pitch_contour'", "data", "=", "annotation", ".", "pop_data", "(", ")", "for", "obs", "in", "data", ":", "annotation", ".", "append", "(", "time", "=", "obs", ...
40.583333
15.75
def kalman_transition(filtered_mean, filtered_cov, transition_matrix, transition_noise): """Propagate a filtered distribution through a transition model.""" predicted_mean = _propagate_mean(filtered_mean, transition_matrix, transition_noise) predicted_cov = _propagate_cov(filtered_cov, transition_matrix, transition_noise) return predicted_mean, predicted_cov
[ "def", "kalman_transition", "(", "filtered_mean", ",", "filtered_cov", ",", "transition_matrix", ",", "transition_noise", ")", ":", "predicted_mean", "=", "_propagate_mean", "(", "filtered_mean", ",", "transition_matrix", ",", "transition_noise", ")", "predicted_cov", "...
47
12
def ensure_started(self): """Idempotent channel start""" if self.active: return self self._observer = self._observer_class(**self._observer_params) self.start() self._active = True return self
[ "def", "ensure_started", "(", "self", ")", ":", "if", "self", ".", "active", ":", "return", "self", "self", ".", "_observer", "=", "self", ".", "_observer_class", "(", "*", "*", "self", ".", "_observer_params", ")", "self", ".", "start", "(", ")", "sel...
30.625
16.625
def mkhead(repo, path): """:return: New branch/head instance""" return git.Head(repo, git.Head.to_full_path(path))
[ "def", "mkhead", "(", "repo", ",", "path", ")", ":", "return", "git", ".", "Head", "(", "repo", ",", "git", ".", "Head", ".", "to_full_path", "(", "path", ")", ")" ]
40
10.333333
def validate(self, val): """ A unicode string of the correct length and pattern will pass validation. In PY2, we enforce that a str type must be valid utf-8, and a unicode string will be returned. """ if not isinstance(val, six.string_types): raise ValidationError("'%s' expected to be a string, got %s" % (val, generic_type_name(val))) if not six.PY3 and isinstance(val, str): try: val = val.decode('utf-8') except UnicodeDecodeError: raise ValidationError("'%s' was not valid utf-8") if self.max_length is not None and len(val) > self.max_length: raise ValidationError("'%s' must be at most %d characters, got %d" % (val, self.max_length, len(val))) if self.min_length is not None and len(val) < self.min_length: raise ValidationError("'%s' must be at least %d characters, got %d" % (val, self.min_length, len(val))) if self.pattern and not self.pattern_re.match(val): raise ValidationError("'%s' did not match pattern '%s'" % (val, self.pattern)) return val
[ "def", "validate", "(", "self", ",", "val", ")", ":", "if", "not", "isinstance", "(", "val", ",", "six", ".", "string_types", ")", ":", "raise", "ValidationError", "(", "\"'%s' expected to be a string, got %s\"", "%", "(", "val", ",", "generic_type_name", "(",...
48.653846
22.576923
async def _query( self, path, method="GET", *, params=None, data=None, headers=None, timeout=None, chunked=None ): """ Get the response object by performing the HTTP request. The caller is responsible to finalize the response object. """ url = self._canonicalize_url(path) if headers and "content-type" not in headers: headers["content-type"] = "application/json" try: response = await self.session.request( method, url, params=httpize(params), headers=headers, data=data, timeout=timeout, chunked=chunked, ) except asyncio.TimeoutError: raise if (response.status // 100) in [4, 5]: what = await response.read() content_type = response.headers.get("content-type", "") response.close() if content_type == "application/json": raise DockerError(response.status, json.loads(what.decode("utf8"))) else: raise DockerError(response.status, {"message": what.decode("utf8")}) return response
[ "async", "def", "_query", "(", "self", ",", "path", ",", "method", "=", "\"GET\"", ",", "*", ",", "params", "=", "None", ",", "data", "=", "None", ",", "headers", "=", "None", ",", "timeout", "=", "None", ",", "chunked", "=", "None", ")", ":", "u...
32.102564
17.692308
def list_deep_types(list_): """ Returns all types in a deep list """ type_list = [] for item in list_: if util_type.is_listlike(item): type_list.extend(list_deep_types(item)) else: type_list.append(type(item)) return type_list
[ "def", "list_deep_types", "(", "list_", ")", ":", "type_list", "=", "[", "]", "for", "item", "in", "list_", ":", "if", "util_type", ".", "is_listlike", "(", "item", ")", ":", "type_list", ".", "extend", "(", "list_deep_types", "(", "item", ")", ")", "e...
25.454545
10.545455
def timeout_per_mb(seconds_per_mb, size_bytes): """ Scales timeouts which are size-specific """ result = seconds_per_mb * (size_bytes / 1e6) if result < DEFAULT_TIMEOUT: return DEFAULT_TIMEOUT return result
[ "def", "timeout_per_mb", "(", "seconds_per_mb", ",", "size_bytes", ")", ":", "result", "=", "seconds_per_mb", "*", "(", "size_bytes", "/", "1e6", ")", "if", "result", "<", "DEFAULT_TIMEOUT", ":", "return", "DEFAULT_TIMEOUT", "return", "result" ]
37.5
9.333333
def move_window(self, window, x, y): """ Move a window to a specific location. The top left corner of the window will be moved to the x,y coordinate. :param wid: the window to move :param x: the X coordinate to move to. :param y: the Y coordinate to move to. """ _libxdo.xdo_move_window(self._xdo, window, x, y)
[ "def", "move_window", "(", "self", ",", "window", ",", "x", ",", "y", ")", ":", "_libxdo", ".", "xdo_move_window", "(", "self", ".", "_xdo", ",", "window", ",", "x", ",", "y", ")" ]
33.363636
14.272727
def entry_assemble(entry_fields, ecc_params, header_size, filepath, fileheader=None): '''From an entry with its parameters (filename, filesize), assemble a list of each block from the original file along with the relative hash and ecc for easy processing later.''' # Extract the header from the file if fileheader is None: with open(filepath, 'rb') as file: # filepath is the absolute path to the original file (the one with maybe corruptions, NOT the output repaired file!) # Compute the size of the buffer to read: either header_size if possible, but if the file is smaller than that then we will read the whole file. if entry_fields["filesize"] > 0 and entry_fields["filesize"] < header_size: fileheader = file.read(entry_fields["filesize"]) else: fileheader = file.read(header_size) # Cut the header and the ecc entry into blocks, and then assemble them so that we can easily process block by block entry_asm = [] for i, j in itertools.izip(xrange(0, len(fileheader), ecc_params["message_size"]), xrange(0, len(entry_fields["ecc_field"]), ecc_params["hash_size"] + ecc_params["ecc_size"])): # Extract each fields from each block mes = fileheader[i:i+ecc_params["message_size"]] hash = entry_fields["ecc_field"][j:j+ecc_params["hash_size"]] ecc = entry_fields["ecc_field"][j+ecc_params["hash_size"]:j+ecc_params["hash_size"]+ecc_params["ecc_size"]] entry_asm.append({"message": mes, "hash": hash, "ecc": ecc}) # Return a list of fields for each block return entry_asm
[ "def", "entry_assemble", "(", "entry_fields", ",", "ecc_params", ",", "header_size", ",", "filepath", ",", "fileheader", "=", "None", ")", ":", "# Extract the header from the file", "if", "fileheader", "is", "None", ":", "with", "open", "(", "filepath", ",", "'r...
72.681818
47.318182
def broadcast_transaction(hex_tx, blockchain_client): """ Dispatches a raw hex transaction to the network. """ if isinstance(blockchain_client, BlockcypherClient): return blockcypher.broadcast_transaction(hex_tx, blockchain_client) elif isinstance(blockchain_client, BlockchainInfoClient): return blockchain_info.broadcast_transaction(hex_tx, blockchain_client) elif isinstance(blockchain_client, ChainComClient): return chain_com.broadcast_transaction(hex_tx, blockchain_client) elif isinstance(blockchain_client, (BitcoindClient, AuthServiceProxy)): return bitcoind.broadcast_transaction(hex_tx, blockchain_client) elif hasattr(blockchain_client, "broadcast_transaction"): return blockchain_client.broadcast_transaction( hex_tx ) elif isinstance(blockchain_client, BlockchainClient): raise Exception('That blockchain interface is not supported.') else: raise Exception('A BlockchainClient object is required')
[ "def", "broadcast_transaction", "(", "hex_tx", ",", "blockchain_client", ")", ":", "if", "isinstance", "(", "blockchain_client", ",", "BlockcypherClient", ")", ":", "return", "blockcypher", ".", "broadcast_transaction", "(", "hex_tx", ",", "blockchain_client", ")", ...
58.058824
22.705882
def prop_modifier(self, cls: ClassDefinition, slot: SlotDefinition) -> str: """ Return the modifiers for the slot: (i) - inherited (m) - inherited through mixin (a) - injected (pk) - primary ckey @param cls: @param slot: @return: """ pk = '(pk)' if slot.primary_key else '' inherited = slot.name not in cls.slots mixin = inherited and slot.name in \ [mslot.name for mslot in [self.schema.classes[m] for m in cls.mixins]] injected = cls.name in self.synopsis.applytos and \ slot.name in [aslot.name for aslot in [self.schema.classes[a] for a in sorted(self.synopsis.applytos[cls.name].classrefs)]] return pk + '(a)' if injected else '(m)' if mixin else '(i)' if inherited else ''
[ "def", "prop_modifier", "(", "self", ",", "cls", ":", "ClassDefinition", ",", "slot", ":", "SlotDefinition", ")", "->", "str", ":", "pk", "=", "'(pk)'", "if", "slot", ".", "primary_key", "else", "''", "inherited", "=", "slot", ".", "name", "not", "in", ...
45.105263
21.157895
def _clean_xml(self, path_to_xml): """Clean MARCXML harvested from OAI. Allows the xml to be used with BibUpload or BibRecord. :param xml: either XML as a string or path to an XML file :return: ElementTree of clean data """ try: if os.path.isfile(path_to_xml): tree = ET.parse(path_to_xml) root = tree.getroot() else: root = ET.fromstring(path_to_xml) except Exception, e: self.logger.error("Could not read OAI XML, aborting filter!") raise e strip_xml_namespace(root) return root
[ "def", "_clean_xml", "(", "self", ",", "path_to_xml", ")", ":", "try", ":", "if", "os", ".", "path", ".", "isfile", "(", "path_to_xml", ")", ":", "tree", "=", "ET", ".", "parse", "(", "path_to_xml", ")", "root", "=", "tree", ".", "getroot", "(", ")...
31.6
16.95
def _next_with_retry(self): """Return the next chunk and retry once on CursorNotFound. We retry on CursorNotFound to maintain backwards compatibility in cases where two calls to read occur more than 10 minutes apart (the server's default cursor timeout). """ if self._cursor is None: self._create_cursor() try: return self._cursor.next() except CursorNotFound: self._cursor.close() self._create_cursor() return self._cursor.next()
[ "def", "_next_with_retry", "(", "self", ")", ":", "if", "self", ".", "_cursor", "is", "None", ":", "self", ".", "_create_cursor", "(", ")", "try", ":", "return", "self", ".", "_cursor", ".", "next", "(", ")", "except", "CursorNotFound", ":", "self", "....
33.8125
14.625
def LogoOverlay(sites, overlayfile, overlay, nperline, sitewidth, rmargin, logoheight, barheight, barspacing, fix_limits={}, fixlongname=False, overlay_cmap=None, underlay=False, scalebar=False): """Makes overlay for *LogoPlot*. This function creates colored bars overlay bars showing up to two properties. The trick of this function is to create the bars the right size so they align when they overlay the logo plot. CALLING VARIABLES: * *sites* : same as the variable of this name used by *LogoPlot*. * *overlayfile* is a string giving the name of created PDF file containing the overlay. It must end in the extension ``.pdf``. * *overlay* : same as the variable of this name used by *LogoPlot*. * *nperline* : same as the variable of this name used by *LogoPlot*. * *sitewidth* is the width of each site in points. * *rmargin* is the right margin in points. * *logoheight* is the total height of each logo row in points. * *barheight* is the total height of each bar in points. * *barspacing* is the vertical spacing between bars in points. * *fix_limits* has the same meaning of the variable of this name used by *LogoPlot*. * *fixlongname* has the same meaning of the variable of this name used by *LogoPlot*. * *overlay_cmap* has the same meaning of the variable of this name used by *LogoPlot*. * *underlay* is a bool. If `True`, make an underlay rather than an overlay. * *scalebar: if not `False`, is 2-tuple `(scalebarheight, scalebarlabel)` where `scalebarheight` is in points. 
""" if os.path.splitext(overlayfile)[1] != '.pdf': raise ValueError("overlayfile must end in .pdf: %s" % overlayfile) if not overlay_cmap: (cmap, mapping_d, mapper) = KyteDoolittleColorMapping() else: mapper = pylab.cm.ScalarMappable(cmap=overlay_cmap) cmap = mapper.get_cmap() pts_per_inch = 72.0 # to convert between points and inches # some general properties of the plot matplotlib.rc('text', usetex=True) matplotlib.rc('xtick', labelsize=8) matplotlib.rc('xtick', direction='out') matplotlib.rc('ytick', direction='out') matplotlib.rc('axes', linewidth=0.5) matplotlib.rc('ytick.major', size=3) matplotlib.rc('xtick.major', size=2.5) # define sizes (still in points) colorbar_bmargin = 20 # margin below color bars in points colorbar_tmargin = 15 # margin above color bars in points nlines = int(math.ceil(len(sites) / float(nperline))) lmargin = 25 # left margin in points barwidth = nperline * sitewidth figwidth = lmargin + rmargin + barwidth figheight = nlines * (logoheight + len(overlay) * (barheight + barspacing)) + (barheight + colorbar_bmargin + colorbar_tmargin) + ( int(underlay) * len(overlay) * (barheight + barspacing)) # set up the figure and axes fig = pylab.figure(figsize=(figwidth / pts_per_inch, figheight / pts_per_inch)) # determine property types prop_types = {} for (prop_d, shortname, longname) in overlay: if shortname == longname == 'wildtype': assert all([(isinstance(prop, str) and len(prop) == 1) for prop in prop_d.values()]), 'prop_d does not give letters' proptype = 'wildtype' (vmin, vmax) = (0, 1) # not used, but need to be assigned propcategories = None # not used, but needs to be assigned elif all([isinstance(prop, str) for prop in prop_d.values()]): proptype = 'discrete' propcategories = list(set(prop_d.values())) propcategories.sort() (vmin, vmax) = (0, len(propcategories) - 1) elif all ([isinstance(prop, (int, float)) for prop in prop_d.values()]): proptype = 'continuous' propcategories = None (vmin, vmax) = (min(prop_d.values()), 
max(prop_d.values())) # If vmin is slightly greater than zero, set it to zero. This helps for RSA properties. if vmin >= 0 and vmin / float(vmax - vmin) < 0.05: vmin = 0.0 # And if vmax is just a bit less than one, set it to that... if 0.9 <= vmax <= 1.0: vmax = 1.0 else: raise ValueError("Property %s is neither continuous or discrete. Values are:\n%s" % (shortname, str(prop_d.items()))) if shortname in fix_limits: (vmin, vmax) = (min(fix_limits[shortname][0]), max(fix_limits[shortname][0])) assert vmin < vmax, "vmin >= vmax, did you incorrectly use fix_vmin and fix_vmax?" prop_types[shortname] = (proptype, vmin, vmax, propcategories) assert len(prop_types) == len(overlay), "Not as many property types as overlays. Did you give the same name (shortname) to multiple properties in the overlay?" # loop over each line of the multi-lined plot prop_image = {} for iline in range(nlines): isites = sites[iline * nperline : min(len(sites), (iline + 1) * nperline)] xlength = len(isites) * sitewidth logo_ax = pylab.axes([lmargin / figwidth, ((nlines - iline - 1) * (logoheight + len(overlay) * (barspacing + barheight))) / figheight, xlength / figwidth, logoheight / figheight], frameon=False) logo_ax.yaxis.set_ticks_position('none') logo_ax.xaxis.set_ticks_position('none') pylab.yticks([]) pylab.xlim(0.5, len(isites) + 0.5) pylab.xticks([]) for (iprop, (prop_d, shortname, longname)) in enumerate(overlay): (proptype, vmin, vmax, propcategories) = prop_types[shortname] prop_ax = pylab.axes([ lmargin / figwidth, ((nlines - iline - 1) * (logoheight + len(overlay) * (barspacing + barheight)) + (1 - int(underlay)) * logoheight + int(underlay) * barspacing + iprop * (barspacing + barheight)) / figheight, xlength / figwidth, barheight / figheight], frameon=(proptype != 'wildtype')) prop_ax.xaxis.set_ticks_position('none') pylab.xticks([]) pylab.xlim((0, len(isites))) pylab.ylim(-0.5, 0.5) if proptype == 'wildtype': pylab.yticks([]) prop_ax.yaxis.set_ticks_position('none') for (isite, 
site) in enumerate(isites): pylab.text(isite + 0.5, -0.5, prop_d[site], size=9, horizontalalignment='center', family='monospace') continue pylab.yticks([0], [shortname], size=8) prop_ax.yaxis.set_ticks_position('left') propdata = pylab.zeros(shape=(1, len(isites))) propdata[ : ] = pylab.nan # set to nan for all entries for (isite, site) in enumerate(isites): if site in prop_d: if proptype == 'continuous': propdata[(0, isite)] = prop_d[site] elif proptype == 'discrete': propdata[(0, isite)] = propcategories.index(prop_d[site]) else: raise ValueError('neither continuous nor discrete') prop_image[shortname] = pylab.imshow(propdata, interpolation='nearest', aspect='auto', extent=[0, len(isites), 0.5, -0.5], cmap=cmap, vmin=vmin, vmax=vmax) pylab.yticks([0], [shortname], size=8) # set up colorbar axes, then color bars ncolorbars = len([p for p in prop_types.values() if p[0] != 'wildtype']) if scalebar: ncolorbars += 1 if ncolorbars == 1: colorbarwidth = 0.4 colorbarspacingwidth = 1.0 - colorbarwidth elif ncolorbars: colorbarspacingfrac = 0.5 # space between color bars is this fraction of bar width colorbarwidth = 1.0 / (ncolorbars * (1.0 + colorbarspacingfrac)) # width of color bars in fraction of figure width colorbarspacingwidth = colorbarwidth * colorbarspacingfrac # width of color bar spacing in fraction of figure width # bottom of color bars ybottom = 1.0 - (colorbar_tmargin + barheight) / figheight propnames = {} icolorbar = -1 icolorbarshift = 0 while icolorbar < len(overlay): if icolorbar == -1: # show scale bar if being used icolorbar += 1 if scalebar: (scalebarheight, scalebarlabel) = scalebar xleft = (colorbarspacingwidth * 0.5 + icolorbar * (colorbarwidth + colorbarspacingwidth)) ytop = 1 - colorbar_tmargin / figheight scalebarheightfrac = scalebarheight / figheight # follow here for fig axes: https://stackoverflow.com/a/5022412 fullfigax = pylab.axes([0, 0, 1, 1], facecolor=(1, 1, 1, 0)) fullfigax.axvline(x=xleft, ymin=ytop - scalebarheightfrac, ymax=ytop, 
color='black', linewidth=1.5) pylab.text(xleft + 0.005, ytop - scalebarheightfrac / 2.0, scalebarlabel, verticalalignment='center', horizontalalignment='left', transform=fullfigax.transAxes) continue (prop_d, shortname, longname) = overlay[icolorbar] icolorbar += 1 (proptype, vmin, vmax, propcategories) = prop_types[shortname] if proptype == 'wildtype': icolorbarshift += 1 continue if shortname == longname or not longname: propname = shortname elif fixlongname: propname = longname else: propname = "%s (%s)" % (longname, shortname) colorbar_ax = pylab.axes([colorbarspacingwidth * 0.5 + (icolorbar - icolorbarshift - int(not bool(scalebar))) * (colorbarwidth + colorbarspacingwidth), ybottom, colorbarwidth, barheight / figheight], frameon=True) colorbar_ax.xaxis.set_ticks_position('bottom') colorbar_ax.yaxis.set_ticks_position('none') pylab.xticks([]) pylab.yticks([]) pylab.title(propname, size=9) if proptype == 'continuous': cb = pylab.colorbar(prop_image[shortname], cax=colorbar_ax, orientation='horizontal') # if range is close to zero to one, manually set tics to 0, 0.5, 1. This helps for RSA if -0.1 <= vmin <= 0 and 1.0 <= vmax <= 1.15: cb.set_ticks([0, 0.5, 1]) cb.set_ticklabels(['0', '0.5', '1']) # if it seems plausible, set integer ticks if 4 < (vmax - vmin) <= 11: fixedticks = [itick for itick in range(int(vmin), int(vmax) + 1)] cb.set_ticks(fixedticks) cb.set_ticklabels([str(itick) for itick in fixedticks]) elif proptype == 'discrete': cb = pylab.colorbar(prop_image[shortname], cax=colorbar_ax, orientation='horizontal', boundaries=[i for i in range(len(propcategories) + 1)], values=[i for i in range(len(propcategories))]) cb.set_ticks([i + 0.5 for i in range(len(propcategories))]) cb.set_ticklabels(propcategories) else: raise ValueError("Invalid proptype") if shortname in fix_limits: (ticklocs, ticknames) = fix_limits[shortname] cb.set_ticks(ticklocs) cb.set_ticklabels(ticknames) # save the plot pylab.savefig(overlayfile, transparent=True)
[ "def", "LogoOverlay", "(", "sites", ",", "overlayfile", ",", "overlay", ",", "nperline", ",", "sitewidth", ",", "rmargin", ",", "logoheight", ",", "barheight", ",", "barspacing", ",", "fix_limits", "=", "{", "}", ",", "fixlongname", "=", "False", ",", "ove...
50.287611
24.495575
def eigen_decomposition(G, n_components=8, eigen_solver='auto', random_state=None, drop_first=True, largest=True, solver_kwds=None): """ Function to compute the eigendecomposition of a square matrix. Parameters ---------- G : array_like or sparse matrix The square matrix for which to compute the eigen-decomposition. n_components : integer, optional The number of eigenvectors to return eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'} 'auto' : attempt to choose the best method for input data (default) 'dense' : use standard dense matrix operations for the eigenvalue decomposition. For this method, M must be an array or matrix type. This method should be avoided for large problems. 'arpack' : use arnoldi iteration in shift-invert mode. For this method, M may be a dense matrix, sparse matrix, or general linear operator. Warning: ARPACK can be unstable for some problems. It is best to try several random seeds in order to check results. 'lobpcg' : Locally Optimal Block Preconditioned Conjugate Gradient Method. A preconditioned eigensolver for large symmetric positive definite (SPD) generalized eigenproblems. 'amg' : Algebraic Multigrid solver (requires ``pyamg`` to be installed) It can be faster on very large, sparse problems, but may also lead to instabilities. random_state : int seed, RandomState instance, or None (default) A pseudo random number generator used for the initialization of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'. By default, arpack is used. 
solver_kwds : any additional keyword arguments to pass to the selected eigen_solver Returns ------- lambdas, diffusion_map : eigenvalues, eigenvectors """ n_nodes = G.shape[0] if drop_first: n_components = n_components + 1 eigen_solver, solver_kwds = check_eigen_solver(eigen_solver, solver_kwds, size=n_nodes, nvec=n_components) random_state = check_random_state(random_state) # Convert G to best type for eigendecomposition if sparse.issparse(G): if G.getformat() is not 'csr': G.tocsr() G = G.astype(np.float) # Check for symmetry is_symmetric = _is_symmetric(G) # Try Eigen Methods: if eigen_solver == 'arpack': # This matches the internal initial state used by ARPACK v0 = random_state.uniform(-1, 1, G.shape[0]) if is_symmetric: if largest: which = 'LM' else: which = 'SM' lambdas, diffusion_map = eigsh(G, k=n_components, which=which, v0=v0,**(solver_kwds or {})) else: if largest: which = 'LR' else: which = 'SR' lambdas, diffusion_map = eigs(G, k=n_components, which=which, **(solver_kwds or {})) lambdas = np.real(lambdas) diffusion_map = np.real(diffusion_map) elif eigen_solver == 'amg': # separate amg & lobpcg keywords: if solver_kwds is not None: amg_kwds = {} lobpcg_kwds = solver_kwds.copy() for kwd in AMG_KWDS: if kwd in solver_kwds.keys(): amg_kwds[kwd] = solver_kwds[kwd] del lobpcg_kwds[kwd] else: amg_kwds = None lobpcg_kwds = None if not is_symmetric: raise ValueError("lobpcg requires symmetric matrices.") if not sparse.issparse(G): warnings.warn("AMG works better for sparse matrices") # Use AMG to get a preconditioner and speed up the eigenvalue problem. 
ml = smoothed_aggregation_solver(check_array(G, accept_sparse = ['csr']),**(amg_kwds or {})) M = ml.aspreconditioner() n_find = min(n_nodes, 5 + 2*n_components) X = random_state.rand(n_nodes, n_find) X[:, 0] = (G.diagonal()).ravel() lambdas, diffusion_map = lobpcg(G, X, M=M, largest=largest,**(lobpcg_kwds or {})) sort_order = np.argsort(lambdas) if largest: lambdas = lambdas[sort_order[::-1]] diffusion_map = diffusion_map[:, sort_order[::-1]] else: lambdas = lambdas[sort_order] diffusion_map = diffusion_map[:, sort_order] lambdas = lambdas[:n_components] diffusion_map = diffusion_map[:, :n_components] elif eigen_solver == "lobpcg": if not is_symmetric: raise ValueError("lobpcg requires symmetric matrices.") n_find = min(n_nodes, 5 + 2*n_components) X = random_state.rand(n_nodes, n_find) lambdas, diffusion_map = lobpcg(G, X, largest=largest,**(solver_kwds or {})) sort_order = np.argsort(lambdas) if largest: lambdas = lambdas[sort_order[::-1]] diffusion_map = diffusion_map[:, sort_order[::-1]] else: lambdas = lambdas[sort_order] diffusion_map = diffusion_map[:, sort_order] lambdas = lambdas[:n_components] diffusion_map = diffusion_map[:, :n_components] elif eigen_solver == 'dense': if sparse.isspmatrix(G): G = G.todense() if is_symmetric: lambdas, diffusion_map = eigh(G,**(solver_kwds or {})) else: lambdas, diffusion_map = eig(G,**(solver_kwds or {})) sort_index = np.argsort(lambdas) lambdas = lambdas[sort_index] diffusion_map[:,sort_index] if largest:# eigh always returns eigenvalues in ascending order lambdas = lambdas[::-1] # reverse order the e-values diffusion_map = diffusion_map[:, ::-1] # reverse order the vectors lambdas = lambdas[:n_components] diffusion_map = diffusion_map[:, :n_components] return (lambdas, diffusion_map)
[ "def", "eigen_decomposition", "(", "G", ",", "n_components", "=", "8", ",", "eigen_solver", "=", "'auto'", ",", "random_state", "=", "None", ",", "drop_first", "=", "True", ",", "largest", "=", "True", ",", "solver_kwds", "=", "None", ")", ":", "n_nodes", ...
42.867133
18.769231
def process_args(): """ Parse command-line arguments. """ parser = argparse.ArgumentParser( description=("A script which can be run to tune a NeuroML 2 model against a number of target properties. Work in progress!")) parser.add_argument('prefix', type=str, metavar='<prefix>', help="Prefix for optimisation run") parser.add_argument('neuromlFile', type=str, metavar='<neuromlFile>', help="NeuroML2 file containing model") parser.add_argument('target', type=str, metavar='<target>', help="Target in NeuroML2 model") parser.add_argument('parameters', type=str, metavar='<parameters>', help="List of parameter to adjust") parser.add_argument('maxConstraints', type=str, metavar='<max_constraints>', help="Max values for parameters") parser.add_argument('minConstraints', type=str, metavar='<min_constraints>', help="Min values for parameters") parser.add_argument('targetData', type=str, metavar='<targetData>', help="List of name/value pairs for properties extracted from data to judge fitness against") parser.add_argument('weights', type=str, metavar='<weights>', help="Weights to assign to each target name/value pair") parser.add_argument('-simTime', type=float, metavar='<simTime>', default=DEFAULTS['simTime'], help="Simulation duration") parser.add_argument('-dt', type=float, metavar='<dt>', default=DEFAULTS['dt'], help="Simulation timestep") parser.add_argument('-analysisStartTime', type=float, metavar='<analysisStartTime>', default=DEFAULTS['analysisStartTime'], help="Analysis start time") parser.add_argument('-populationSize', type=int, metavar='<populationSize>', default=DEFAULTS['populationSize'], help="Population size") parser.add_argument('-maxEvaluations', type=int, metavar='<maxEvaluations>', default=DEFAULTS['maxEvaluations'], help="Maximum evaluations") parser.add_argument('-numSelected', type=int, metavar='<numSelected>', default=DEFAULTS['numSelected'], help="Number selected") parser.add_argument('-numOffspring', type=int, metavar='<numOffspring>', 
default=DEFAULTS['numOffspring'], help="Number offspring") parser.add_argument('-mutationRate', type=float, metavar='<mutationRate>', default=DEFAULTS['mutationRate'], help="Mutation rate") parser.add_argument('-numElites', type=int, metavar='<numElites>', default=DEFAULTS['numElites'], help="Number of elites") parser.add_argument('-numParallelEvaluations', type=int, metavar='<numParallelEvaluations>', default=DEFAULTS['numParallelEvaluations'], help="Number of evaluations to run in parallel") parser.add_argument('-seed', type=int, metavar='<seed>', default=DEFAULTS['seed'], help="Seed for optimiser") parser.add_argument('-simulator', type=str, metavar='<simulator>', default=DEFAULTS['simulator'], help="Simulator to run") parser.add_argument('-knownTargetValues', type=str, metavar='<knownTargetValues>', help="List of name/value pairs which represent the known values of the target parameters") parser.add_argument('-nogui', action='store_true', default=DEFAULTS['nogui'], help="Should GUI elements be supressed?") parser.add_argument('-showPlotAlready', action='store_true', default=DEFAULTS['showPlotAlready'], help="Should generated plots be suppressed until show() called?") parser.add_argument('-verbose', action='store_true', default=DEFAULTS['verbose'], help="Verbose mode") parser.add_argument('-dryRun', action='store_true', default=DEFAULTS['dryRun'], help="Dry run; just print setup information") parser.add_argument('-extraReportInfo', type=str, metavar='<extraReportInfo>', default=DEFAULTS['extraReportInfo'], help='Extra tag/value pairs can be put into the report.json: -extraReportInfo=["tag":"value"]') parser.add_argument('-cleanup', action='store_true', default=DEFAULTS['cleanup'], help="Should (some) generated files, e.g. *.dat, be deleted as optimisation progresses?") return parser.parse_args()
[ "def", "process_args", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "(", "\"A script which can be run to tune a NeuroML 2 model against a number of target properties. Work in progress!\"", ")", ")", "parser", ".", "add_argument", "...
42.85443
13.594937
def uniform(low:Number, high:Number=None, size:Optional[List[int]]=None)->FloatOrTensor: "Draw 1 or shape=`size` random floats from uniform dist: min=`low`, max=`high`." if high is None: high=low return random.uniform(low,high) if size is None else torch.FloatTensor(*listify(size)).uniform_(low,high)
[ "def", "uniform", "(", "low", ":", "Number", ",", "high", ":", "Number", "=", "None", ",", "size", ":", "Optional", "[", "List", "[", "int", "]", "]", "=", "None", ")", "->", "FloatOrTensor", ":", "if", "high", "is", "None", ":", "high", "=", "lo...
77.5
43
def _is_small_molecule(pe): """Return True if the element is a small molecule""" val = isinstance(pe, _bp('SmallMolecule')) or \ isinstance(pe, _bpimpl('SmallMolecule')) or \ isinstance(pe, _bp('SmallMoleculeReference')) or \ isinstance(pe, _bpimpl('SmallMoleculeReference')) return val
[ "def", "_is_small_molecule", "(", "pe", ")", ":", "val", "=", "isinstance", "(", "pe", ",", "_bp", "(", "'SmallMolecule'", ")", ")", "or", "isinstance", "(", "pe", ",", "_bpimpl", "(", "'SmallMolecule'", ")", ")", "or", "isinstance", "(", "pe", ",", "_...
46.857143
15.714286
def get_lattice_vector_equivalence(point_symmetry): """Return (b==c, c==a, a==b)""" # primitive_vectors: column vectors equivalence = [False, False, False] for r in point_symmetry: if (np.abs(r[:, 0]) == [0, 1, 0]).all(): equivalence[2] = True if (np.abs(r[:, 0]) == [0, 0, 1]).all(): equivalence[1] = True if (np.abs(r[:, 1]) == [1, 0, 0]).all(): equivalence[2] = True if (np.abs(r[:, 1]) == [0, 0, 1]).all(): equivalence[0] = True if (np.abs(r[:, 2]) == [1, 0, 0]).all(): equivalence[1] = True if (np.abs(r[:, 2]) == [0, 1, 0]).all(): equivalence[0] = True return equivalence
[ "def", "get_lattice_vector_equivalence", "(", "point_symmetry", ")", ":", "# primitive_vectors: column vectors", "equivalence", "=", "[", "False", ",", "False", ",", "False", "]", "for", "r", "in", "point_symmetry", ":", "if", "(", "np", ".", "abs", "(", "r", ...
35
10.65
def _set_af_ipv4_unicast(self, v, load=False): """ Setter method for af_ipv4_unicast, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/address_family/ipv4/af_ipv4_unicast (container) If this variable is read-only (config: false) in the source YANG file, then _set_af_ipv4_unicast is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_af_ipv4_unicast() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=af_ipv4_unicast.af_ipv4_unicast, is_container='container', presence=True, yang_name="af-ipv4-unicast", rest_name="unicast", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IPv4 unicast address Family', u'cli-full-no': None, u'callpoint': u'IsisAfIpv4Ucast', u'cli-add-mode': None, u'cli-full-command': None, u'alt-name': u'unicast', u'cli-mode-name': u'config-router-isis-ipv4u'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """af_ipv4_unicast must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=af_ipv4_unicast.af_ipv4_unicast, is_container='container', presence=True, yang_name="af-ipv4-unicast", rest_name="unicast", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IPv4 unicast address Family', u'cli-full-no': None, u'callpoint': u'IsisAfIpv4Ucast', u'cli-add-mode': None, u'cli-full-command': None, u'alt-name': u'unicast', u'cli-mode-name': u'config-router-isis-ipv4u'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)""", }) self.__af_ipv4_unicast = t if hasattr(self, '_set'): self._set()
[ "def", "_set_af_ipv4_unicast", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", ...
93.818182
45.409091
def compute_partial_energy(self, removed_indices): """ Gives total ewald energy for certain sites being removed, i.e. zeroed out. """ total_energy_matrix = self.total_energy_matrix.copy() for i in removed_indices: total_energy_matrix[i, :] = 0 total_energy_matrix[:, i] = 0 return sum(sum(total_energy_matrix))
[ "def", "compute_partial_energy", "(", "self", ",", "removed_indices", ")", ":", "total_energy_matrix", "=", "self", ".", "total_energy_matrix", ".", "copy", "(", ")", "for", "i", "in", "removed_indices", ":", "total_energy_matrix", "[", "i", ",", ":", "]", "="...
38.1
10.9
def get_bin_hierarchy_design_session(self): """Gets the bin hierarchy design session. return: (osid.resource.BinHierarchyDesignSession) - a ``BinHierarchyDesignSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_bin_hierarchy_design()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_bin_hierarchy_design()`` is ``true``.* """ if not self.supports_bin_hierarchy_design(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.BinHierarchyDesignSession(runtime=self._runtime)
[ "def", "get_bin_hierarchy_design_session", "(", "self", ")", ":", "if", "not", "self", ".", "supports_bin_hierarchy_design", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "# pylint: disable=no-member", "return", "sessions", ".", "BinHierarchyDesign...
42.9375
16.6875
def server_chan_push(title, content, key=None): """使用server酱推送消息到微信,关于server酱, 请参考:http://sc.ftqq.com/3.version :param title: str 消息标题 :param content: str 消息内容,最长64Kb,可空,支持MarkDown :param key: str 从[Server酱](https://sc.ftqq.com/3.version)获取的key :return: None """ if not key: raise ValueError("请配置key,如果还没有key," "可以到这里申请一个:http://sc.ftqq.com/3.version") url = 'https://sc.ftqq.com/%s.send' % key requests.post(url, data={'text': title, 'desp': content})
[ "def", "server_chan_push", "(", "title", ",", "content", ",", "key", "=", "None", ")", ":", "if", "not", "key", ":", "raise", "ValueError", "(", "\"请配置key,如果还没有key,\"", "\"可以到这里申请一个:http://sc.ftqq.com/3.version\")", "", "url", "=", "'https://sc.ftqq.com/%s.send'", "...
31.411765
15.352941
def copy_config_from_repo(namespace, workspace, from_cnamespace, from_config, from_snapshot_id, to_cnamespace, to_config): """Copy a method config from the methods repository to a workspace. Args: namespace (str): project to which workspace belongs workspace (str): Workspace name from_cnamespace (str): Source configuration namespace from_config (str): Source configuration name from_snapshot_id (int): Source configuration snapshot_id to_cnamespace (str): Target configuration namespace to_config (str): Target configuration name Swagger: https://api.firecloud.org/#!/Method_Configurations/copyFromMethodRepo DUPLICATE: https://api.firecloud.org/#!/Method_Repository/copyFromMethodRepo """ body = { "configurationNamespace" : from_cnamespace, "configurationName" : from_config, "configurationSnapshotId" : from_snapshot_id, "destinationNamespace" : to_cnamespace, "destinationName" : to_config } uri = "workspaces/{0}/{1}/method_configs/copyFromMethodRepo".format( namespace, workspace) return __post(uri, json=body)
[ "def", "copy_config_from_repo", "(", "namespace", ",", "workspace", ",", "from_cnamespace", ",", "from_config", ",", "from_snapshot_id", ",", "to_cnamespace", ",", "to_config", ")", ":", "body", "=", "{", "\"configurationNamespace\"", ":", "from_cnamespace", ",", "\...
43.551724
21.068966
def save(self, dbfile=None, password=None, keyfile=None): """ Save the database to specified file/stream with password and/or keyfile. :param dbfile: The path to the file we wish to save. :type dbfile: The path to the database file or a file-like object. :param password: The password to use for the database encryption key. :type password: str :param keyfile: The path to keyfile (or a stream) to use instead of or in conjunction with password for encryption key. :type keyfile: str or file-like object :raise keepassdb.exc.ReadOnlyDatabase: If database was opened with readonly flag. """ if self.readonly: # We might wish to make this more sophisticated. E.g. if a new path is specified # as a parameter, then it's probably ok to ignore a readonly flag? In general # this flag doens't make a ton of sense for a library ... raise exc.ReadOnlyDatabase() if dbfile is not None and not hasattr(dbfile, 'write'): self.filepath = dbfile if password is not None or self.keyfile is not None: # Do these together so we don't end up with some hybrid of old & new key material self.password = password self.keyfile = keyfile else: raise ValueError("Password and/or keyfile is required.") if self.filepath is None and dbfile is None: raise ValueError("Unable to save without target file.") buf = bytearray() # First, serialize the groups for group in self.groups: # Get the packed bytes group_struct = group.to_struct() self.log.debug("Group struct: {0!r}".format(group_struct)) buf += group_struct.encode() # Then the entries. for entry in self.entries: entry_struct = entry.to_struct() buf += entry_struct.encode() # Hmmmm ... these defaults should probably be set elsewhere....? 
header = HeaderStruct() header.signature1 = const.DB_SIGNATURE1 header.signature2 = const.DB_SIGNATURE2 header.flags = header.AES header.version = 0x00030002 header.key_enc_rounds = 50000 header.seed_key = get_random_bytes(32) # Convert buffer to bytes for API simplicity buf = bytes(buf) # Generate new seed & vector; update content hash header.encryption_iv = get_random_bytes(16) header.seed_rand = get_random_bytes(16) header.contents_hash = hashlib.sha256(buf).digest() self.log.debug("(Unencrypted) content: {0!r}".format(buf)) self.log.debug("Generating hash for {0}-byte content: {1}".format(len(buf), hashlib.sha256(buf).digest())) # Update num groups/entries to match curr state header.nentries = len(self.entries) header.ngroups = len(self.groups) final_key = util.derive_key(seed_key=header.seed_key, seed_rand=header.seed_rand, rounds=header.key_enc_rounds, password=password, keyfile=keyfile) # FIXME: Remove this once we've tracked down issues. self.log.debug("(save) Final key: {0!r}, pass={1}".format(final_key, password)) encrypted_content = util.encrypt_aes_cbc(buf, key=final_key, iv=header.encryption_iv) if hasattr(dbfile, 'write'): dbfile.write(header.encode() + encrypted_content) else: with open(self.filepath, "wb") as fp: fp.write(header.encode() + encrypted_content)
[ "def", "save", "(", "self", ",", "dbfile", "=", "None", ",", "password", "=", "None", ",", "keyfile", "=", "None", ")", ":", "if", "self", ".", "readonly", ":", "# We might wish to make this more sophisticated. E.g. if a new path is specified", "# as a parameter, the...
44.535714
21.892857
def make_api_call(self, method, url, json_params=None): """ Accesses the branch API :param method: The HTTP method :param url: The URL :param json_params: JSON parameters :return: The parsed response """ url = self.BRANCH_BASE_URI+url if self.verbose is True: print("Making web request: {}".format(url)) if json_params is not None: encoded_params = json.dumps(json_params) headers = {'Content-Type': 'application/json'} else: encoded_params = None headers = {} if encoded_params is not None and self.verbose is True: print("Params: {}".format(encoded_params)) request = Request(url, encoded_params.encode('utf-8'), headers) request.get_method = lambda: method response = urlopen(request).read() return json.loads(response.decode('utf-8'))
[ "def", "make_api_call", "(", "self", ",", "method", ",", "url", ",", "json_params", "=", "None", ")", ":", "url", "=", "self", ".", "BRANCH_BASE_URI", "+", "url", "if", "self", ".", "verbose", "is", "True", ":", "print", "(", "\"Making web request: {}\"", ...
32.714286
15.714286
def save_load(jid, load, minions=None): ''' Save the load to the specified jid ''' serv = _get_serv(ret=None) serv.set(jid, salt.utils.json.dumps(load)) _append_list(serv, 'jids', jid)
[ "def", "save_load", "(", "jid", ",", "load", ",", "minions", "=", "None", ")", ":", "serv", "=", "_get_serv", "(", "ret", "=", "None", ")", "serv", ".", "set", "(", "jid", ",", "salt", ".", "utils", ".", "json", ".", "dumps", "(", "load", ")", ...
28.857143
12.857143
def dict_diff(d1: Dict[Any, Any], d2: Dict[Any, Any], deleted_value: Any = None) -> Dict[Any, Any]: """ Returns a representation of the changes that need to be made to ``d1`` to create ``d2``. Args: d1: a dictionary d2: another dictionary deleted_value: value to use for deleted keys; see below Returns: dict: a dictionary of the format ``{k: v}`` where the ``k``/``v`` pairs are key/value pairs that are absent from ``d1`` and present in ``d2``, or present in both but with different values (in which case the ``d2`` value is shown). If a key ``k`` is present in ``d1`` but absent in ``d2``, the result dictionary has the entry ``{k: deleted_value}``. """ changes = {k: v for k, v in d2.items() if k not in d1 or d2[k] != d1[k]} for k in d1.keys(): if k not in d2: changes[k] = deleted_value return changes
[ "def", "dict_diff", "(", "d1", ":", "Dict", "[", "Any", ",", "Any", "]", ",", "d2", ":", "Dict", "[", "Any", ",", "Any", "]", ",", "deleted_value", ":", "Any", "=", "None", ")", "->", "Dict", "[", "Any", ",", "Any", "]", ":", "changes", "=", ...
37.4
22.84
def NewFromCmyk(c, m, y, k, alpha=1.0, wref=_DEFAULT_WREF): '''Create a new instance based on the specifed CMYK values. Parameters: :c: The Cyan component value [0...1] :m: The Magenta component value [0...1] :y: The Yellow component value [0...1] :k: The Black component value [0...1] :alpha: The color transparency [0...1], default is opaque :wref: The whitepoint reference, default is 2° D65. Returns: A grapefruit.Color instance. >>> str(Color.NewFromCmyk(1, 0.32, 0, 0.5)) '(0, 0.34, 0.5, 1)' >>> str(Color.NewFromCmyk(1, 0.32, 0, 0.5, 0.5)) '(0, 0.34, 0.5, 0.5)' ''' return Color(Color.CmyToRgb(*Color.CmykToCmy(c, m, y, k)), 'rgb', alpha, wref)
[ "def", "NewFromCmyk", "(", "c", ",", "m", ",", "y", ",", "k", ",", "alpha", "=", "1.0", ",", "wref", "=", "_DEFAULT_WREF", ")", ":", "return", "Color", "(", "Color", ".", "CmyToRgb", "(", "*", "Color", ".", "CmykToCmy", "(", "c", ",", "m", ",", ...
27.962963
22.259259
def run_tutorial(plot=False, process_len=3600, num_cores=cpu_count()): """Main function to run the tutorial dataset.""" # First we want to load our templates template_names = glob.glob('tutorial_template_*.ms') if len(template_names) == 0: raise IOError('Template files not found, have you run the template ' + 'creation tutorial?') templates = [read(template_name) for template_name in template_names] # Work out what stations we have and get the data for them stations = [] for template in templates: for tr in template: stations.append((tr.stats.station, tr.stats.channel)) # Get a unique list of stations stations = list(set(stations)) # We will loop through the data chunks at a time, these chunks can be any # size, in general we have used 1 day as our standard, but this can be # as short as five minutes (for MAD thresholds) or shorter for other # threshold metrics. However the chunk size should be the same as your # template process_len. # You should test different parameters!!! start_time = UTCDateTime(2016, 1, 4) end_time = UTCDateTime(2016, 1, 5) chunks = [] chunk_start = start_time while chunk_start < end_time: chunk_end = chunk_start + process_len if chunk_end > end_time: chunk_end = end_time chunks.append((chunk_start, chunk_end)) chunk_start += process_len unique_detections = [] # Set up a client to access the GeoNet database client = Client("GEONET") # Note that these chunks do not rely on each other, and could be paralleled # on multiple nodes of a distributed cluster, see the SLURM tutorial for # an example of this. for t1, t2 in chunks: # Generate the bulk information to query the GeoNet database bulk_info = [] for station in stations: bulk_info.append(('NZ', station[0], '*', station[1][0] + 'H' + station[1][-1], t1, t2)) # Note this will take a little while. 
print('Downloading seismic data, this may take a while') st = client.get_waveforms_bulk(bulk_info) # Merge the stream, it will be downloaded in chunks st.merge(fill_value='interpolate') # Pre-process the data to set frequency band and sampling rate # Note that this is, and MUST BE the same as the parameters used for # the template creation. print('Processing the seismic data') st = pre_processing.shortproc( st, lowcut=2.0, highcut=9.0, filt_order=4, samp_rate=20.0, debug=0, num_cores=num_cores, starttime=t1, endtime=t2) # Convert from list to stream st = Stream(st) # Now we can conduct the matched-filter detection detections = match_filter.match_filter( template_names=template_names, template_list=templates, st=st, threshold=8.0, threshold_type='MAD', trig_int=6.0, plotvar=plot, plotdir='.', cores=num_cores, debug=0, plot_format='png') # Now lets try and work out how many unique events we have just to # compare with the GeoNet catalog of 20 events on this day in this # sequence for master in detections: keep = True for slave in detections: if not master == slave and abs(master.detect_time - slave.detect_time) <= 1.0: # If the events are within 1s of each other then test which # was the 'best' match, strongest detection if not master.detect_val > slave.detect_val: keep = False print('Removed detection at %s with cccsum %s' % (master.detect_time, master.detect_val)) print('Keeping detection at %s with cccsum %s' % (slave.detect_time, slave.detect_val)) break if keep: unique_detections.append(master) print('Detection at :' + str(master.detect_time) + ' for template ' + master.template_name + ' with a cross-correlation sum of: ' + str(master.detect_val)) # We can plot these too if plot: stplot = st.copy() template = templates[template_names.index( master.template_name)] lags = sorted([tr.stats.starttime for tr in template]) maxlag = lags[-1] - lags[0] stplot.trim(starttime=master.detect_time - 10, endtime=master.detect_time + maxlag + 10) plotting.detection_multiplot( 
stplot, template, [master.detect_time.datetime]) print('We made a total of ' + str(len(unique_detections)) + ' detections') return unique_detections
[ "def", "run_tutorial", "(", "plot", "=", "False", ",", "process_len", "=", "3600", ",", "num_cores", "=", "cpu_count", "(", ")", ")", ":", "# First we want to load our templates", "template_names", "=", "glob", ".", "glob", "(", "'tutorial_template_*.ms'", ")", ...
44.936937
20.378378
def compute_ngrams(word_list, S=3, T=3): """Compute NGrams in the word_list from [S-T) Args: word_list (list): A list of words to compute ngram set from S (int): The smallest NGram (default=3) T (int): The biggest NGram (default=3) """ _ngrams = [] if isinstance(word_list, str): word_list = [word_list] for word in word_list: for n in range(S, T+1): _ngrams += zip(*(word[i:] for i in range(n))) return [''.join(_ngram) for _ngram in _ngrams]
[ "def", "compute_ngrams", "(", "word_list", ",", "S", "=", "3", ",", "T", "=", "3", ")", ":", "_ngrams", "=", "[", "]", "if", "isinstance", "(", "word_list", ",", "str", ")", ":", "word_list", "=", "[", "word_list", "]", "for", "word", "in", "word_l...
37.571429
12
def concat(cls, variables, dim='concat_dim', positions=None, shortcut=False): """Concatenate variables along a new or existing dimension. Parameters ---------- variables : iterable of Array Arrays to stack together. Each variable is expected to have matching dimensions and shape except for along the stacked dimension. dim : str or DataArray, optional Name of the dimension to stack along. This can either be a new dimension name, in which case it is added along axis=0, or an existing dimension name, in which case the location of the dimension is unchanged. Where to insert the new dimension is determined by the first variable. positions : None or list of integer arrays, optional List of integer arrays which specifies the integer positions to which to assign each dataset along the concatenated dimension. If not supplied, objects are concatenated in the provided order. shortcut : bool, optional This option is used internally to speed-up groupby operations. If `shortcut` is True, some checks of internal consistency between arrays to concatenate are skipped. Returns ------- stacked : Variable Concatenated Variable formed by stacking all the supplied variables along the given dimension. """ if not isinstance(dim, str): dim, = dim.dims # can't do this lazily: we need to loop through variables at least # twice variables = list(variables) first_var = variables[0] arrays = [v.data for v in variables] if dim in first_var.dims: axis = first_var.get_axis_num(dim) dims = first_var.dims data = duck_array_ops.concatenate(arrays, axis=axis) if positions is not None: # TODO: deprecate this option -- we don't need it for groupby # any more. 
indices = nputils.inverse_permutation( np.concatenate(positions)) data = duck_array_ops.take(data, indices, axis=axis) else: axis = 0 dims = (dim,) + first_var.dims data = duck_array_ops.stack(arrays, axis=axis) attrs = OrderedDict(first_var.attrs) encoding = OrderedDict(first_var.encoding) if not shortcut: for var in variables: if var.dims != first_var.dims: raise ValueError('inconsistent dimensions') utils.remove_incompatible_items(attrs, var.attrs) return cls(dims, data, attrs, encoding)
[ "def", "concat", "(", "cls", ",", "variables", ",", "dim", "=", "'concat_dim'", ",", "positions", "=", "None", ",", "shortcut", "=", "False", ")", ":", "if", "not", "isinstance", "(", "dim", ",", "str", ")", ":", "dim", ",", "=", "dim", ".", "dims"...
41.861538
19.8
def getSpec(cls): """ Return the Spec for ApicalTMSequenceRegion. """ spec = { "description": ApicalTMSequenceRegion.__doc__, "singleNodeOnly": True, "inputs": { "activeColumns": { "description": ("An array of 0's and 1's representing the active " "minicolumns, i.e. the input to the TemporalMemory"), "dataType": "Real32", "count": 0, "required": True, "regionLevel": True, "isDefaultInput": True, "requireSplitterMap": False }, "resetIn": { "description": ("A boolean flag that indicates whether" " or not the input vector received in this compute cycle" " represents the first presentation in a" " new temporal sequence."), "dataType": "Real32", "count": 1, "required": False, "regionLevel": True, "isDefaultInput": False, "requireSplitterMap": False }, "apicalInput": { "description": "An array of 0's and 1's representing top down input." " The input will be provided to apical dendrites.", "dataType": "Real32", "count": 0, "required": False, "regionLevel": True, "isDefaultInput": False, "requireSplitterMap": False }, "apicalGrowthCandidates": { "description": ("An array of 0's and 1's representing apical input " "that can be learned on new synapses on apical " "segments. 
If this input is a length-0 array, the " "whole apicalInput is used."), "dataType": "Real32", "count": 0, "required": False, "regionLevel": True, "isDefaultInput": False, "requireSplitterMap": False }, }, "outputs": { "nextPredictedCells": { "description": ("A binary output containing a 1 for every " "cell that is predicted for the next timestep."), "dataType": "Real32", "count": 0, "regionLevel": True, "isDefaultOutput": False }, "predictedActiveCells": { "description": ("A binary output containing a 1 for every " "cell that transitioned from predicted to active."), "dataType": "Real32", "count": 0, "regionLevel": True, "isDefaultOutput": False }, "activeCells": { "description": ("A binary output containing a 1 for every " "cell that is currently active."), "dataType": "Real32", "count": 0, "regionLevel": True, "isDefaultOutput": True }, "winnerCells": { "description": ("A binary output containing a 1 for every " "'winner' cell in the TM."), "dataType": "Real32", "count": 0, "regionLevel": True, "isDefaultOutput": False }, }, "parameters": { # Input sizes (the network API doesn't provide these during initialize) "columnCount": { "description": ("The size of the 'activeColumns' input " + "(i.e. 
the number of columns)"), "accessMode": "Read", "dataType": "UInt32", "count": 1, "constraints": "" }, "apicalInputWidth": { "description": "The size of the 'apicalInput' input", "accessMode": "Read", "dataType": "UInt32", "count": 1, "constraints": "" }, "learn": { "description": "True if the TM should learn.", "accessMode": "ReadWrite", "dataType": "Bool", "count": 1, "defaultValue": "true" }, "cellsPerColumn": { "description": "Number of cells per column", "accessMode": "Read", "dataType": "UInt32", "count": 1, "constraints": "" }, "activationThreshold": { "description": ("If the number of active connected synapses on a " "segment is at least this threshold, the segment " "is said to be active."), "accessMode": "Read", "dataType": "UInt32", "count": 1, "constraints": "" }, "reducedBasalThreshold": { "description": ("Activation threshold of basal segments for cells " "with active apical segments (with apicalTiebreak " "implementation). "), "accessMode": "Read", "dataType": "UInt32", "count": 1, "constraints": "" }, "initialPermanence": { "description": "Initial permanence of a new synapse.", "accessMode": "Read", "dataType": "Real32", "count": 1, "constraints": "" }, "connectedPermanence": { "description": ("If the permanence value for a synapse is greater " "than this value, it is said to be connected."), "accessMode": "Read", "dataType": "Real32", "count": 1, "constraints": "" }, "minThreshold": { "description": ("If the number of synapses active on a segment is at " "least this threshold, it is selected as the best " "matching cell in a bursting column."), "accessMode": "Read", "dataType": "UInt32", "count": 1, "constraints": "" }, "sampleSize": { "description": ("The desired number of active synapses for an " + "active cell"), "accessMode": "Read", "dataType": "UInt32", "count": 1 }, "learnOnOneCell": { "description": ("If True, the winner cell for each column will be" " fixed between resets."), "accessMode": "Read", "dataType": "Bool", "count": 1, 
"defaultValue": "false" }, "maxSynapsesPerSegment": { "description": "The maximum number of synapses per segment", "accessMode": "Read", "dataType": "UInt32", "count": 1 }, "maxSegmentsPerCell": { "description": "The maximum number of segments per cell", "accessMode": "Read", "dataType": "UInt32", "count": 1 }, "permanenceIncrement": { "description": ("Amount by which permanences of synapses are " "incremented during learning."), "accessMode": "Read", "dataType": "Real32", "count": 1 }, "permanenceDecrement": { "description": ("Amount by which permanences of synapses are " "decremented during learning."), "accessMode": "Read", "dataType": "Real32", "count": 1 }, "basalPredictedSegmentDecrement": { "description": ("Amount by which active permanences of synapses of " "previously predicted but inactive segments are " "decremented."), "accessMode": "Read", "dataType": "Real32", "count": 1 }, "apicalPredictedSegmentDecrement": { "description": ("Amount by which active permanences of synapses of " "previously predicted but inactive segments are " "decremented."), "accessMode": "Read", "dataType": "Real32", "count": 1 }, "seed": { "description": "Seed for the random number generator.", "accessMode": "Read", "dataType": "UInt32", "count": 1 }, "implementation": { "description": "Apical implementation", "accessMode": "Read", "dataType": "Byte", "count": 0, "constraints": ("enum: ApicalTiebreak, ApicalTiebreakCPP, ApicalDependent"), "defaultValue": "ApicalTiebreakCPP" }, }, } return spec
[ "def", "getSpec", "(", "cls", ")", ":", "spec", "=", "{", "\"description\"", ":", "ApicalTMSequenceRegion", ".", "__doc__", ",", "\"singleNodeOnly\"", ":", "True", ",", "\"inputs\"", ":", "{", "\"activeColumns\"", ":", "{", "\"description\"", ":", "(", "\"An a...
34.186722
18.751037
def from_file (self, file, file_location, project): """ Creates a virtual target with appropriate name and type from 'file'. If a target with that name in that project was already created, returns that already created target. TODO: more correct way would be to compute path to the file, based on name and source location for the project, and use that path to determine if the target was already created. TODO: passing project with all virtual targets starts to be annoying. """ if __debug__: from .targets import ProjectTarget assert isinstance(file, basestring) assert isinstance(file_location, basestring) assert isinstance(project, ProjectTarget) # Check if we've created a target corresponding to this file. path = os.path.join(os.getcwd(), file_location, file) path = os.path.normpath(path) if path in self.files_: return self.files_ [path] file_type = b2.build.type.type (file) result = FileTarget (file, file_type, project, None, file_location) self.files_ [path] = result return result
[ "def", "from_file", "(", "self", ",", "file", ",", "file_location", ",", "project", ")", ":", "if", "__debug__", ":", "from", ".", "targets", "import", "ProjectTarget", "assert", "isinstance", "(", "file", ",", "basestring", ")", "assert", "isinstance", "(",...
44.740741
21.444444
def clear(self): """ Clears the context. """ self._objects.clear() self._class_aliases = {} self._unicodes = {} self.extra = {}
[ "def", "clear", "(", "self", ")", ":", "self", ".", "_objects", ".", "clear", "(", ")", "self", ".", "_class_aliases", "=", "{", "}", "self", ".", "_unicodes", "=", "{", "}", "self", ".", "extra", "=", "{", "}" ]
22
10.75
def entries(self): """A list of :class:`PasswordEntry` objects.""" passwords = [] for store in self.stores: passwords.extend(store.entries) return natsort(passwords, key=lambda e: e.name)
[ "def", "entries", "(", "self", ")", ":", "passwords", "=", "[", "]", "for", "store", "in", "self", ".", "stores", ":", "passwords", ".", "extend", "(", "store", ".", "entries", ")", "return", "natsort", "(", "passwords", ",", "key", "=", "lambda", "e...
37.666667
10.833333
def confirm_login(): ''' This sets the current session as fresh. Sessions become stale when they are reloaded from a cookie. ''' session['_fresh'] = True session['_id'] = current_app.login_manager._session_identifier_generator() user_login_confirmed.send(current_app._get_current_object())
[ "def", "confirm_login", "(", ")", ":", "session", "[", "'_fresh'", "]", "=", "True", "session", "[", "'_id'", "]", "=", "current_app", ".", "login_manager", ".", "_session_identifier_generator", "(", ")", "user_login_confirmed", ".", "send", "(", "current_app", ...
38.75
25.5
def _save_pys(self, filepath): """Saves file as pys file and returns True if save success Parameters ---------- filepath: String \tTarget file path for xls file """ try: with Bz2AOpen(filepath, "wb", main_window=self.main_window) as outfile: interface = Pys(self.grid.code_array, outfile) interface.from_code_array() except (IOError, ValueError), err: try: post_command_event(self.main_window, self.StatusBarMsg, text=err) return except TypeError: # The main window does not exist any more pass return not outfile.aborted
[ "def", "_save_pys", "(", "self", ",", "filepath", ")", ":", "try", ":", "with", "Bz2AOpen", "(", "filepath", ",", "\"wb\"", ",", "main_window", "=", "self", ".", "main_window", ")", "as", "outfile", ":", "interface", "=", "Pys", "(", "self", ".", "grid...
28.37037
19.444444
def scale_0to1(image_in, exclude_outliers_below=False, exclude_outliers_above=False): """Scale the two images to [0, 1] based on min/max from both. Parameters ----------- image_in : ndarray Input image exclude_outliers_{below,above} : float Lower/upper limit, a value between 0 and 100. Returns ------- scaled_image : ndarray clipped and/or scaled image """ min_value = image_in.min() max_value = image_in.max() # making a copy to ensure no side-effects image = image_in.copy() if exclude_outliers_below: perctl = float(exclude_outliers_below) image[image < np.percentile(image, perctl)] = min_value if exclude_outliers_above: perctl = float(exclude_outliers_above) image[image > np.percentile(image, 100.0 - perctl)] = max_value image = (image - min_value) / (max_value - min_value) return image
[ "def", "scale_0to1", "(", "image_in", ",", "exclude_outliers_below", "=", "False", ",", "exclude_outliers_above", "=", "False", ")", ":", "min_value", "=", "image_in", ".", "min", "(", ")", "max_value", "=", "image_in", ".", "max", "(", ")", "# making a copy t...
25.666667
20.333333
def floats(self, n: int = 2) -> List[float]: """Generate a list of random float numbers. :param n: Raise 10 to the 'n' power. :return: The list of floating-point numbers. """ nums = [self.random.random() for _ in range(10 ** int(n))] return nums
[ "def", "floats", "(", "self", ",", "n", ":", "int", "=", "2", ")", "->", "List", "[", "float", "]", ":", "nums", "=", "[", "self", ".", "random", ".", "random", "(", ")", "for", "_", "in", "range", "(", "10", "**", "int", "(", "n", ")", ")"...
33.555556
10
def register(cls, name, type_): """ Register a new type for an entry-type. The 2nd argument has to be a subclass of structures.Entry. """ if not issubclass(type_, Entry): raise exceptions.InvalidEntryType("%s is not a subclass of Entry" % str(type_)) cls._registry[name.lower()] = type_
[ "def", "register", "(", "cls", ",", "name", ",", "type_", ")", ":", "if", "not", "issubclass", "(", "type_", ",", "Entry", ")", ":", "raise", "exceptions", ".", "InvalidEntryType", "(", "\"%s is not a subclass of Entry\"", "%", "str", "(", "type_", ")", ")...
37.666667
15.666667
def acquire(self, waitflag=None): """Dummy implementation of acquire(). For blocking calls, self.locked_status is automatically set to True and returned appropriately based on value of ``waitflag``. If it is non-blocking, then the value is actually checked and not set if it is already acquired. This is all done so that threading.Condition's assert statements aren't triggered and throw a little fit. """ if waitflag is None or waitflag: self.locked_status = True return True else: if not self.locked_status: self.locked_status = True return True else: return False
[ "def", "acquire", "(", "self", ",", "waitflag", "=", "None", ")", ":", "if", "waitflag", "is", "None", "or", "waitflag", ":", "self", ".", "locked_status", "=", "True", "return", "True", "else", ":", "if", "not", "self", ".", "locked_status", ":", "sel...
36.35
15.95
def print_diff(self, summary1=None, summary2=None): """Compute diff between to summaries and print it. If no summary is provided, the diff from the last to the current summary is used. If summary1 is provided the diff from summary1 to the current summary is used. If summary1 and summary2 are provided, the diff between these two is used. """ summary.print_(self.diff(summary1=summary1, summary2=summary2))
[ "def", "print_diff", "(", "self", ",", "summary1", "=", "None", ",", "summary2", "=", "None", ")", ":", "summary", ".", "print_", "(", "self", ".", "diff", "(", "summary1", "=", "summary1", ",", "summary2", "=", "summary2", ")", ")" ]
50.555556
20.666667
def searchResults(self, REQUEST=None, used=None, **kw): """Search the catalog Search terms can be passed in the REQUEST or as keyword arguments. The used argument is now deprecated and ignored """ if REQUEST and REQUEST.get('getRequestUID') \ and self.id == CATALOG_ANALYSIS_LISTING: # Fetch all analyses that have the request UID passed in as an ancestor, # cause we want Primary ARs to always display the analyses from their # derived ARs (if result is not empty) request = REQUEST.copy() orig_uid = request.get('getRequestUID') # If a list of request uid, retrieve them sequentially to make the # masking process easier if isinstance(orig_uid, list): results = list() for uid in orig_uid: request['getRequestUID'] = [uid] results += self.searchResults(REQUEST=request, used=used, **kw) return results # Get all analyses, those from descendant ARs included del request['getRequestUID'] request['getAncestorsUIDs'] = orig_uid results = self.searchResults(REQUEST=request, used=used, **kw) # Masking primary = filter(lambda an: an.getParentUID == orig_uid, results) derived = filter(lambda an: an.getParentUID != orig_uid, results) derived_keys = map(lambda an: an.getKeyword, derived) results = filter(lambda an: an.getKeyword not in derived_keys, primary) return results + derived # Normal search return self._catalog.searchResults(REQUEST, used, **kw)
[ "def", "searchResults", "(", "self", ",", "REQUEST", "=", "None", ",", "used", "=", "None", ",", "*", "*", "kw", ")", ":", "if", "REQUEST", "and", "REQUEST", ".", "get", "(", "'getRequestUID'", ")", "and", "self", ".", "id", "==", "CATALOG_ANALYSIS_LIS...
38.487805
21.804878
def table_width(outer_widths, outer_border, inner_border): """Determine the width of the entire table including borders and padding. :param iter outer_widths: List of widths (with padding) for each column. :param int outer_border: Sum of left and right outer border visible widths. :param int inner_border: Visible width of the inner border character. :return: The width of the table. :rtype: int """ column_count = len(outer_widths) # Count how much space outer and inner borders take up. non_data_space = outer_border if column_count: non_data_space += inner_border * (column_count - 1) # Space of all columns and their padding. data_space = sum(outer_widths) return data_space + non_data_space
[ "def", "table_width", "(", "outer_widths", ",", "outer_border", ",", "inner_border", ")", ":", "column_count", "=", "len", "(", "outer_widths", ")", "# Count how much space outer and inner borders take up.", "non_data_space", "=", "outer_border", "if", "column_count", ":"...
37.25
19.85
def RemoveBackground(EPIC, campaign=None): ''' Returns :py:obj:`True` or :py:obj:`False`, indicating whether or not to remove the background flux for the target. If ``campaign < 3``, returns :py:obj:`True`, otherwise returns :py:obj:`False`. ''' if campaign is None: campaign = Campaign(EPIC) if hasattr(campaign, '__len__'): raise AttributeError( "Please choose a campaign/season for this target: %s." % campaign) if campaign < 3: return True else: return False
[ "def", "RemoveBackground", "(", "EPIC", ",", "campaign", "=", "None", ")", ":", "if", "campaign", "is", "None", ":", "campaign", "=", "Campaign", "(", "EPIC", ")", "if", "hasattr", "(", "campaign", ",", "'__len__'", ")", ":", "raise", "AttributeError", "...
31.058824
23.529412
def usearch61_fast_cluster(intermediate_fasta, percent_id=0.97, minlen=64, output_dir=".", remove_usearch_logs=False, wordlength=8, usearch61_maxrejects=8, usearch61_maxaccepts=1, HALT_EXEC=False, output_uc_filepath=None, log_name="fast_clustered.log", threads=1.0): """ Performs usearch61 de novo fast clustering via cluster_fast option Only supposed to be used with length sorted data (and performs length sorting automatically) and does not support reverse strand matching intermediate_fasta: fasta filepath to be clustered with usearch61 percent_id: percentage id to cluster at minlen: minimum sequence length output_dir: directory to output log, OTU mapping, and intermediate files remove_usearch_logs: Saves usearch log files wordlength: word length to use for initial high probability sequence matches usearch61_maxrejects: Set to 'default' or an int value specifying max rejects usearch61_maxaccepts: Number of accepts allowed by usearch61 HALT_EXEC: application controller option to halt execution output_uc_filepath: Path to write clusters (.uc) file. log_name: filepath to write usearch61 generated log file threads: Specify number of threads used per core per CPU """ log_filepath = join(output_dir, log_name) params = {'--minseqlength': minlen, '--cluster_fast': intermediate_fasta, '--id': percent_id, '--uc': output_uc_filepath, '--wordlength': wordlength, '--maxrejects': usearch61_maxrejects, '--maxaccepts': usearch61_maxaccepts, '--usersort': True, '--threads': threads } if not remove_usearch_logs: params['--log'] = log_filepath clusters_fp = output_uc_filepath app = Usearch61(params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC) app_result = app() return clusters_fp, app_result
[ "def", "usearch61_fast_cluster", "(", "intermediate_fasta", ",", "percent_id", "=", "0.97", ",", "minlen", "=", "64", ",", "output_dir", "=", "\".\"", ",", "remove_usearch_logs", "=", "False", ",", "wordlength", "=", "8", ",", "usearch61_maxrejects", "=", "8", ...
39.727273
17.2
def select_by_visible_text(self, text): """Select all options that display text matching the argument. That is, when given "Bar" this would select an option like: <option value="foo">Bar</option> :Args: - text - The visible text to match against throws NoSuchElementException If there is no option with specified text in SELECT """ xpath = ".//option[normalize-space(.) = %s]" % self._escapeString(text) opts = self._el.find_elements(By.XPATH, xpath) matched = False for opt in opts: self._setSelected(opt) if not self.is_multiple: return matched = True if len(opts) == 0 and " " in text: subStringWithoutSpace = self._get_longest_token(text) if subStringWithoutSpace == "": candidates = self.options else: xpath = ".//option[contains(.,%s)]" % self._escapeString(subStringWithoutSpace) candidates = self._el.find_elements(By.XPATH, xpath) for candidate in candidates: if text == candidate.text: self._setSelected(candidate) if not self.is_multiple: return matched = True if not matched: raise NoSuchElementException("Could not locate element with visible text: %s" % text)
[ "def", "select_by_visible_text", "(", "self", ",", "text", ")", ":", "xpath", "=", "\".//option[normalize-space(.) = %s]\"", "%", "self", ".", "_escapeString", "(", "text", ")", "opts", "=", "self", ".", "_el", ".", "find_elements", "(", "By", ".", "XPATH", ...
39.666667
18.472222
def BeginEdit(self, row, col, grid): """ Fetch the value from the table and prepare the edit control to begin editing. Set the focus to the edit control. *Must Override* """ # Disable if cell is locked, enable if cell is not locked grid = self.main_window.grid key = grid.actions.cursor locked = grid.code_array.cell_attributes[key]["locked"]or \ grid.code_array.cell_attributes[key]["button_cell"] self._tc.Enable(not locked) self._tc.Show(not locked) if locked: grid.code_array.result_cache.clear() self._execute_cell_code(row, col, grid) # Mirror our changes onto the main_window's code bar self._tc.Bind(wx.EVT_CHAR, self.OnChar) self._tc.Bind(wx.EVT_KEY_UP, self.OnKeyUp) # Save cell and grid info self._row = row self._col = [col, ] # List of columns we are occupying self._grid = grid start_value = grid.GetTable().GetValue(*key) try: start_value_list = [start_value[i:i+self.max_char_width] for i in xrange(0, len(start_value), self.max_char_width)] startValue = "\n".join(start_value_list) self.startValue = startValue except TypeError: self.startValue = u"" # Set up the textcontrol to look like this cell (TODO: Does not work) try: self._tc.SetValue(unicode(startValue)) except (TypeError, AttributeError, UnboundLocalError): self._tc.SetValue(u"") self._tc.SetFont(grid.GetCellFont(row, col)) self._tc.SetBackgroundColour(grid.GetCellBackgroundColour(row, col)) self._update_control_length() self._tc.SetInsertionPointEnd() h_scroll_pos = grid.GetScrollPos(wx.SB_HORIZONTAL) v_scroll_pos = grid.GetScrollPos(wx.SB_VERTICAL) self._tc.SetFocus() # GTK Bugfix for jumping grid when focusing the textctrl if grid.GetScrollPos(wx.SB_HORIZONTAL) != h_scroll_pos or \ grid.GetScrollPos(wx.SB_VERTICAL) != v_scroll_pos: wx.ScrolledWindow.Scroll(grid, (h_scroll_pos, v_scroll_pos)) # Select the text self._tc.SetSelection(0, self._tc.GetLastPosition())
[ "def", "BeginEdit", "(", "self", ",", "row", ",", "col", ",", "grid", ")", ":", "# Disable if cell is locked, enable if cell is not locked", "grid", "=", "self", ".", "main_window", ".", "grid", "key", "=", "grid", ".", "actions", ".", "cursor", "locked", "=",...
34.954545
21.924242
def _make_auth(self, method, date, nonce, path, query={}, ctype='application/json'): ''' Create the request signature to authenticate Args: - method (str): HTTP method - date (str): HTTP date header string - nonce (str): Cryptographic nonce - path (str): URL pathname - query (dict, default={}): URL query string in key-value pairs - ctype (str, default='application/json'): HTTP Content-Type ''' query = urlencode(query) hmac_str = (method + '\n' + nonce + '\n' + date + '\n' + ctype + '\n' + path + '\n' + query + '\n').lower().encode('utf-8') signature = base64.b64encode(hmac.new(self._secret_key, hmac_str, digestmod=hashlib.sha256).digest()) auth = 'On ' + self._access_key.decode('utf-8') + ':HmacSHA256:' + signature.decode('utf-8') if self._logging: utils.log({ 'query': query, 'hmac_str': hmac_str, 'signature': signature, 'auth': auth }) return auth
[ "def", "_make_auth", "(", "self", ",", "method", ",", "date", ",", "nonce", ",", "path", ",", "query", "=", "{", "}", ",", "ctype", "=", "'application/json'", ")", ":", "query", "=", "urlencode", "(", "query", ")", "hmac_str", "=", "(", "method", "+"...
36.566667
25.9
def load_dynamic_config(config_file=DEFAULT_DYNAMIC_CONFIG_FILE):
    """Load and parse dynamic config"""
    dynamic_configurations = {}

    # Make the config file's directory importable so `import config` resolves.
    config_dir = path.dirname(path.abspath(config_file))
    sys.path.insert(0, config_dir)

    try:
        dynamic_configurations = __import__('config').CONFIG
    except ImportError:
        # Fall through with the empty default when config.py is missing
        # or its own imports fail.
        LOG.error('ImportError: Unable to load dynamic config. Check config.py file imports!')

    return dynamic_configurations
[ "def", "load_dynamic_config", "(", "config_file", "=", "DEFAULT_DYNAMIC_CONFIG_FILE", ")", ":", "dynamic_configurations", "=", "{", "}", "# Insert config path so we can import it", "sys", ".", "path", ".", "insert", "(", "0", ",", "path", ".", "dirname", "(", "path"...
36.266667
21
def show_rendered_files(results_dict):
    """
    Walk a nested dictionary returned from :meth:`Hub.render` and print
    every rendered file path it contains, recursing into sub-dicts.
    """
    for creator, rendered in results_dict.items():
        if isinstance(rendered, string_types):
            print("rendered file: %s (created by: %s)" % (rendered, creator))
        else:
            # Nested result dict — descend.
            show_rendered_files(rendered)
[ "def", "show_rendered_files", "(", "results_dict", ")", ":", "for", "k", ",", "v", "in", "results_dict", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "string_types", ")", ":", "print", "(", "\"rendered file: %s (created by: %s)\"", "%", "(...
32
13.454545
def validate_query(self, using=None, **kwargs):
    """
    Validate a potentially expensive query without executing it.

    Any additional keyword arguments will be passed to
    ``Elasticsearch.indices.validate_query`` unchanged.
    """
    connection = self._get_connection(using)
    return connection.indices.validate_query(index=self._name, **kwargs)
[ "def", "validate_query", "(", "self", ",", "using", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_get_connection", "(", "using", ")", ".", "indices", ".", "validate_query", "(", "index", "=", "self", ".", "_name", ",", "*", ...
43.375
20.625
def get_sigma_tables(self, imt, rctx, stddev_types):
    """
    Returns modification factors for the standard deviations, given the
    rupture and intensity measure type.

    :returns:
        List of standard deviation modification tables, each as an array
        of [Number Distances, Number Levels]
    """
    output_tables = []
    for stddev_type in stddev_types:
        # For PGA and PGV only needs to apply magnitude interpolation.
        # NOTE(review): `in` here is a substring test against 'PGA PGV',
        # not a membership test in a list — e.g. 'A' or 'PG' would also
        # match. Presumably imt.name is always a full IMT name; confirm.
        if imt.name in 'PGA PGV':
            interpolator = interp1d(self.magnitudes,
                                    self.sigma[stddev_type][imt.name],
                                    axis=2)
            output_tables.append(
                interpolator(rctx.mag).reshape(self.shape[0], self.shape[3]))
        else:
            # For spectral accelerations - need two step process
            # Interpolate period (in log10 space), then magnitude.
            interpolator = interp1d(numpy.log10(self.periods),
                                    self.sigma[stddev_type]["SA"],
                                    axis=1)
            period_table = interpolator(numpy.log10(imt.period))
            mag_interpolator = interp1d(self.magnitudes, period_table, axis=1)
            output_tables.append(mag_interpolator(rctx.mag))
    return output_tables
[ "def", "get_sigma_tables", "(", "self", ",", "imt", ",", "rctx", ",", "stddev_types", ")", ":", "output_tables", "=", "[", "]", "for", "stddev_type", "in", "stddev_types", ":", "# For PGA and PGV only needs to apply magnitude interpolation", "if", "imt", ".", "name"...
45.030303
19.030303
def batch_size(self):
    """int: The number of results to fetch per batch.

    Clamped to limit if limit is set and is smaller than the given
    batch size.
    """
    # self.get presumably reads a stored option; falls back to the
    # module-level DEFAULT_BATCH_SIZE.
    batch_size = self.get("batch_size", DEFAULT_BATCH_SIZE)
    if self.limit is not None:
        # Never fetch more than the remaining overall limit.
        return min(self.limit, batch_size)
    return batch_size
[ "def", "batch_size", "(", "self", ")", ":", "batch_size", "=", "self", ".", "get", "(", "\"batch_size\"", ",", "DEFAULT_BATCH_SIZE", ")", "if", "self", ".", "limit", "is", "not", "None", ":", "return", "min", "(", "self", ".", "limit", ",", "batch_size",...
38.555556
13.444444
def process_index(self, url, page):
    """Process the contents of a PyPI page

    Scans every link on the page into the package-page index, and if the
    page itself is a package page, follows its external download links.
    Returns the page text with md5 fragments rewritten into anchors, or
    "" when the URL is not a package page.
    """
    def scan(link):
        # Process a URL to see if it's for a package page
        if link.startswith(self.index_url):
            parts = list(map(
                urllib.parse.unquote,
                link[len(self.index_url):].split('/')
            ))
            if len(parts) == 2 and '#' not in parts[1]:
                # it's a package page, sanitize and index it
                pkg = safe_name(parts[0])
                ver = safe_version(parts[1])
                self.package_pages.setdefault(pkg.lower(), {})[link] = True
                return to_filename(pkg), to_filename(ver)
        return None, None

    # process an index page into the package-page index
    for match in HREF.finditer(page):
        try:
            scan(urllib.parse.urljoin(url, htmldecode(match.group(1))))
        except ValueError:
            # Malformed URL in the page — ignore and continue scanning.
            pass

    pkg, ver = scan(url)  # ensure this page is in the page index
    if pkg:
        # process individual package page
        for new_url in find_external_links(url, page):
            # Process the found URL
            base, frag = egg_info_for_url(new_url)
            if base.endswith('.py') and not frag:
                if ver:
                    # Tag bare .py downloads with an #egg fragment so the
                    # version survives.
                    new_url += '#egg=%s-%s' % (pkg, ver)
                else:
                    self.need_version_info(url)
            self.scan_url(new_url)

        return PYPI_MD5.sub(
            lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1, 3, 2), page
        )
    else:
        return ""
[ "def", "process_index", "(", "self", ",", "url", ",", "page", ")", ":", "def", "scan", "(", "link", ")", ":", "# Process a URL to see if it's for a package page", "if", "link", ".", "startswith", "(", "self", ".", "index_url", ")", ":", "parts", "=", "list",...
39.595238
19.047619
def argmax(attrs, inputs, proto_obj):
    """Returns indices of the maximum values along an axis"""
    keepdims = attrs.get('keepdims', 1)
    axis = attrs.get('axis', 0)
    argmax_op = symbol.argmax(inputs[0], axis=axis, keepdims=keepdims)
    # ONNX's ArgMax always produces int64, so wrap the result in a cast.
    return 'cast', {'dtype': 'int64'}, argmax_op
[ "def", "argmax", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "axis", "=", "attrs", ".", "get", "(", "'axis'", ",", "0", ")", "keepdims", "=", "attrs", ".", "get", "(", "'keepdims'", ",", "1", ")", "argmax_op", "=", "symbol", ".", "argm...
46.875
8.75
def compile_create(self, blueprint, command, _):
    """
    Compile a create table command.

    Builds the CREATE TABLE statement from the blueprint's columns,
    then appends any foreign-key and primary-key clauses.
    """
    column_sql = ', '.join(self._get_columns(blueprint))
    table_sql = 'CREATE TABLE %s (%s' % (self.wrap_table(blueprint),
                                         column_sql)
    table_sql += self._add_foreign_keys(blueprint)
    table_sql += self._add_primary_keys(blueprint)
    return table_sql + ')'
[ "def", "compile_create", "(", "self", ",", "blueprint", ",", "command", ",", "_", ")", ":", "columns", "=", "', '", ".", "join", "(", "self", ".", "_get_columns", "(", "blueprint", ")", ")", "sql", "=", "'CREATE TABLE %s (%s'", "%", "(", "self", ".", "...
27.769231
19.461538
def exvp(x, y, x0, y0, c2, c4, theta0, ff):
    """Convert virtual pixel(s) to real pixel(s).

    Applies exvp_scalar() either to a single (x, y) pair or element-wise
    over two equal-length sequences of coordinates.

    Parameters
    ----------
    x : array-like
        X coordinate (pixel).
    y : array-like
        Y coordinate (pixel).
    x0 : float
        X coordinate of reference pixel.
    y0 : float
        Y coordinate of reference pixel.
    c2 : float
        Coefficient corresponding to the term r**2 in distortion equation.
    c4 : float
        Coefficient corresponding to the term r**4 in distortion equation.
    theta0 : float
        Additional rotation angle (radians).
    ff : float
        Scaling factor to be applied to the Y axis.

    Returns
    -------
    xdist, ydist : tuple of floats (or two arrays of floats)
        Distorted coordinates.

    Raises
    ------
    ValueError
        If exactly one of `x`, `y` is a scalar.
    """
    x_is_scalar = np.isscalar(x)
    y_is_scalar = np.isscalar(y)
    if x_is_scalar and y_is_scalar:
        return exvp_scalar(x, y, x0=x0, y0=y0, c2=c2, c4=c4,
                           theta0=theta0, ff=ff)
    if x_is_scalar or y_is_scalar:
        raise ValueError("invalid mixture of scalars and arrays")
    # Element-wise conversion over both coordinate sequences.
    pairs = [exvp_scalar(xi, yi, x0=x0, y0=y0, c2=c2, c4=c4,
                         theta0=theta0, ff=ff)
             for xi, yi in zip(x, y)]
    xdist = np.array([p[0] for p in pairs])
    ydist = np.array([p[1] for p in pairs])
    return xdist, ydist
[ "def", "exvp", "(", "x", ",", "y", ",", "x0", ",", "y0", ",", "c2", ",", "c4", ",", "theta0", ",", "ff", ")", ":", "if", "all", "(", "[", "np", ".", "isscalar", "(", "x", ")", ",", "np", ".", "isscalar", "(", "y", ")", "]", ")", ":", "x...
30.66
20.14
def custom_getter_scope(custom_getter):
    """
    Context manager body installing `custom_getter` on the current
    variable scope. NOTE(review): this is a generator; presumably it is
    wrapped with @contextmanager at the definition site — confirm.

    Args:
        custom_getter: the same as in :func:`tf.get_variable`

    Returns:
        The current variable scope with a custom_getter.
    """
    scope = tf.get_variable_scope()
    if get_tf_version_tuple() >= (1, 5):
        # TF >= 1.5 can re-enter the scope without opening a new name
        # scope (auxiliary_name_scope=False).
        with tf.variable_scope(
                scope, custom_getter=custom_getter,
                auxiliary_name_scope=False):
            yield
    else:
        # Older TF: re-entering the variable scope resets the name scope,
        # so restore the current name scope manually.
        ns = tf.get_default_graph().get_name_scope()
        with tf.variable_scope(
                scope, custom_getter=custom_getter):
            with tf.name_scope(ns + '/' if ns else ''):
                yield
[ "def", "custom_getter_scope", "(", "custom_getter", ")", ":", "scope", "=", "tf", ".", "get_variable_scope", "(", ")", "if", "get_tf_version_tuple", "(", ")", ">=", "(", "1", ",", "5", ")", ":", "with", "tf", ".", "variable_scope", "(", "scope", ",", "cu...
31.45
14.35
def get_player_img(player_id):
    """
    Returns the image of the player from stats.nba.com as a numpy array
    and saves the image as PNG file in the current directory.

    NOTE(review): depends on the stats.nba.com media endpoint being
    reachable and unchanged — verify the URL still resolves.

    Parameters
    ----------
    player_id: int
        The player ID used to find the image.

    Returns
    -------
    player_img: ndarray
        The multidimensional numpy array of the player image, which
        matplotlib can plot.
    """
    url = "http://stats.nba.com/media/players/230x185/"+str(player_id)+".png"
    # Side effect: the PNG is written to the current working directory.
    img_file = str(player_id) + ".png"
    pic = urlretrieve(url, img_file)
    # urlretrieve returns (filename, headers); read the saved file back.
    player_img = plt.imread(pic[0])
    return player_img
[ "def", "get_player_img", "(", "player_id", ")", ":", "url", "=", "\"http://stats.nba.com/media/players/230x185/\"", "+", "str", "(", "player_id", ")", "+", "\".png\"", "img_file", "=", "str", "(", "player_id", ")", "+", "\".png\"", "pic", "=", "urlretrieve", "("...
29.238095
20.190476
def add_to(self, parent, **kwargs):
    # type: (Part, **Any) -> Part
    """Add a new instance of this model to a part.

    Only valid when this part is of category MODEL; the new instance is
    created under `parent`. You may pass ``suppress_kevents=True`` as an
    extra keyword argument to stop the backend from updating the
    frontend, trading live updates for performance.

    :param parent: part to add the new instance to
    :type parent: :class:`Part`
    :param kwargs: (optional) additional keyword=value arguments passed
        through to the create request
    :type kwargs: dict
    :return: :class:`Part` with category `INSTANCE`
    :raises APIError: if this part is not a model or the child instance
        cannot be created

    Example
    -------
    >>> wheel_model = project.model('wheel')
    >>> bike = project.part('Bike')
    >>> wheel_model.add_to(bike)
    """
    if self.category != Category.MODEL:
        raise APIError("Part should be of category MODEL")
    return self._client.create_part(parent, self, **kwargs)
[ "def", "add_to", "(", "self", ",", "parent", ",", "*", "*", "kwargs", ")", ":", "# type: (Part, **Any) -> Part", "if", "self", ".", "category", "!=", "Category", ".", "MODEL", ":", "raise", "APIError", "(", "\"Part should be of category MODEL\"", ")", "return", ...
42.84375
25
async def findTask(self, *args, **kwargs):
    """
    Find Indexed Task

    Find a task by index path, returning the highest-rank task with that
    path. If no task exists for the given path, this API end-point will
    respond with a 404 status.

    This method gives output: ``v1/indexed-task-response.json#``

    This method is ``stable``
    """
    call_info = self.funcinfo["findTask"]
    return await self._makeApiCall(call_info, *args, **kwargs)
[ "async", "def", "findTask", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "await", "self", ".", "_makeApiCall", "(", "self", ".", "funcinfo", "[", "\"findTask\"", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
34.615385
27.076923
def calculateLocalElasticitySegments(self, bp, span=2, frameGap=None, helical=False,
                                     unit='kT', err_type='block', tool='gmx analyze',
                                     outFile=None):
    """Calculate local elastic properties of consecutive overlapped DNA segments

    Calculate local elastic properties of consecutive overlapped DNA
    segments of length given by `span`.

    Parameters
    ----------
    bp : list
        List of two base-steps forming the global DNA segment.
        For example: with ``bp=[5, 50]``, 5-50 base-step segment will be
        considered.

    span : int
        Length of overlapping (local) DNA segments. It should be less
        than four.

    frameGap : int
        How many frames to skip for next time-frame. Lower the number,
        slower will be the calculation.

    helical : bool
        If ``helical=True``, elastic matrix for **helical base-step**
        parameters are calculated. Otherwise, by default, elastic matrix
        for **base-step** parameters are calculated.

    unit : str
        Unit of energy. Allowed units are: ``'kT', 'kJ/mol' and 'kcal/mol'``.

    err_type : str
        Error estimation by autocorrelation method ``err_type='acf'`` or
        block averaging method ``err_type='block'``

    tool : str
        GROMACS tool to calculate error. In older versions it is
        `g_analyze` while in newer versions (above 2016) it is
        `gmx analyze`.

    outFile : str
        Output file in csv format.

    Returns
    -------
    segments : list
        List of DNA segments for which local elastic properties were
        calculated.
    elasticities : OrderedDict
        An ordered dictionary of 1D arrays of shape (segments). The keys
        are the names of the elasticities.
    error : OrderedDict
        An ordered dictionary of 1D arrays of shape (segments). The keys
        are the names of the elasticities.
    """
    # Property names depend on whether helical base-step parameters are
    # requested; both name vectors are module-level constants.
    if helical:
        props_name = helical_local_props_vector
    else:
        props_name = local_props_vector

    segments, errors, elasticities = [], OrderedDict(), OrderedDict()
    for name in props_name:
        elasticities[name] = []
        errors[name] = []

    # Slide a window of `span` base-steps over the requested range.
    for s in range(bp[0], bp[1]):
        if s+span-1 > bp[1]:
            break
        time, elasticity_t = self.getLocalElasticityByTime([s, s+span-1],
                                                           frameGap=frameGap,
                                                           helical=helical,
                                                           unit=unit)
        error_t = dnaMD.get_error(time, list(elasticity_t.values()),
                                  len(props_name), err_type=err_type,
                                  tool=tool)
        for i in range(len(props_name)):
            esy_t = elasticity_t[props_name[i]][-1]  # only take last entry
            elasticities[props_name[i]].append(esy_t)
            errors[props_name[i]].append(error_t[i])
        segments.append('{0}-{1}'.format(s, s+span-1))

    # Write output file
    if outFile is not None:
        with open(outFile, 'w') as fout:
            # Header: segment label plus value/error column per property.
            fout.write('#bps')
            for name in props_name:
                fout.write(', {0}, {0}-error'.format(name))
            fout.write('\n')
            for s in range(len(segments)):
                fout.write('{0}'.format(segments[s]))
                for name in props_name:
                    fout.write(', {0:.5f}, {1:.5f}'.format(
                        elasticities[name][s], errors[name][s]))
                fout.write('\n')

    return segments, elasticities, errors
[ "def", "calculateLocalElasticitySegments", "(", "self", ",", "bp", ",", "span", "=", "2", ",", "frameGap", "=", "None", ",", "helical", "=", "False", ",", "unit", "=", "'kT'", ",", "err_type", "=", "'block'", ",", "tool", "=", "'gmx analyze'", ",", "outF...
42.047059
28.176471
def put(self, specimen, coordinate_system, new_pars):
    """
    Given a coordinate system and a new parameters dictionary that
    follows pmagpy convention given by the pmag.py/domean function it
    alters this fit's bounds and parameters such that it matches the new
    data.

    @param: specimen -> None if fit is for a site or a sample or a valid
        specimen from self.GUI
    @param: coordinate_system -> the coordinate system to alter
    @param: new_pars -> the new parameters to change your fit to
    @alters: tmin, tmax, pars, geopars, tiltpars, PCA_type
    """
    if specimen != None:
        # Backfill identifying keys downstream pmagpy code expects.
        if type(new_pars) == dict:
            if 'er_specimen_name' not in list(new_pars.keys()):
                new_pars['er_specimen_name'] = specimen
            if 'specimen_comp_name' not in list(new_pars.keys()):
                new_pars['specimen_comp_name'] = self.name
        # Reject anything that is not a complete domean-style result.
        if type(new_pars) != dict or 'measurement_step_min' not in list(new_pars.keys()) or 'measurement_step_max' not in list(new_pars.keys()) or 'calculation_type' not in list(new_pars.keys()):
            print("-E- invalid parameters cannot assign to fit %s for specimen %s - was given:\n%s" % (self.name, specimen, str(new_pars)))
            return self.get(coordinate_system)

        self.tmin = new_pars['measurement_step_min']
        self.tmax = new_pars['measurement_step_max']
        self.PCA_type = new_pars['calculation_type']

        if self.GUI != None:
            steps = self.GUI.Data[specimen]['zijdblock_steps']
            tl = [self.tmin, self.tmax]
            for i, t in enumerate(tl):
                # Normalize each bound to the exact step-string present in
                # the measurement list (plain, integer, mT, or Celsius form).
                if str(t) in steps:
                    tl[i] = str(t)
                elif str(int(t)) in steps:
                    tl[i] = str(int(t))
                elif "%.1fmT" % t in steps:
                    tl[i] = "%.1fmT" % t
                elif "%.0fC" % t in steps:
                    tl[i] = "%.0fC" % t
                else:
                    print("-E- Step " + str(tl[i]) + " does not exsist (func: Fit.put)")
                    tl[i] = str(t)
            self.tmin, self.tmax = tl
        elif meas_data != None:
            # NOTE(review): `meas_data` is not defined in this scope or as
            # a visible global — this branch raises NameError whenever
            # self.GUI is None. Confirm against the full module.
            steps = meas_data[specimen]['zijdblock_steps']
            tl = [self.tmin, self.tmax]
            for i, t in enumerate(tl):
                if str(t) in steps:
                    tl[i] = str(t)
                elif str(int(t)) in steps:
                    tl[i] = str(int(t))
                elif "%.1fmT" % t in steps:
                    tl[i] = "%.1fmT" % t
                elif "%.0fC" % t in steps:
                    tl[i] = "%.0fC" % t
                else:
                    print("-E- Step " + str(tl[i]) + " does not exsist (func: Fit.put)")
                    tl[i] = str(t)
            self.tmin, self.tmax = tl
        else:
            # NOTE(review): `tl` is undefined on this path — likely a
            # latent NameError; presumably [self.tmin, self.tmax] was
            # intended. Verify before relying on this branch.
            self.tmin, self.tmax = list(map(str, tl))

    # Store the parameters under the matching coordinate-system slot.
    if coordinate_system == 'DA-DIR' or coordinate_system == 'specimen':
        self.pars = new_pars
    elif coordinate_system == 'DA-DIR-GEO' or coordinate_system == 'geographic':
        self.geopars = new_pars
    elif coordinate_system == 'DA-DIR-TILT' or coordinate_system == 'tilt-corrected':
        self.tiltpars = new_pars
    else:
        print('-E- no such coordinate system could not assign those parameters to fit')
[ "def", "put", "(", "self", ",", "specimen", ",", "coordinate_system", ",", "new_pars", ")", ":", "if", "specimen", "!=", "None", ":", "if", "type", "(", "new_pars", ")", "==", "dict", ":", "if", "'er_specimen_name'", "not", "in", "list", "(", "new_pars",...
55.894737
26.350877
async def update_watermark(self, update_watermark_request):
    """Update the watermark (read timestamp) of a conversation."""
    watermark_response = hangouts_pb2.UpdateWatermarkResponse()
    # Protobuf RPC: the response message is filled in place.
    await self._pb_request('conversations/updatewatermark',
                           update_watermark_request, watermark_response)
    return watermark_response
[ "async", "def", "update_watermark", "(", "self", ",", "update_watermark_request", ")", ":", "response", "=", "hangouts_pb2", ".", "UpdateWatermarkResponse", "(", ")", "await", "self", ".", "_pb_request", "(", "'conversations/updatewatermark'", ",", "update_watermark_req...
56.333333
17
def p_annotation_spdx_id_1(self, p):
    """annotation_spdx_id : ANNOTATION_SPDX_ID LINE"""
    # ply grammar rule: the docstring above IS the grammar production.
    try:
        # Python 2 yields bytes from the lexer; decode to text there.
        if six.PY2:
            value = p[2].decode(encoding='utf-8')
        else:
            value = p[2]
        self.builder.set_annotation_spdx_id(self.document, value)
    except CardinalityError:
        # The SPDXREF tag appeared more than once for this annotation.
        self.more_than_one_error('SPDXREF', p.lineno(1))
    except OrderError:
        # SPDXREF must follow an Annotator tag.
        self.order_error('SPDXREF', 'Annotator', p.lineno(1))
[ "def", "p_annotation_spdx_id_1", "(", "self", ",", "p", ")", ":", "try", ":", "if", "six", ".", "PY2", ":", "value", "=", "p", "[", "2", "]", ".", "decode", "(", "encoding", "=", "'utf-8'", ")", "else", ":", "value", "=", "p", "[", "2", "]", "s...
39.916667
16.083333
def indices_to_points(indices, pitch, origin):
    """
    Convert indices of an (n,m,p) matrix into a set of voxel center points.

    Parameters
    ----------
    indices: (q, 3) int, index of voxel matrix (n,m,p)
    pitch: float, what pitch was the voxel matrix computed with
    origin: (3,) float, what is the origin of the voxel matrix

    Returns
    ----------
    points: (q, 3) float, list of points

    Raises
    ----------
    ValueError
        If `indices` is not (q, 3) or `origin` is not (3,).
    """
    indices = np.asanyarray(indices, dtype=np.float64)
    origin = np.asanyarray(origin, dtype=np.float64)
    pitch = float(pitch)

    # Fail fast on malformed input. (A leftover interactive-debugger hook
    # — `from IPython import embed; embed()` — used to run before this
    # raise; it has been removed so bad input raises immediately.)
    if indices.shape != (indices.shape[0], 3):
        raise ValueError('shape of indices must be (q, 3)')
    if origin.shape != (3,):
        raise ValueError('shape of origin must be (3,)')

    # Voxel center = index scaled by pitch, offset by the grid origin.
    points = indices * pitch + origin
    return points
[ "def", "indices_to_points", "(", "indices", ",", "pitch", ",", "origin", ")", ":", "indices", "=", "np", ".", "asanyarray", "(", "indices", ",", "dtype", "=", "np", ".", "float64", ")", "origin", "=", "np", ".", "asanyarray", "(", "origin", ",", "dtype...
29.571429
20
def nl_send_simple(sk, type_, flags, buf=None, size=0):
    """Construct and transmit a Netlink message.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L549

    Allocates a new Netlink message based on `type_` and `flags`. If `buf`
    points to payload of length `size` that payload will be appended to the
    message. Sends out the message using `nl_send_auto()`.

    Positional arguments:
    sk -- Netlink socket (nl_sock class instance).
    type_ -- Netlink message type (integer).
    flags -- Netlink message flags (integer).

    Keyword arguments:
    buf -- payload data.
    size -- size of `data` (integer).

    Returns:
    Number of characters sent on success or a negative error code.
    """
    msg = nlmsg_alloc_simple(type_, flags)
    # Only append a payload when both data and a non-zero size are given.
    if buf is not None and size:
        err = nlmsg_append(msg, buf, size, NLMSG_ALIGNTO)
        if err < 0:
            # Propagate libnl-style negative error codes to the caller.
            return err
    return nl_send_auto(sk, msg)
[ "def", "nl_send_simple", "(", "sk", ",", "type_", ",", "flags", ",", "buf", "=", "None", ",", "size", "=", "0", ")", ":", "msg", "=", "nlmsg_alloc_simple", "(", "type_", ",", "flags", ")", "if", "buf", "is", "not", "None", "and", "size", ":", "err"...
32.607143
20.285714
def _flatten_models(tasklist):
    " Create 1d-array containing all distinct models from ``tasklist``. "
    # BufferDict keyed by id() de-duplicates models while preserving
    # insertion order.
    ans = gvar.BufferDict()
    for task, mlist in tasklist:
        # Only 'fit' tasks carry model lists we care about.
        if task != 'fit':
            continue
        for m in mlist:
            id_m = id(m)
            if id_m not in ans:
                ans[id_m] = m
    # .buf is the underlying flat buffer of stored values.
    return ans.buf.tolist()
[ "def", "_flatten_models", "(", "tasklist", ")", ":", "ans", "=", "gvar", ".", "BufferDict", "(", ")", "for", "task", ",", "mlist", "in", "tasklist", ":", "if", "task", "!=", "'fit'", ":", "continue", "for", "m", "in", "mlist", ":", "id_m", "=", "id",...
34.727273
12.181818
def page(self, course):
    """ Get all data and display the page

    Aggregates per-task view/attempt/success counts for all registered
    users of `course`, merges them with the course's task list, and
    renders the admin task-list page (or a CSV export).
    """
    # Mongo aggregation: restrict to this course's registered users, then
    # sum per-task statistics across users.
    data = list(self.database.user_tasks.aggregate(
        [
            {
                "$match":
                    {
                        "courseid": course.get_id(),
                        "username": {"$in": self.user_manager.get_course_registered_users(course, False)}
                    }
            },
            {
                "$group":
                    {
                        "_id": "$taskid",
                        "viewed": {"$sum": 1},
                        "attempted": {"$sum": {"$cond": [{"$ne": ["$tried", 0]}, 1, 0]}},
                        "attempts": {"$sum": "$tried"},
                        "succeeded": {"$sum": {"$cond": ["$succeeded", 1, 0]}}
                    }
            }
        ]))

    # Load tasks and verify exceptions
    files = self.task_factory.get_readable_tasks(course)
    output = {}
    errors = []
    for task in files:
        try:
            output[task] = course.get_task(task)
        except Exception as inst:
            # Broken task definitions are reported on the page, not fatal.
            errors.append({"taskid": task, "error": str(inst)})
    # Order tasks by their configured order, then id, for stable display.
    tasks = OrderedDict(sorted(list(output.items()),
                               key=lambda t: (t[1].get_order(), t[1].get_id())))

    # Now load additional informations
    result = OrderedDict()
    for taskid in tasks:
        result[taskid] = {"name": tasks[taskid].get_name(self.user_manager.session_language()),
                          "viewed": 0, "attempted": 0, "attempts": 0,
                          "succeeded": 0, "url": self.submission_url_generator(taskid)}
    # Fold the aggregated statistics into the per-task rows.
    for entry in data:
        if entry["_id"] in result:
            result[entry["_id"]]["viewed"] = entry["viewed"]
            result[entry["_id"]]["attempted"] = entry["attempted"]
            result[entry["_id"]]["attempts"] = entry["attempts"]
            result[entry["_id"]]["succeeded"] = entry["succeeded"]

    if "csv" in web.input():
        return make_csv(result)

    return self.template_helper.get_renderer().course_admin.task_list(course, result, errors)
[ "def", "page", "(", "self", ",", "course", ")", ":", "data", "=", "list", "(", "self", ".", "database", ".", "user_tasks", ".", "aggregate", "(", "[", "{", "\"$match\"", ":", "{", "\"courseid\"", ":", "course", ".", "get_id", "(", ")", ",", "\"userna...
44.875
23.6875
def impute(X, value=None, train=None, dropna=True, inplace=True):
    """
    Performs mean imputation on a pandas dataframe.

    Args:
        value: instead of computing the mean, use this as the value
            argument to fillna
        train: an optional training mask with which to compute the mean
        dropna: whether to drop all-null columns (those with no mean)
        inplace: whether to perform the imputation inplace
    Returns:
        the imputed DataFrame
    """
    if value is None:
        # Compute column means, restricted to the training mask if given.
        fit_frame = X if train is None else X[train]
        value = fit_frame.mean()
    elif train is not None:
        raise ValueError("Cannot pass both train and value arguments")

    if dropna:
        null_columns = value.index[value.isnull()]
        if len(null_columns) > 0:
            logging.info('Dropping null columns: \n\t%s' % null_columns)
            if inplace:
                X.drop(null_columns, axis=1, inplace=True)
            else:
                X = X.drop(null_columns, axis=1, inplace=False)

    fill_values = value.dropna()
    if inplace:
        X.fillna(fill_values, inplace=True)
    else:
        X = X.fillna(fill_values, inplace=False)
    return X
[ "def", "impute", "(", "X", ",", "value", "=", "None", ",", "train", "=", "None", ",", "dropna", "=", "True", ",", "inplace", "=", "True", ")", ":", "if", "value", "is", "None", ":", "Xfit", "=", "X", "[", "train", "]", "if", "train", "is", "not...
34.6875
21
def _param(self):
    """ Get/Set a parameter.

    Returns a small proxy object supporting ``proxy[name]`` reads,
    ``proxy[name] = value`` writes (scalar vs. sequence dispatch), and
    iteration over all parameters.
    """
    class Parameters(object):
        def __getitem__(_self, name):
            return self.getParameter(name)

        def __setitem__(_self, name, value):
            # Scalars go through set(); sequences through setValues().
            # NOTE(review): `basestring` is Python 2 only — this raises
            # NameError on Python 3 unless aliased elsewhere; confirm.
            if isinstance(value, (float, int, basestring)):
                self.getParameter(name).set(value)
            else:
                self.getParameter(name).setValues(value)

        def __iter__(_self):
            return self.getParameters()
    return Parameters()
[ "def", "_param", "(", "self", ")", ":", "class", "Parameters", "(", "object", ")", ":", "def", "__getitem__", "(", "_self", ",", "name", ")", ":", "return", "self", ".", "getParameter", "(", "name", ")", "def", "__setitem__", "(", "_self", ",", "name",...
29.722222
15.388889
def error_buckets(gold, pred, X=None):
    """Group items by error buckets

    Args:
        gold: an array-like of gold labels (ints)
        pred: an array-like of predictions (ints)
        X: an iterable of items
    Returns:
        buckets: A dict of items where buckets[i,j] is a list of items
            with predicted label i and true label j. If X is None, return
            indices instead.

    For a binary problem with (1=positive, 2=negative):
        buckets[1,1] = true positives
        buckets[1,2] = false positives
        buckets[2,1] = false negatives
        buckets[2,2] = true negatives
    """
    gold = arraylike_to_numpy(gold)
    pred = arraylike_to_numpy(pred)
    buckets = defaultdict(list)
    for idx, (y_hat, y_true) in enumerate(zip(pred, gold)):
        # Store the item itself when provided, otherwise its index.
        item = X[idx] if X is not None else idx
        buckets[y_hat, y_true].append(item)
    return buckets
[ "def", "error_buckets", "(", "gold", ",", "pred", ",", "X", "=", "None", ")", ":", "buckets", "=", "defaultdict", "(", "list", ")", "gold", "=", "arraylike_to_numpy", "(", "gold", ")", "pred", "=", "arraylike_to_numpy", "(", "pred", ")", "for", "i", ",...
34.625
14.625
def create_line_generator(self):
    """
    Creates a generator function yielding lines in the file.

    Handles gzip-compressed files (``.gz`` suffix) transparently and the
    Python 2/3 encoding differences. Lines are streamed one at a time —
    the previous implementation called ``gz.readlines()``, which loaded
    the whole decompressed file into memory. Context managers guarantee
    the handle is closed even if the generator is abandoned mid-iteration.
    """
    if self.file_name.endswith(".gz"):
        # gzip text mode; Python 2's gzip.open has no encoding parameter.
        if sys.version_info.major == 3:
            handle = gzip.open(self.file_name, mode='rt',
                               encoding=self.encoding)
        else:
            handle = gzip.open(self.file_name, mode='rt')
        with handle:
            for line in handle:
                yield line
    else:
        if sys.version_info.major == 3:
            # Python 3 native `open` is much faster
            handle = open(self.file_name, mode='r', encoding=self.encoding)
        else:
            # Python 2 needs the codecs package to deal with encoding
            handle = codecs.open(self.file_name, mode='r',
                                 encoding=self.encoding)
        with handle:
            for line in handle:
                yield line
[ "def", "create_line_generator", "(", "self", ")", ":", "if", "self", ".", "file_name", ".", "endswith", "(", "\".gz\"", ")", ":", "if", "sys", ".", "version_info", ".", "major", "==", "3", ":", "gz", "=", "gzip", ".", "open", "(", "self", ".", "file_...
33.071429
20.5
def validateElement(self, ctxt, elem):
    """Try to validate the subtree under an element

    Unwraps the optional wrapper objects to their underlying C handles
    and delegates to libxml2mod.
    """
    ctxt__o = None if ctxt is None else ctxt._o
    elem__o = None if elem is None else elem._o
    return libxml2mod.xmlValidateElement(ctxt__o, self._o, elem__o)
[ "def", "validateElement", "(", "self", ",", "ctxt", ",", "elem", ")", ":", "if", "ctxt", "is", "None", ":", "ctxt__o", "=", "None", "else", ":", "ctxt__o", "=", "ctxt", ".", "_o", "if", "elem", "is", "None", ":", "elem__o", "=", "None", "else", ":"...
40.625
9.25
def find_share_ring(self, tree_map, parent_map, r):
    """
    Get a ring structure that tends to share nodes with the tree.

    Returns a list of nodes starting from `r`, built by recursively
    visiting `r`'s children (its tree neighbours minus its parent); the
    subtree list of the last child is reversed so the ring closes back
    toward `r`.
    """
    neighbours = set(tree_map[r])
    children = neighbours - set([parent_map[r]])
    if not children:
        return [r]
    ring = [r]
    for visited, child in enumerate(children, start=1):
        sub_ring = self.find_share_ring(tree_map, parent_map, child)
        if visited == len(children):
            # Last child: walk its segment backwards to close the ring.
            sub_ring.reverse()
        ring += sub_ring
    return ring
[ "def", "find_share_ring", "(", "self", ",", "tree_map", ",", "parent_map", ",", "r", ")", ":", "nset", "=", "set", "(", "tree_map", "[", "r", "]", ")", "cset", "=", "nset", "-", "set", "(", "[", "parent_map", "[", "r", "]", "]", ")", "if", "len",...
30.166667
13.833333
def read_response(self, response=None):
    '''Read the response's HTTP status line and header fields.

    Reads header lines from the connection until a blank line, enforcing
    a 32 KiB total header limit, and parses them into `response`.

    Coroutine.
    '''
    _logger.debug('Reading header.')

    if response is None:
        response = Response()

    header_lines = []
    bytes_read = 0

    while True:
        try:
            data = yield from self._connection.readline()
        except ValueError as error:
            # readline raises ValueError on over-long lines.
            raise ProtocolError(
                'Invalid header: {0}'.format(error)) from error

        self._data_event_dispatcher.notify_read(data)

        if not data.endswith(b'\n'):
            # A partial line means the peer closed the connection.
            raise NetworkError('Connection closed.')
        elif data in (b'\r\n', b'\n'):
            # Blank line terminates the header section.
            break

        header_lines.append(data)
        assert data.endswith(b'\n')

        bytes_read += len(data)

        if bytes_read > 32768:
            raise ProtocolError('Header too big.')

    if not header_lines:
        raise ProtocolError('No header received.')

    response.parse(b''.join(header_lines))

    return response
[ "def", "read_response", "(", "self", ",", "response", "=", "None", ")", ":", "_logger", ".", "debug", "(", "'Reading header.'", ")", "if", "response", "is", "None", ":", "response", "=", "Response", "(", ")", "header_lines", "=", "[", "]", "bytes_read", ...
26.487805
20.487805
def classify_regions(dataset, masks, method='ERF', threshold=0.08, remove_overlap=True, regularization='scale', output='summary', studies=None, features=None, class_weight='auto', classifier=None, cross_val='4-Fold', param_grid=None, scoring='accuracy'): """ Perform classification on specified regions Given a set of masks, this function retrieves studies associated with each mask at the specified threshold, optionally removes overlap and filters by studies and features. Then it trains an algorithm to classify studies based on features and tests performance. Args: dataset: a Neurosynth dataset maks: a list of paths to Nifti masks method: a string indicating which method to used. 'SVM': Support Vector Classifier with rbf kernel 'ERF': Extremely Randomized Forest classifier 'Dummy': A dummy classifier using stratified classes as predictor threshold: percentage of voxels active within the mask for study to be included remove_overlap: A boolean indicating if studies studies that appear in more than one mask should be excluded regularization: A string indicating type of regularization to use. If None, performs no regularization. 'scale': Unit scale without demeaning output: A string indicating output type 'summary': Dictionary with summary statistics including score and n 'summary_clf': Same as above but also includes classifier 'clf': Only returns classifier Warning: using cv without grid will return an untrained classifier studies: An optional list of study names used to constrain the set used in classification. If None, will use all features in the dataset. features: An optional list of feature names used to constrain the set used in classification. If None, will use all features in the dataset. class_weight: Parameter to pass to classifier determining how to weight classes classifier: An optional sci-kit learn classifier to use instead of pre-set up classifiers set up using 'method' cross_val: A string indicating type of cross validation to use. 
Can also pass a scikit_classifier param_grid: A dictionary indicating which parameters to optimize using GridSearchCV. If None, no GridSearch will be used Returns: A tuple (X, y) of np arrays. X is a feature by studies matrix and y is a vector of class labels """ (X, y) = get_studies_by_regions(dataset, masks, threshold, remove_overlap, studies, features, regularization=regularization) return classify(X, y, method, classifier, output, cross_val, class_weight, scoring=scoring, param_grid=param_grid)
[ "def", "classify_regions", "(", "dataset", ",", "masks", ",", "method", "=", "'ERF'", ",", "threshold", "=", "0.08", ",", "remove_overlap", "=", "True", ",", "regularization", "=", "'scale'", ",", "output", "=", "'summary'", ",", "studies", "=", "None", ",...
53.45
25.116667
def _prepare_client(client_or_address): """ :param client_or_address: one of: * None * verbatim: 'local' * string address * a Client instance :return: a tuple: (Client instance, shutdown callback function). :raises: ValueError if no valid client input was provided. """ if client_or_address is None or str(client_or_address).lower() == 'local': local_cluster = LocalCluster(diagnostics_port=None) client = Client(local_cluster) def close_client_and_local_cluster(verbose=False): if verbose: print('shutting down client and local cluster') client.close() local_cluster.close() return client, close_client_and_local_cluster elif isinstance(client_or_address, str) and client_or_address.lower() != 'local': client = Client(client_or_address) def close_client(verbose=False): if verbose: print('shutting down client') client.close() return client, close_client elif isinstance(client_or_address, Client): def close_dummy(verbose=False): if verbose: print('not shutting down client, client was created externally') return None return client_or_address, close_dummy else: raise ValueError("Invalid client specified {}".format(str(client_or_address)))
[ "def", "_prepare_client", "(", "client_or_address", ")", ":", "if", "client_or_address", "is", "None", "or", "str", "(", "client_or_address", ")", ".", "lower", "(", ")", "==", "'local'", ":", "local_cluster", "=", "LocalCluster", "(", "diagnostics_port", "=", ...
29.851064
21.978723
def search_ap(self, mode, query): """搜索接入点 查看指定接入点的所有配置信息,包括所有监听端口的配置。 Args: - mode: 搜索模式,可以是domain、ip、host - query: 搜索文本 Returns: 返回一个tuple对象,其格式为(<result>, <ResponseInfo>) - result 成功返回搜索结果,失败返回{"error": "<errMsg string>"} - ResponseInfo 请求的Response信息 """ url = '{0}/v3/aps/search?{1}={2}'.format(self.host, mode, query) return self.__get(url)
[ "def", "search_ap", "(", "self", ",", "mode", ",", "query", ")", ":", "url", "=", "'{0}/v3/aps/search?{1}={2}'", ".", "format", "(", "self", ".", "host", ",", "mode", ",", "query", ")", "return", "self", ".", "__get", "(", "url", ")" ]
29.25
18.1875
def get_store_credit_by_id(cls, store_credit_id, **kwargs): """Find StoreCredit Return single instance of StoreCredit by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_store_credit_by_id(store_credit_id, async=True) >>> result = thread.get() :param async bool :param str store_credit_id: ID of storeCredit to return (required) :return: StoreCredit If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_store_credit_by_id_with_http_info(store_credit_id, **kwargs) else: (data) = cls._get_store_credit_by_id_with_http_info(store_credit_id, **kwargs) return data
[ "def", "get_store_credit_by_id", "(", "cls", ",", "store_credit_id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_get_store_credi...
43.47619
21.380952
def namedAny(name): """ Retrieve a Python object by its fully qualified name from the global Python module namespace. The first part of the name, that describes a module, will be discovered and imported. Each subsequent part of the name is treated as the name of an attribute of the object specified by all of the name which came before it. For example, the fully-qualified name of this object is 'twisted.python.reflect.namedAny'. @type name: L{str} @param name: The name of the object to return. @raise InvalidName: If the name is an empty string, starts or ends with a '.', or is otherwise syntactically incorrect. @raise ModuleNotFound: If the name is syntactically correct but the module it specifies cannot be imported because it does not appear to exist. @raise ObjectNotFound: If the name is syntactically correct, includes at least one '.', but the module it specifies cannot be imported because it does not appear to exist. @raise AttributeError: If an attribute of an object along the way cannot be accessed, or a module along the way is not found. @return: the Python object identified by 'name'. """ if not name: raise InvalidName('Empty module name') names = name.split('.') # if the name starts or ends with a '.' or contains '..', the __import__ # will raise an 'Empty module name' error. This will provide a better error # message. if '' in names: raise InvalidName( "name must be a string giving a '.'-separated list of Python " "identifiers, not %r" % (name,)) topLevelPackage = None moduleNames = names[:] while not topLevelPackage: if moduleNames: trialname = '.'.join(moduleNames) try: topLevelPackage = _importAndCheckStack(trialname) except _NoModuleFound: moduleNames.pop() else: if len(names) == 1: raise ModuleNotFound("No module named %r" % (name,)) else: raise ObjectNotFound('%r does not name an object' % (name,)) obj = topLevelPackage for n in names[1:]: obj = getattr(obj, n) return obj
[ "def", "namedAny", "(", "name", ")", ":", "if", "not", "name", ":", "raise", "InvalidName", "(", "'Empty module name'", ")", "names", "=", "name", ".", "split", "(", "'.'", ")", "# if the name starts or ends with a '.' or contains '..', the __import__", "# will raise ...
36.344262
24.540984
def can_create_log_with_record_types(self, log_record_types): """Tests if this user can create a single ``Log`` using the desired record types. While ``LoggingManager.getLogRecordTypes()`` can be used to examine which records are supported, this method tests which record(s) are required for creating a specific ``Log``. Providing an empty array tests if a ``Log`` can be created with no records. arg: log_record_types (osid.type.Type[]): array of log record types return: (boolean) - ``true`` if ``Log`` creation using the specified record ``Types`` is supported, ``false`` otherwise raise: NullArgument - ``log_record_types`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinAdminSession.can_create_bin_with_record_types # NOTE: It is expected that real authentication hints will be # handled in a service adapter above the pay grade of this impl. if self._catalog_session is not None: return self._catalog_session.can_create_catalog_with_record_types(catalog_record_types=log_record_types) return True
[ "def", "can_create_log_with_record_types", "(", "self", ",", "log_record_types", ")", ":", "# Implemented from template for", "# osid.resource.BinAdminSession.can_create_bin_with_record_types", "# NOTE: It is expected that real authentication hints will be", "# handled in a service adapter abo...
50.44
25.4
def opticalModel(sim, ver: xarray.DataArray, obsAlt_km: float, zenithang: float): """ ver: Nalt x Nwavelength """ assert isinstance(ver, xarray.DataArray) # %% get system optical transmission T optT = getSystemT(ver.wavelength_nm, sim.bg3fn, sim.windowfn, sim.qefn, obsAlt_km, zenithang) # %% first multiply VER by T, THEN sum overall wavelengths if sim.opticalfilter == 'bg3': VERgray = (ver*optT['sys'].values[None, :]).sum('wavelength_nm') elif sim.opticalfilter == 'none': VERgray = (ver*optT['sysNObg3'].values[None, :]).sum('wavelength_nm') else: logging.warning(f'unknown OpticalFilter type: {sim.opticalfilter}' ' falling back to using no filter at all') VERgray = (ver*optT['sysNObg3'].values[None, :]).sum('wavelength_nm') return VERgray
[ "def", "opticalModel", "(", "sim", ",", "ver", ":", "xarray", ".", "DataArray", ",", "obsAlt_km", ":", "float", ",", "zenithang", ":", "float", ")", ":", "assert", "isinstance", "(", "ver", ",", "xarray", ".", "DataArray", ")", "# %% get system optical trans...
43.421053
23.421053