text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def receive(self, path, diff, showProgress=True):
    """Return a context manager for a stream that will store a diff.

    Spawns ``btrfs receive`` targeting the directory containing *path*
    and returns a writer bound to the process's stdin, or ``None`` when
    running in dry-run mode.
    """
    target_dir = os.path.dirname(path)
    cmd = ["btrfs", "receive", "-e", target_dir]

    # In dry-run mode just log the command and do nothing.
    if Store.skipDryRun(logger, self.dryrun)("Command: %s", cmd):
        return None

    if not os.path.exists(target_dir):
        os.makedirs(target_dir)

    process = subprocess.Popen(
        cmd,
        stdin=subprocess.PIPE,
        stderr=subprocess.PIPE,
        stdout=DEVNULL,
    )
    _makeNice(process)

    return _Writer(process, process.stdin, path, diff, showProgress)
[ "def", "receive", "(", "self", ",", "path", ",", "diff", ",", "showProgress", "=", "True", ")", ":", "directory", "=", "os", ".", "path", ".", "dirname", "(", "path", ")", "cmd", "=", "[", "\"btrfs\"", ",", "\"receive\"", ",", "\"-e\"", ",", "directory", "]", "if", "Store", ".", "skipDryRun", "(", "logger", ",", "self", ".", "dryrun", ")", "(", "\"Command: %s\"", ",", "cmd", ")", ":", "return", "None", "if", "not", "os", ".", "path", ".", "exists", "(", "directory", ")", ":", "os", ".", "makedirs", "(", "directory", ")", "process", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "stdout", "=", "DEVNULL", ",", ")", "_makeNice", "(", "process", ")", "return", "_Writer", "(", "process", ",", "process", ".", "stdin", ",", "path", ",", "diff", ",", "showProgress", ")" ]
30.333333
19.238095
def split(self, indice=None):
    """ Splits the Stack, either in half, or at the given indice, into two
    separate Stacks.

    :arg int indice:
        Optional. The indice to split the Stack at. Defaults to the
        middle of the ``Stack``.

    :returns:
        The two parts of the Stack, as separate Stack instances.
    """
    self_size = self.size
    if self_size > 1:
        # Bug fix: test explicitly for ``None`` so that ``indice=0`` splits
        # at the start instead of silently falling back to the middle
        # (``not 0`` is truthy).
        if indice is None:
            indice = self_size // 2
        return Stack(cards=self[0:indice]), Stack(cards=self[indice::])
    else:
        # A 0- or 1-card stack cannot be split; return a copy and an
        # empty Stack.
        return Stack(cards=self.cards), Stack()
[ "def", "split", "(", "self", ",", "indice", "=", "None", ")", ":", "self_size", "=", "self", ".", "size", "if", "self_size", ">", "1", ":", "if", "not", "indice", ":", "mid", "=", "self_size", "//", "2", "return", "Stack", "(", "cards", "=", "self", "[", "0", ":", "mid", "]", ")", ",", "Stack", "(", "cards", "=", "self", "[", "mid", ":", ":", "]", ")", "else", ":", "return", "Stack", "(", "cards", "=", "self", "[", "0", ":", "indice", "]", ")", ",", "Stack", "(", "cards", "=", "self", "[", "indice", ":", ":", "]", ")", "else", ":", "return", "Stack", "(", "cards", "=", "self", ".", "cards", ")", ",", "Stack", "(", ")" ]
32.454545
21.545455
def collect_dirs(path, max_depth=1, followlinks=True):
    """Recursively find directories under the given path.

    :param path: root directory to scan; returns ``[]`` when it is not
        an existing directory.
    :param max_depth: how many levels to descend below *path*;
        ``0`` returns only *path* itself.
    :param followlinks: when ``False``, symlinked directories are listed
        but not descended into.
    :return: de-duplicated list of directory paths (order unspecified).
    """
    if not os.path.isdir(path):
        return []
    found = [path]
    if max_depth == 0:
        return found
    for entry in os.listdir(path):
        subdir = os.path.join(path, entry)
        if not os.path.isdir(subdir):
            continue
        found.append(subdir)
        if os.path.islink(subdir) and not followlinks:
            continue
        if max_depth > 0:
            # Bug fix: propagate ``followlinks`` so the symlink policy also
            # applies below the first level (it was silently reset to the
            # default on every recursive call).
            found += collect_dirs(subdir, max_depth=max_depth - 1,
                                  followlinks=followlinks)
    return list(set(found))
[ "def", "collect_dirs", "(", "path", ",", "max_depth", "=", "1", ",", "followlinks", "=", "True", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "return", "[", "]", "o", "=", "[", "path", "]", "if", "max_depth", "==", "0", ":", "return", "o", "for", "subdir", "in", "os", ".", "listdir", "(", "path", ")", ":", "subdir", "=", "os", ".", "path", ".", "join", "(", "path", ",", "subdir", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "subdir", ")", ":", "continue", "o", "+=", "[", "subdir", "]", "if", "os", ".", "path", ".", "islink", "(", "subdir", ")", "and", "not", "followlinks", ":", "continue", "if", "max_depth", ">", "0", ":", "o", "+=", "collect_dirs", "(", "subdir", ",", "max_depth", "=", "max_depth", "-", "1", ")", "return", "list", "(", "set", "(", "o", ")", ")" ]
20.518519
24.148148
def _show_annotation_box(self, event):
    """Update an existing box or create an annotation box for an event."""
    ax = event.artist.axes
    # Get the pre-created annotation box for the axes or create a new one.
    if self.display != 'multiple':
        # Non-'multiple' display modes reuse the annotation box that was
        # pre-created for these axes.
        annotation = self.annotations[ax]
    elif event.mouseevent in self.annotations:
        # Avoid creating multiple datacursors for the same click event
        # when several artists are selected.
        annotation = self.annotations[event.mouseevent]
    else:
        # 'multiple' mode: create a fresh annotation for this click and
        # remember it keyed by the mouse event.
        annotation = self.annotate(ax, **self._annotation_kwargs)
        self.annotations[event.mouseevent] = annotation
    if self.display == 'single':
        # Hide any other annotation boxes...
        for ann in self.annotations.values():
            ann.set_visible(False)
    self.update(event, annotation)
[ "def", "_show_annotation_box", "(", "self", ",", "event", ")", ":", "ax", "=", "event", ".", "artist", ".", "axes", "# Get the pre-created annotation box for the axes or create a new one.", "if", "self", ".", "display", "!=", "'multiple'", ":", "annotation", "=", "self", ".", "annotations", "[", "ax", "]", "elif", "event", ".", "mouseevent", "in", "self", ".", "annotations", ":", "# Avoid creating multiple datacursors for the same click event", "# when several artists are selected.", "annotation", "=", "self", ".", "annotations", "[", "event", ".", "mouseevent", "]", "else", ":", "annotation", "=", "self", ".", "annotate", "(", "ax", ",", "*", "*", "self", ".", "_annotation_kwargs", ")", "self", ".", "annotations", "[", "event", ".", "mouseevent", "]", "=", "annotation", "if", "self", ".", "display", "==", "'single'", ":", "# Hide any other annotation boxes...", "for", "ann", "in", "self", ".", "annotations", ".", "values", "(", ")", ":", "ann", ".", "set_visible", "(", "False", ")", "self", ".", "update", "(", "event", ",", "annotation", ")" ]
44.4
15.4
def atom_fractions(self):
    r'''Dictionary of atom:fractional occurence of the elements in a
    chemical. Useful when performing element balances. For mass-fraction
    occurences, see :obj:`mass_fractions`.

    Examples
    --------
    >>> Chemical('Ammonium aluminium sulfate').atom_fractions
    {'H': 0.25, 'S': 0.125, 'Al': 0.0625, 'O': 0.5, 'N': 0.0625}
    '''
    # Lazily compute and memoize the result on first access.
    if not self.__atom_fractions:
        self.__atom_fractions = atom_fractions(self.atoms)
    return self.__atom_fractions
[ "def", "atom_fractions", "(", "self", ")", ":", "if", "self", ".", "__atom_fractions", ":", "return", "self", ".", "__atom_fractions", "else", ":", "self", ".", "__atom_fractions", "=", "atom_fractions", "(", "self", ".", "atoms", ")", "return", "self", ".", "__atom_fractions" ]
38.866667
21
def _create_parsing_plan(self, desired_type: Type[T], filesystem_object: PersistedObject, logger: Logger,
                         log_only_last: bool = False):
    """
    Adds a log message and creates a recursive parsing plan.

    :param desired_type: the type of object that should be built from the persisted object
    :param filesystem_object: the persisted (filesystem) object to parse
    :param logger: the logger that receives the debug message
    :param log_only_last: a flag to only log the last part of the file path (default False)
    :return: an ``AnyParser._RecursiveParsingPlan`` bound to this parser, the desired type,
        the filesystem object and the logger
    """
    logger.debug('(B) ' + get_parsing_plan_log_str(filesystem_object, desired_type,
                                                   log_only_last=log_only_last, parser=self))
    return AnyParser._RecursiveParsingPlan(desired_type, filesystem_object, self, logger)
[ "def", "_create_parsing_plan", "(", "self", ",", "desired_type", ":", "Type", "[", "T", "]", ",", "filesystem_object", ":", "PersistedObject", ",", "logger", ":", "Logger", ",", "log_only_last", ":", "bool", "=", "False", ")", ":", "logger", ".", "debug", "(", "'(B) '", "+", "get_parsing_plan_log_str", "(", "filesystem_object", ",", "desired_type", ",", "log_only_last", "=", "log_only_last", ",", "parser", "=", "self", ")", ")", "return", "AnyParser", ".", "_RecursiveParsingPlan", "(", "desired_type", ",", "filesystem_object", ",", "self", ",", "logger", ")" ]
51.428571
30
def yaml_conf_as_dict(file_path, encoding=None):
    """Read a YAML configuration file and return its content as a dict.

    :param file_path: path of the YAML configuration file to read
    :param encoding: file encoding (only honoured on Python 3)
    :return: tuple ``(flag, d, msg)`` where

        * flag: (bool) True when the file was read successfully,
          False otherwise
        * d: (dict) the configuration content, keys in file order
        * msg: (str) status message
    """
    if not pathlib.Path(file_path).is_file():
        return False, {}, 'File not exist'

    try:
        if sys.version > '3':
            with open(file_path, 'r', encoding=encoding) as f:
                # safe_load avoids arbitrary Python object construction and
                # works on PyYAML >= 5.1/6.0, where plain ``load`` warns or
                # requires an explicit Loader argument.
                d = OrderedDict(yaml.safe_load(f.read()))
        else:
            with open(file_path, 'r') as f:
                d = OrderedDict(yaml.safe_load(f.read()))
        return True, d, 'Success'
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # still propagate; typo 'Unknow' fixed.
        return False, {}, 'Unknown error'
[ "def", "yaml_conf_as_dict", "(", "file_path", ",", "encoding", "=", "None", ")", ":", "if", "not", "pathlib", ".", "Path", "(", "file_path", ")", ".", "is_file", "(", ")", ":", "return", "False", ",", "{", "}", ",", "'File not exist'", "try", ":", "if", "sys", ".", "version", ">", "'3'", ":", "with", "open", "(", "file_path", ",", "'r'", ",", "encoding", "=", "encoding", ")", "as", "f", ":", "d", "=", "OrderedDict", "(", "yaml", ".", "load", "(", "f", ".", "read", "(", ")", ")", ")", "return", "True", ",", "d", ",", "'Success'", "else", ":", "with", "open", "(", "file_path", ",", "'r'", ")", "as", "f", ":", "d", "=", "OrderedDict", "(", "yaml", ".", "load", "(", "f", ".", "read", "(", ")", ")", ")", "return", "True", ",", "d", ",", "'Success'", "except", ":", "return", "False", ",", "{", "}", ",", "'Unknow error'" ]
28.807692
18.153846
def discover(timeout=5, include_invisible=False, interface_addr=None,
             all_households=False):
    """ Discover Sonos zones on the local network.

    Return a set of `SoCo` instances for each zone found.
    Include invisible zones (bridges and slave zones in stereo pairs if
    ``include_invisible`` is `True`. Will block for up to ``timeout``
    seconds, after which return `None` if no zones found.

    Args:
        timeout (int, optional): block for this many seconds, at most.
            Defaults to 5.
        include_invisible (bool, optional): include invisible zones in the
            return set. Defaults to `False`.
        interface_addr (str or None): Discovery operates by sending UDP
            multicast datagrams. ``interface_addr`` is a string (dotted
            quad) representation of the network interface address to use as
            the source of the datagrams (i.e. it is a value for
            `socket.IP_MULTICAST_IF <socket>`). If `None` or not specified,
            all system interfaces will be tried. Defaults to `None`.
        all_households (bool, optional): wait for all replies to discover
            multiple households. If `False` or not specified, return only
            the first household found.

    Returns:
        set: a set of `SoCo` instances, one for each zone found, or else
            `None`.
    """
    found_zones = set()
    # Monotonic timestamp of the first zone reply, set by the callback.
    first_response = None

    def callback(zone):
        # Invoked by the discovery thread for every zone that answers.
        nonlocal first_response
        if first_response is None:
            first_response = time.monotonic()
        if include_invisible:
            found_zones.update(zone.all_zones)
        else:
            found_zones.update(zone.visible_zones)
        if not all_households:
            # One household is enough: ask the discovery thread to stop.
            thread.stop()

    thread = discover_thread(
        callback, timeout, include_invisible, interface_addr)
    while thread.is_alive() and not thread.stopped():
        if first_response is None:
            # Nothing heard yet: poll in one-second slices.
            thread.join(timeout=1)
        else:
            # After the first reply, wait about one more second for
            # further replies before giving up.
            thread.join(timeout=first_response + 1 - time.monotonic())
    thread.stop()
    return found_zones or None
[ "def", "discover", "(", "timeout", "=", "5", ",", "include_invisible", "=", "False", ",", "interface_addr", "=", "None", ",", "all_households", "=", "False", ")", ":", "found_zones", "=", "set", "(", ")", "first_response", "=", "None", "def", "callback", "(", "zone", ")", ":", "nonlocal", "first_response", "if", "first_response", "is", "None", ":", "first_response", "=", "time", ".", "monotonic", "(", ")", "if", "include_invisible", ":", "found_zones", ".", "update", "(", "zone", ".", "all_zones", ")", "else", ":", "found_zones", ".", "update", "(", "zone", ".", "visible_zones", ")", "if", "not", "all_households", ":", "thread", ".", "stop", "(", ")", "thread", "=", "discover_thread", "(", "callback", ",", "timeout", ",", "include_invisible", ",", "interface_addr", ")", "while", "thread", ".", "is_alive", "(", ")", "and", "not", "thread", ".", "stopped", "(", ")", ":", "if", "first_response", "is", "None", ":", "thread", ".", "join", "(", "timeout", "=", "1", ")", "else", ":", "thread", ".", "join", "(", "timeout", "=", "first_response", "+", "1", "-", "time", ".", "monotonic", "(", ")", ")", "thread", ".", "stop", "(", ")", "return", "found_zones", "or", "None" ]
36.137931
21.706897
def acquire(self, blocking=1):
    """Acquire the underlying lock, logging before and after.

    :param blocking: passed straight through to ``lock.acquire``; when
        true the call blocks until the lock is available.
    """
    # ``threading.currentThread()``/``getName()`` were deprecated camelCase
    # aliases and are removed in Python 3.12; use the modern spellings.
    threadname = threading.current_thread().name
    log.debug(LOG_THREAD, "Acquire %s for %s", self.name, threadname)
    self.lock.acquire(blocking)
    log.debug(LOG_THREAD, "...acquired %s for %s", self.name, threadname)
[ "def", "acquire", "(", "self", ",", "blocking", "=", "1", ")", ":", "threadname", "=", "threading", ".", "currentThread", "(", ")", ".", "getName", "(", ")", "log", ".", "debug", "(", "LOG_THREAD", ",", "\"Acquire %s for %s\"", ",", "self", ".", "name", ",", "threadname", ")", "self", ".", "lock", ".", "acquire", "(", "blocking", ")", "log", ".", "debug", "(", "LOG_THREAD", ",", "\"...acquired %s for %s\"", ",", "self", ".", "name", ",", "threadname", ")" ]
49.833333
16.666667
def load_dependencies(req, history=None):
    """
    Load the dependency tree as a Python object tree,
    suitable for JSON serialization.

    >>> deps = load_dependencies('jaraco.packaging')
    >>> import json
    >>> doc = json.dumps(deps)
    """
    history = set() if history is None else history
    dist = pkg_resources.get_distribution(req)
    spec = {
        'requirement': str(req),
        'resolved': str(dist),
    }
    if req not in history:
        # First visit for this requirement: record it, then recurse into
        # its children (history prevents infinite cycles).
        history.add(req)
        extras = parse_extras(req)
        depends = [
            load_dependencies(dep, history=history)
            for dep in dist.requires(extras=extras)
        ]
        if depends:
            spec['depends'] = depends
    return spec
[ "def", "load_dependencies", "(", "req", ",", "history", "=", "None", ")", ":", "if", "history", "is", "None", ":", "history", "=", "set", "(", ")", "dist", "=", "pkg_resources", ".", "get_distribution", "(", "req", ")", "spec", "=", "dict", "(", "requirement", "=", "str", "(", "req", ")", ",", "resolved", "=", "str", "(", "dist", ")", ",", ")", "if", "req", "not", "in", "history", ":", "# traverse into children", "history", ".", "add", "(", "req", ")", "extras", "=", "parse_extras", "(", "req", ")", "depends", "=", "[", "load_dependencies", "(", "dep", ",", "history", "=", "history", ")", "for", "dep", "in", "dist", ".", "requires", "(", "extras", "=", "extras", ")", "]", "if", "depends", ":", "spec", ".", "update", "(", "depends", "=", "depends", ")", "return", "spec" ]
27.185185
14.37037
def set_headers(self) -> None:
    """Sets the content and caching headers on the response.

    .. versionadded:: 3.1
    """
    self.set_header("Accept-Ranges", "bytes")
    self.set_etag_header()

    modified = self.modified
    if modified is not None:
        self.set_header("Last-Modified", modified)

    content_type = self.get_content_type()
    if content_type:
        self.set_header("Content-Type", content_type)

    cache_time = self.get_cache_time(self.path, modified, content_type)
    if cache_time > 0:
        # Emit both an absolute expiry and a relative max-age.
        expiry = datetime.datetime.utcnow() + datetime.timedelta(
            seconds=cache_time
        )
        self.set_header("Expires", expiry)
        self.set_header("Cache-Control", "max-age=" + str(cache_time))

    self.set_extra_headers(self.path)
[ "def", "set_headers", "(", "self", ")", "->", "None", ":", "self", ".", "set_header", "(", "\"Accept-Ranges\"", ",", "\"bytes\"", ")", "self", ".", "set_etag_header", "(", ")", "if", "self", ".", "modified", "is", "not", "None", ":", "self", ".", "set_header", "(", "\"Last-Modified\"", ",", "self", ".", "modified", ")", "content_type", "=", "self", ".", "get_content_type", "(", ")", "if", "content_type", ":", "self", ".", "set_header", "(", "\"Content-Type\"", ",", "content_type", ")", "cache_time", "=", "self", ".", "get_cache_time", "(", "self", ".", "path", ",", "self", ".", "modified", ",", "content_type", ")", "if", "cache_time", ">", "0", ":", "self", ".", "set_header", "(", "\"Expires\"", ",", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "+", "datetime", ".", "timedelta", "(", "seconds", "=", "cache_time", ")", ",", ")", "self", ".", "set_header", "(", "\"Cache-Control\"", ",", "\"max-age=\"", "+", "str", "(", "cache_time", ")", ")", "self", ".", "set_extra_headers", "(", "self", ".", "path", ")" ]
33.666667
20.291667
def load_truetype_font(
    path: str, tile_width: int, tile_height: int
) -> Tileset:
    """Return a new Tileset from a `.ttf` or `.otf` file.

    Same as :any:`set_truetype_font`, but returns a :any:`Tileset` instead.
    You can send this Tileset to :any:`set_default`.

    This function is provisional.  The API may change.
    """
    if not os.path.exists(path):
        raise RuntimeError("File not found:\n\t%s" % (os.path.realpath(path),))
    # Load via the C library, then wrap the raw pointer in a Tileset that
    # owns it.
    tileset_p = lib.TCOD_load_truetype_font_(
        path.encode(), tile_width, tile_height
    )
    return Tileset._claim(tileset_p)
[ "def", "load_truetype_font", "(", "path", ":", "str", ",", "tile_width", ":", "int", ",", "tile_height", ":", "int", ")", "->", "Tileset", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "raise", "RuntimeError", "(", "\"File not found:\\n\\t%s\"", "%", "(", "os", ".", "path", ".", "realpath", "(", "path", ")", ",", ")", ")", "return", "Tileset", ".", "_claim", "(", "lib", ".", "TCOD_load_truetype_font_", "(", "path", ".", "encode", "(", ")", ",", "tile_width", ",", "tile_height", ")", ")" ]
36.466667
21.666667
def ToScriptHash(self, address):
    """
    Retrieve the script_hash based from an address.

    Args:
        address (str): a base58 encoded address.

    Raises:
        ValueError: if an invalid address is supplied or the coin version
            is incorrect
        Exception: if the address string does not start with 'A' or the
            checksum fails

    Returns:
        UInt160: script hash.
    """
    # Validate with guard clauses: length, prefix, coin version, checksum.
    if len(address) != 34:
        raise ValueError('Not correct Address, wrong length.')
    if address[0] != 'A':
        raise Exception('Address format error')
    data = b58decode(address)
    if data[0] != self.AddressVersion:
        raise ValueError('Not correct Coin Version')
    checksum = Crypto.Default().Hash256(data[:21])[:4]
    if checksum != data[21:]:
        raise Exception('Address format error')
    return UInt160(data=data[1:21])
[ "def", "ToScriptHash", "(", "self", ",", "address", ")", ":", "if", "len", "(", "address", ")", "==", "34", ":", "if", "address", "[", "0", "]", "==", "'A'", ":", "data", "=", "b58decode", "(", "address", ")", "if", "data", "[", "0", "]", "!=", "self", ".", "AddressVersion", ":", "raise", "ValueError", "(", "'Not correct Coin Version'", ")", "checksum", "=", "Crypto", ".", "Default", "(", ")", ".", "Hash256", "(", "data", "[", ":", "21", "]", ")", "[", ":", "4", "]", "if", "checksum", "!=", "data", "[", "21", ":", "]", ":", "raise", "Exception", "(", "'Address format error'", ")", "return", "UInt160", "(", "data", "=", "data", "[", "1", ":", "21", "]", ")", "else", ":", "raise", "Exception", "(", "'Address format error'", ")", "else", ":", "raise", "ValueError", "(", "'Not correct Address, wrong length.'", ")" ]
35.75
20.535714
def _prepare_url_params(tile_id, bbox, end_date, start_date, absolute_orbit): """ Constructs dict with URL params :param tile_id: original tile identification string provided by ESA (e.g. 'S2A_OPER_MSI_L1C_TL_SGS__20160109T230542_A002870_T10UEV_N02.01') :type tile_id: str :param bbox: bounding box of requested area in WGS84 CRS :type bbox: geometry.BBox :param start_date: beginning of time range in ISO8601 format :type start_date: str :param end_date: end of time range in ISO8601 format :type end_date: str :param absolute_orbit: An absolute orbit number of Sentinel-2 L1C products as defined by ESA :type absolute_orbit: int :return: dictionary with parameters as properties when arguments not None :rtype: dict """ url_params = { 'identifier': tile_id, 'startDate': start_date, 'completionDate': end_date, 'orbitNumber': absolute_orbit, 'box': bbox } return {key: str(value) for key, value in url_params.items() if value}
[ "def", "_prepare_url_params", "(", "tile_id", ",", "bbox", ",", "end_date", ",", "start_date", ",", "absolute_orbit", ")", ":", "url_params", "=", "{", "'identifier'", ":", "tile_id", ",", "'startDate'", ":", "start_date", ",", "'completionDate'", ":", "end_date", ",", "'orbitNumber'", ":", "absolute_orbit", ",", "'box'", ":", "bbox", "}", "return", "{", "key", ":", "str", "(", "value", ")", "for", "key", ",", "value", "in", "url_params", ".", "items", "(", ")", "if", "value", "}" ]
41.32
21.8
def seek_end(fileobj, offset):
    """Like fileobj.seek(-offset, 2), but will not try to go beyond the start

    Needed since file objects from BytesIO will not raise IOError and
    file objects from open() will raise IOError if going to a negative
    offset. To make things easier for custom implementations, instead of
    allowing both behaviors, we just don't do it.

    Args:
        fileobj (fileobj)
        offset (int): how many bytes away from the end backwards to seek to

    Raises:
        IOError
    """
    if offset < 0:
        raise ValueError
    # Seek relative to the end only when the file is long enough;
    # otherwise clamp at the very start.
    if get_size(fileobj) >= offset:
        fileobj.seek(-offset, 2)
    else:
        fileobj.seek(0, 0)
[ "def", "seek_end", "(", "fileobj", ",", "offset", ")", ":", "if", "offset", "<", "0", ":", "raise", "ValueError", "if", "get_size", "(", "fileobj", ")", "<", "offset", ":", "fileobj", ".", "seek", "(", "0", ",", "0", ")", "else", ":", "fileobj", ".", "seek", "(", "-", "offset", ",", "2", ")" ]
28.347826
23.565217
def send(self, data, room=None, skip_sid=None, namespace=None,
         callback=None):
    """Send a message to the server.

    Identical to :func:`socketio.Client.send` except that, when the
    ``namespace`` argument is omitted, the namespace associated with the
    class is used instead.
    """
    target_namespace = namespace or self.namespace
    return self.client.send(data, namespace=target_namespace,
                            callback=callback)
[ "def", "send", "(", "self", ",", "data", ",", "room", "=", "None", ",", "skip_sid", "=", "None", ",", "namespace", "=", "None", ",", "callback", "=", "None", ")", ":", "return", "self", ".", "client", ".", "send", "(", "data", ",", "namespace", "=", "namespace", "or", "self", ".", "namespace", ",", "callback", "=", "callback", ")" ]
45.5
18.8
def get_msg_info(yaml_info, topics, parse_header=True):
    '''
    Get info from all of the messages about what they contain
    and will be added to the dataframe

    :param yaml_info: bag metadata dict; must contain a ``'topics'`` list
    :param topics: topic names to inspect
    :param parse_header: whether header fields are included in the paths
    :return: tuple ``(msgs, classes)`` mapping each matched topic to its
        field paths and field types respectively
    '''
    topic_info = yaml_info['topics']
    msgs = {}
    classes = {}
    for topic in topics:
        # Reset per topic so a previous topic's fields never leak through.
        msg_paths = []
        msg_types = {}
        for info in topic_info:
            if info['topic'] == topic:
                msg_class = get_message_class(info['type'])
                if msg_class is None:
                    # Typo fixed ('skpping' -> 'skipping').
                    warnings.warn(
                        'Could not find types for ' + topic + ' skipping ')
                else:
                    (msg_paths, msg_types) = get_base_fields(msg_class(), "",
                                                             parse_header)
                msgs[topic] = msg_paths
                classes[topic] = msg_types
    return (msgs, classes)
[ "def", "get_msg_info", "(", "yaml_info", ",", "topics", ",", "parse_header", "=", "True", ")", ":", "topic_info", "=", "yaml_info", "[", "'topics'", "]", "msgs", "=", "{", "}", "classes", "=", "{", "}", "for", "topic", "in", "topics", ":", "base_key", "=", "get_key_name", "(", "topic", ")", "msg_paths", "=", "[", "]", "msg_types", "=", "{", "}", "for", "info", "in", "topic_info", ":", "if", "info", "[", "'topic'", "]", "==", "topic", ":", "msg_class", "=", "get_message_class", "(", "info", "[", "'type'", "]", ")", "if", "msg_class", "is", "None", ":", "warnings", ".", "warn", "(", "'Could not find types for '", "+", "topic", "+", "' skpping '", ")", "else", ":", "(", "msg_paths", ",", "msg_types", ")", "=", "get_base_fields", "(", "msg_class", "(", ")", ",", "\"\"", ",", "parse_header", ")", "msgs", "[", "topic", "]", "=", "msg_paths", "classes", "[", "topic", "]", "=", "msg_types", "return", "(", "msgs", ",", "classes", ")" ]
35.64
17.32
def vmx_path(self, vmx_path):
    """
    Sets the path to the vmx file.

    :param vmx_path: VMware vmx file
    """
    message = "VMware VM '{name}' [{id}] has set the vmx file path to '{vmx}'"
    log.info(message.format(name=self.name, id=self.id, vmx=vmx_path))
    self._vmx_path = vmx_path
[ "def", "vmx_path", "(", "self", ",", "vmx_path", ")", ":", "log", ".", "info", "(", "\"VMware VM '{name}' [{id}] has set the vmx file path to '{vmx}'\"", ".", "format", "(", "name", "=", "self", ".", "name", ",", "id", "=", "self", ".", "id", ",", "vmx", "=", "vmx_path", ")", ")", "self", ".", "_vmx_path", "=", "vmx_path" ]
32.555556
21.222222
def highpass(self, frequency, gpass=2, gstop=30, fstop=None, type='iir',
             filtfilt=True, **kwargs):
    """Filter this `TimeSeries` with a high-pass filter.

    Parameters
    ----------
    frequency : `float`
        high-pass corner frequency

    gpass : `float`
        the maximum loss in the passband (dB).

    gstop : `float`
        the minimum attenuation in the stopband (dB).

    fstop : `float`
        stop-band edge frequency, defaults to `frequency * 1.5`

    type : `str`
        the filter type, either ``'iir'`` or ``'fir'``

    filtfilt : `bool`
        passed on to `TimeSeries.filter` when applying the
        designed filter

    **kwargs
        other keyword arguments are passed to
        :func:`gwpy.signal.filter_design.highpass`

    Returns
    -------
    hpseries : `TimeSeries`
        a high-passed version of the input `TimeSeries`

    See Also
    --------
    gwpy.signal.filter_design.highpass
        for details on the filter design
    TimeSeries.filter
        for details on how the filter is applied

    .. note::

       When using `scipy < 0.16.0` some higher-order filters may be
       unstable. With `scipy >= 0.16.0` higher-order filters are
       decomposed into second-order-sections, and so are much more stable.
    """
    # Design a digital high-pass filter for this series' sample rate,
    # then apply it to the data.
    sample_rate = self.sample_rate
    filt = filter_design.highpass(
        frequency, sample_rate, fstop=fstop, gpass=gpass,
        gstop=gstop, analog=False, type=type, **kwargs)
    return self.filter(*filt, filtfilt=filtfilt)
[ "def", "highpass", "(", "self", ",", "frequency", ",", "gpass", "=", "2", ",", "gstop", "=", "30", ",", "fstop", "=", "None", ",", "type", "=", "'iir'", ",", "filtfilt", "=", "True", ",", "*", "*", "kwargs", ")", ":", "# design filter", "filt", "=", "filter_design", ".", "highpass", "(", "frequency", ",", "self", ".", "sample_rate", ",", "fstop", "=", "fstop", ",", "gpass", "=", "gpass", ",", "gstop", "=", "gstop", ",", "analog", "=", "False", ",", "type", "=", "type", ",", "*", "*", "kwargs", ")", "# apply filter", "return", "self", ".", "filter", "(", "*", "filt", ",", "filtfilt", "=", "filtfilt", ")" ]
32.367347
22.510204
def adjoint(self):
    """Adjoint, given as scaling with the conjugate of the scalar.

    A real scaling operator is self-adjoint, so ``self`` is returned
    unchanged; for a complex scalar a new `ScalingOperator` with the
    conjugated scalar is created.

    Returns
    -------
    adjoint : `ScalingOperator`
        ``self`` if `scalar` is real, else `scalar` is conjugated.
    """
    if complex(self.scalar).imag == 0.0:
        return self
    return ScalingOperator(self.domain, self.scalar.conjugate())
[ "def", "adjoint", "(", "self", ")", ":", "if", "complex", "(", "self", ".", "scalar", ")", ".", "imag", "==", "0.0", ":", "return", "self", "else", ":", "return", "ScalingOperator", "(", "self", ".", "domain", ",", "self", ".", "scalar", ".", "conjugate", "(", ")", ")" ]
31.914286
16.914286
def format_numbers(number, prefix=""):
    """Formats number in the scientific notation for LaTeX.

    :param number: the number to format.
    :param prefix: a prefix to add before the number (e.g. "p < ").

    :type number: str
    :type prefix: str

    :returns: a string containing the scientific notation of the number.
    :rtype: str
    """
    # Try to split the value into a coefficient and an exponent.
    match = re.match(r"^([-+]?\d*\.\d+|\d+)e([-+]?\d+)$", number)

    if match is None:
        # Not scientific notation: wrap in math mode only when a prefix
        # is requested.
        return "$" + prefix + number + "$" if prefix != "" else number

    coefficient = match.group(1)
    exponent = int(match.group(2))
    return "$" + prefix + coefficient + r"\times 10^{" + str(exponent) + "}$"
[ "def", "format_numbers", "(", "number", ",", "prefix", "=", "\"\"", ")", ":", "# Matching", "r", "=", "re", ".", "match", "(", "r\"^([-+]?\\d*\\.\\d+|\\d+)e([-+]?\\d+)$\"", ",", "number", ")", "# Nothing matched", "if", "not", "r", ":", "if", "prefix", "!=", "\"\"", ":", "return", "\"$\"", "+", "prefix", "+", "number", "+", "\"$\"", "else", ":", "return", "number", "# Getting the coefficient and the exponent", "coefficient", "=", "r", ".", "group", "(", "1", ")", "exponent", "=", "int", "(", "r", ".", "group", "(", "2", ")", ")", "return", "\"$\"", "+", "prefix", "+", "coefficient", "+", "r\"\\times 10^{\"", "+", "str", "(", "exponent", ")", "+", "\"}$\"" ]
26.357143
22.357143
def enumeration(*values, **kwargs):
    ''' Create an |Enumeration| object from a sequence of (unique) strings.

    The order of the positional arguments is the order of the enumeration,
    and the first element will be considered the default value when used
    to create |Enum| properties:

    .. code-block:: python

        #: Specify the horizontal alignment for rendering text
        TextAlign = enumeration("left", "right", "center")

    Args:
        values (str) : string enumeration values, passed as positional arguments

    Keyword Args:
        case_sensitive (bool, optional) :
            Whether validation should consider case or not (default: True)

        quote (bool, optional):
            Whether values should be quoted in the string representations
            (default: False)

    Raises:
        ValueError if values empty, if any value is not a string or not unique

    Returns:
        Enumeration
    '''
    valid = bool(values) and all(
        isinstance(value, string_types) and value for value in values)
    if not valid:
        raise ValueError("expected a non-empty sequence of strings, got %s" % values)
    if len(set(values)) != len(values):
        raise ValueError("enumeration items must be unique, got %s" % values)

    # Each value becomes an attribute of the same name; the underscored
    # entries configure the Enumeration subclass itself.
    attrs = dict((value, value) for value in values)
    attrs["_values"] = list(values)
    attrs["_default"] = values[0]
    attrs["_case_sensitive"] = kwargs.get("case_sensitive", True)
    attrs["_quote"] = kwargs.get("quote", False)
    return type(str("Enumeration"), (Enumeration,), attrs)()
[ "def", "enumeration", "(", "*", "values", ",", "*", "*", "kwargs", ")", ":", "if", "not", "(", "values", "and", "all", "(", "isinstance", "(", "value", ",", "string_types", ")", "and", "value", "for", "value", "in", "values", ")", ")", ":", "raise", "ValueError", "(", "\"expected a non-empty sequence of strings, got %s\"", "%", "values", ")", "if", "len", "(", "values", ")", "!=", "len", "(", "set", "(", "values", ")", ")", ":", "raise", "ValueError", "(", "\"enumeration items must be unique, got %s\"", "%", "values", ")", "attrs", "=", "{", "value", ":", "value", "for", "value", "in", "values", "}", "attrs", ".", "update", "(", "{", "\"_values\"", ":", "list", "(", "values", ")", ",", "\"_default\"", ":", "values", "[", "0", "]", ",", "\"_case_sensitive\"", ":", "kwargs", ".", "get", "(", "\"case_sensitive\"", ",", "True", ")", ",", "\"_quote\"", ":", "kwargs", ".", "get", "(", "\"quote\"", ",", "False", ")", ",", "}", ")", "return", "type", "(", "str", "(", "\"Enumeration\"", ")", ",", "(", "Enumeration", ",", ")", ",", "attrs", ")", "(", ")" ]
33.3125
27.354167
def selection_sort(arr, simulation=False):
    """
    Selection Sort
    Complexity: O(n^2)

    Sorts ``arr`` in place (ascending) and returns it. When ``simulation``
    is true, the array contents are printed after each outer-loop pass.
    """
    step = 0
    if simulation:
        print("iteration", step, ":", *arr)

    size = len(arr)
    for pos in range(size):
        # "Select" the index of the smallest remaining value
        smallest = pos
        for candidate in range(pos + 1, size):
            if arr[candidate] < arr[smallest]:
                smallest = candidate
        arr[pos], arr[smallest] = arr[smallest], arr[pos]

        if simulation:
            step += 1
            print("iteration", step, ":", *arr)

    return arr
[ "def", "selection_sort", "(", "arr", ",", "simulation", "=", "False", ")", ":", "iteration", "=", "0", "if", "simulation", ":", "print", "(", "\"iteration\"", ",", "iteration", ",", "\":\"", ",", "*", "arr", ")", "for", "i", "in", "range", "(", "len", "(", "arr", ")", ")", ":", "minimum", "=", "i", "for", "j", "in", "range", "(", "i", "+", "1", ",", "len", "(", "arr", ")", ")", ":", "# \"Select\" the correct value", "if", "arr", "[", "j", "]", "<", "arr", "[", "minimum", "]", ":", "minimum", "=", "j", "arr", "[", "minimum", "]", ",", "arr", "[", "i", "]", "=", "arr", "[", "i", "]", ",", "arr", "[", "minimum", "]", "if", "simulation", ":", "iteration", "=", "iteration", "+", "1", "print", "(", "\"iteration\"", ",", "iteration", ",", "\":\"", ",", "*", "arr", ")", "return", "arr" ]
25.478261
15.086957
def _update_counters(self, ti_status):
    """
    Updates the counters per state of the tasks that were running. Can re-add
    to tasks to run in case required.

    :param ti_status: the internal status of the backfill job tasks
    :type ti_status: BackfillJob._DagRunTaskStatus
    """
    # Snapshot the running map: entries are popped from ti_status.running
    # while iterating, which would break direct dict iteration.
    for key, ti in list(ti_status.running.items()):
        # Refresh so we see state changes made externally (executor, UI, ...).
        ti.refresh_from_db()
        if ti.state == State.SUCCESS:
            ti_status.succeeded.add(key)
            self.log.debug("Task instance %s succeeded. Don't rerun.", ti)
            ti_status.running.pop(key)
            continue
        elif ti.state == State.SKIPPED:
            ti_status.skipped.add(key)
            self.log.debug("Task instance %s skipped. Don't rerun.", ti)
            ti_status.running.pop(key)
            continue
        elif ti.state == State.FAILED:
            self.log.error("Task instance %s failed", ti)
            ti_status.failed.add(key)
            ti_status.running.pop(key)
            continue
        # special case: if the task needs to run again put it back
        elif ti.state == State.UP_FOR_RETRY:
            self.log.warning("Task instance %s is up for retry", ti)
            ti_status.running.pop(key)
            ti_status.to_run[key] = ti
        # special case: if the task needs to be rescheduled put it back
        elif ti.state == State.UP_FOR_RESCHEDULE:
            self.log.warning("Task instance %s is up for reschedule", ti)
            ti_status.running.pop(key)
            ti_status.to_run[key] = ti
        # special case: The state of the task can be set to NONE by the task itself
        # when it reaches concurrency limits. It could also happen when the state
        # is changed externally, e.g. by clearing tasks from the ui. We need to cover
        # for that as otherwise those tasks would fall outside of the scope of
        # the backfill suddenly.
        elif ti.state == State.NONE:
            self.log.warning(
                "FIXME: task instance %s state was set to none externally or "
                "reaching concurrency limits. Re-adding task to queue.", ti
            )
            # Force it back to SCHEDULED so it is picked up again by the backfill.
            ti.set_state(State.SCHEDULED)
            ti_status.running.pop(key)
            ti_status.to_run[key] = ti
[ "def", "_update_counters", "(", "self", ",", "ti_status", ")", ":", "for", "key", ",", "ti", "in", "list", "(", "ti_status", ".", "running", ".", "items", "(", ")", ")", ":", "ti", ".", "refresh_from_db", "(", ")", "if", "ti", ".", "state", "==", "State", ".", "SUCCESS", ":", "ti_status", ".", "succeeded", ".", "add", "(", "key", ")", "self", ".", "log", ".", "debug", "(", "\"Task instance %s succeeded. Don't rerun.\"", ",", "ti", ")", "ti_status", ".", "running", ".", "pop", "(", "key", ")", "continue", "elif", "ti", ".", "state", "==", "State", ".", "SKIPPED", ":", "ti_status", ".", "skipped", ".", "add", "(", "key", ")", "self", ".", "log", ".", "debug", "(", "\"Task instance %s skipped. Don't rerun.\"", ",", "ti", ")", "ti_status", ".", "running", ".", "pop", "(", "key", ")", "continue", "elif", "ti", ".", "state", "==", "State", ".", "FAILED", ":", "self", ".", "log", ".", "error", "(", "\"Task instance %s failed\"", ",", "ti", ")", "ti_status", ".", "failed", ".", "add", "(", "key", ")", "ti_status", ".", "running", ".", "pop", "(", "key", ")", "continue", "# special case: if the task needs to run again put it back", "elif", "ti", ".", "state", "==", "State", ".", "UP_FOR_RETRY", ":", "self", ".", "log", ".", "warning", "(", "\"Task instance %s is up for retry\"", ",", "ti", ")", "ti_status", ".", "running", ".", "pop", "(", "key", ")", "ti_status", ".", "to_run", "[", "key", "]", "=", "ti", "# special case: if the task needs to be rescheduled put it back", "elif", "ti", ".", "state", "==", "State", ".", "UP_FOR_RESCHEDULE", ":", "self", ".", "log", ".", "warning", "(", "\"Task instance %s is up for reschedule\"", ",", "ti", ")", "ti_status", ".", "running", ".", "pop", "(", "key", ")", "ti_status", ".", "to_run", "[", "key", "]", "=", "ti", "# special case: The state of the task can be set to NONE by the task itself", "# when it reaches concurrency limits. It could also happen when the state", "# is changed externally, e.g. by clearing tasks from the ui. 
We need to cover", "# for that as otherwise those tasks would fall outside of the scope of", "# the backfill suddenly.", "elif", "ti", ".", "state", "==", "State", ".", "NONE", ":", "self", ".", "log", ".", "warning", "(", "\"FIXME: task instance %s state was set to none externally or \"", "\"reaching concurrency limits. Re-adding task to queue.\"", ",", "ti", ")", "ti", ".", "set_state", "(", "State", ".", "SCHEDULED", ")", "ti_status", ".", "running", ".", "pop", "(", "key", ")", "ti_status", ".", "to_run", "[", "key", "]", "=", "ti" ]
49
16.306122
def rel_paths(self, *args, **kwargs):
    """
    Fix the paths in the given dictionary to get relative paths

    Parameters
    ----------
    %(ExperimentsConfig.rel_paths.parameters)s

    Returns
    -------
    %(ExperimentsConfig.rel_paths.returns)s

    Notes
    -----
    d is modified in place!"""
    # Pure delegation to the experiments configuration object.
    experiments = self.config.experiments
    return experiments.rel_paths(*args, **kwargs)
[ "def", "rel_paths", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "config", ".", "experiments", ".", "rel_paths", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
25.1875
21.25
def verifySignature(self, msg):
    """
    Validate the signature of the request
    Note: Batch is whitelisted because the inner messages are checked

    :param msg: a message requiring signature verification
    :return: None; raises an exception if the signature is not valid
    """
    # Whitelisted message types skip authentication entirely.
    if isinstance(msg, self.authnWhitelist):
        return
    if isinstance(msg, Propagate):
        typ = 'propagate'
        # Rebuild the client request carried inside the Propagate wrapper
        # so the inner request's signature is the thing being checked.
        req = TxnUtilConfig.client_request_class(**msg.request)
    else:
        typ = ''
        req = msg

    key = None
    if isinstance(req, Request):
        key = req.key

    # The authenticator expects a mapping; convert request objects.
    if not isinstance(req, Mapping):
        req = req.as_dict

    # Timed so signature-verification cost shows up in node metrics.
    with self.metrics.measure_time(MetricsName.VERIFY_SIGNATURE_TIME):
        identifiers = self.authNr(req).authenticate(req, key=key)

    logger.debug("{} authenticated {} signature on {} request {}".
                 format(self, identifiers, typ, req['reqId']),
                 extra={"cli": True,
                        "tags": ["node-msg-processing"]})
[ "def", "verifySignature", "(", "self", ",", "msg", ")", ":", "if", "isinstance", "(", "msg", ",", "self", ".", "authnWhitelist", ")", ":", "return", "if", "isinstance", "(", "msg", ",", "Propagate", ")", ":", "typ", "=", "'propagate'", "req", "=", "TxnUtilConfig", ".", "client_request_class", "(", "*", "*", "msg", ".", "request", ")", "else", ":", "typ", "=", "''", "req", "=", "msg", "key", "=", "None", "if", "isinstance", "(", "req", ",", "Request", ")", ":", "key", "=", "req", ".", "key", "if", "not", "isinstance", "(", "req", ",", "Mapping", ")", ":", "req", "=", "req", ".", "as_dict", "with", "self", ".", "metrics", ".", "measure_time", "(", "MetricsName", ".", "VERIFY_SIGNATURE_TIME", ")", ":", "identifiers", "=", "self", ".", "authNr", "(", "req", ")", ".", "authenticate", "(", "req", ",", "key", "=", "key", ")", "logger", ".", "debug", "(", "\"{} authenticated {} signature on {} request {}\"", ".", "format", "(", "self", ",", "identifiers", ",", "typ", ",", "req", "[", "'reqId'", "]", ")", ",", "extra", "=", "{", "\"cli\"", ":", "True", ",", "\"tags\"", ":", "[", "\"node-msg-processing\"", "]", "}", ")" ]
33.96875
20.90625
def as_widget(self, widget=None, attrs=None, only_initial=False):
    """
    Renders the field by rendering the passed widget, adding any HTML
    attributes passed as attrs. If no widget is specified, then the
    field's default widget will be used.
    """
    widget = widget or self.field.widget
    attrs = attrs or {}
    auto_id = self.auto_id
    # Only inject an id when one was generated and neither the caller's
    # attrs nor the widget's own attrs already provide one.
    needs_id = auto_id and 'id' not in attrs and 'id' not in widget.attrs
    if needs_id:
        attrs['id'] = self.html_initial_id if only_initial else auto_id
    # Rendered with an empty name attribute.
    return widget.render("", self.value(), attrs=attrs)
[ "def", "as_widget", "(", "self", ",", "widget", "=", "None", ",", "attrs", "=", "None", ",", "only_initial", "=", "False", ")", ":", "if", "not", "widget", ":", "widget", "=", "self", ".", "field", ".", "widget", "attrs", "=", "attrs", "or", "{", "}", "auto_id", "=", "self", ".", "auto_id", "if", "auto_id", "and", "'id'", "not", "in", "attrs", "and", "'id'", "not", "in", "widget", ".", "attrs", ":", "if", "not", "only_initial", ":", "attrs", "[", "'id'", "]", "=", "auto_id", "else", ":", "attrs", "[", "'id'", "]", "=", "self", ".", "html_initial_id", "name", "=", "\"\"", "return", "widget", ".", "render", "(", "name", ",", "self", ".", "value", "(", ")", ",", "attrs", "=", "attrs", ")" ]
35.631579
17.631579
def _install_signal_handlers(self): """ Sets up signal handlers for safely stopping the worker. """ def request_stop(signum, frame): self._stop_requested = True self.log.info('stop requested, waiting for task to finish') signal.signal(signal.SIGINT, request_stop) signal.signal(signal.SIGTERM, request_stop)
[ "def", "_install_signal_handlers", "(", "self", ")", ":", "def", "request_stop", "(", "signum", ",", "frame", ")", ":", "self", ".", "_stop_requested", "=", "True", "self", ".", "log", ".", "info", "(", "'stop requested, waiting for task to finish'", ")", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "request_stop", ")", "signal", ".", "signal", "(", "signal", ".", "SIGTERM", ",", "request_stop", ")" ]
41.222222
9
def saveCopy(self):
    """Save a copy of the object and become that copy.

    Returns a tuple ``(old _key, new _key)``.
    """
    previous_key = self._key
    # Detach from the stored document; save() then persists a new one,
    # giving this object a fresh key.
    self.reset(self.collection)
    self.save()
    return (previous_key, self._key)
[ "def", "saveCopy", "(", "self", ")", ":", "old_key", "=", "self", ".", "_key", "self", ".", "reset", "(", "self", ".", "collection", ")", "self", ".", "save", "(", ")", "return", "(", "old_key", ",", "self", ".", "_key", ")" ]
38.5
19.833333
async def _make_request(self, method: str, url: str, url_vars: Dict[str, str],
                        data: Any, accept: str,
                        jwt: Opt[str] = None,
                        oauth_token: Opt[str] = None,
                        ) -> Tuple[bytes, Opt[str]]:
    """Construct and make an HTTP request.

    Exactly one of ``jwt`` / ``oauth_token`` may be given; when neither is,
    the instance's own ``oauth_token`` is used. Returns the (possibly
    cached) response data and the pagination "more" link, if any.
    """
    if oauth_token is not None and jwt is not None:
        raise ValueError("Cannot pass both oauth_token and jwt.")
    filled_url = sansio.format_url(url, url_vars)
    if jwt is not None:
        request_headers = sansio.create_headers(
            self.requester, accept=accept, jwt=jwt)
    elif oauth_token is not None:
        request_headers = sansio.create_headers(
            self.requester, accept=accept, oauth_token=oauth_token)
    else:
        # fallback to using oauth_token
        request_headers = sansio.create_headers(
            self.requester, accept=accept, oauth_token=self.oauth_token)
    cached = cacheable = False
    # Can't use None as a "no body" sentinel as it's a legitimate JSON type.
    if data == b"":
        body = b""
        request_headers["content-length"] = "0"
        # Only body-less GETs are eligible for the conditional-request cache.
        if method == "GET" and self._cache is not None:
            cacheable = True
            try:
                etag, last_modified, data, more = self._cache[filled_url]
                cached = True
            except KeyError:
                pass
            else:
                # Send validators so the server can reply 304 Not Modified.
                if etag is not None:
                    request_headers["if-none-match"] = etag
                if last_modified is not None:
                    request_headers["if-modified-since"] = last_modified
    else:
        charset = "utf-8"
        body = json.dumps(data).encode(charset)
        request_headers['content-type'] = f"application/json; charset={charset}"
        request_headers['content-length'] = str(len(body))
    if self.rate_limit is not None:
        self.rate_limit.remaining -= 1
    response = await self._request(method, filled_url, request_headers, body)
    # On 304 with a cache hit, keep the cached data/more already loaded above;
    # otherwise decode the fresh response and refresh the cache entry.
    if not (response[0] == 304 and cached):
        data, self.rate_limit, more = sansio.decipher_response(*response)
        has_cache_details = ("etag" in response[1]
                             or "last-modified" in response[1])
        if self._cache is not None and cacheable and has_cache_details:
            etag = response[1].get("etag")
            last_modified = response[1].get("last-modified")
            self._cache[filled_url] = etag, last_modified, data, more
    return data, more
[ "async", "def", "_make_request", "(", "self", ",", "method", ":", "str", ",", "url", ":", "str", ",", "url_vars", ":", "Dict", "[", "str", ",", "str", "]", ",", "data", ":", "Any", ",", "accept", ":", "str", ",", "jwt", ":", "Opt", "[", "str", "]", "=", "None", ",", "oauth_token", ":", "Opt", "[", "str", "]", "=", "None", ",", ")", "->", "Tuple", "[", "bytes", ",", "Opt", "[", "str", "]", "]", ":", "if", "oauth_token", "is", "not", "None", "and", "jwt", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Cannot pass both oauth_token and jwt.\"", ")", "filled_url", "=", "sansio", ".", "format_url", "(", "url", ",", "url_vars", ")", "if", "jwt", "is", "not", "None", ":", "request_headers", "=", "sansio", ".", "create_headers", "(", "self", ".", "requester", ",", "accept", "=", "accept", ",", "jwt", "=", "jwt", ")", "elif", "oauth_token", "is", "not", "None", ":", "request_headers", "=", "sansio", ".", "create_headers", "(", "self", ".", "requester", ",", "accept", "=", "accept", ",", "oauth_token", "=", "oauth_token", ")", "else", ":", "# fallback to using oauth_token", "request_headers", "=", "sansio", ".", "create_headers", "(", "self", ".", "requester", ",", "accept", "=", "accept", ",", "oauth_token", "=", "self", ".", "oauth_token", ")", "cached", "=", "cacheable", "=", "False", "# Can't use None as a \"no body\" sentinel as it's a legitimate JSON type.", "if", "data", "==", "b\"\"", ":", "body", "=", "b\"\"", "request_headers", "[", "\"content-length\"", "]", "=", "\"0\"", "if", "method", "==", "\"GET\"", "and", "self", ".", "_cache", "is", "not", "None", ":", "cacheable", "=", "True", "try", ":", "etag", ",", "last_modified", ",", "data", ",", "more", "=", "self", ".", "_cache", "[", "filled_url", "]", "cached", "=", "True", "except", "KeyError", ":", "pass", "else", ":", "if", "etag", "is", "not", "None", ":", "request_headers", "[", "\"if-none-match\"", "]", "=", "etag", "if", "last_modified", "is", "not", "None", ":", "request_headers", "[", 
"\"if-modified-since\"", "]", "=", "last_modified", "else", ":", "charset", "=", "\"utf-8\"", "body", "=", "json", ".", "dumps", "(", "data", ")", ".", "encode", "(", "charset", ")", "request_headers", "[", "'content-type'", "]", "=", "f\"application/json; charset={charset}\"", "request_headers", "[", "'content-length'", "]", "=", "str", "(", "len", "(", "body", ")", ")", "if", "self", ".", "rate_limit", "is", "not", "None", ":", "self", ".", "rate_limit", ".", "remaining", "-=", "1", "response", "=", "await", "self", ".", "_request", "(", "method", ",", "filled_url", ",", "request_headers", ",", "body", ")", "if", "not", "(", "response", "[", "0", "]", "==", "304", "and", "cached", ")", ":", "data", ",", "self", ".", "rate_limit", ",", "more", "=", "sansio", ".", "decipher_response", "(", "*", "response", ")", "has_cache_details", "=", "(", "\"etag\"", "in", "response", "[", "1", "]", "or", "\"last-modified\"", "in", "response", "[", "1", "]", ")", "if", "self", ".", "_cache", "is", "not", "None", "and", "cacheable", "and", "has_cache_details", ":", "etag", "=", "response", "[", "1", "]", ".", "get", "(", "\"etag\"", ")", "last_modified", "=", "response", "[", "1", "]", ".", "get", "(", "\"last-modified\"", ")", "self", ".", "_cache", "[", "filled_url", "]", "=", "etag", ",", "last_modified", ",", "data", ",", "more", "return", "data", ",", "more" ]
48.303571
16.428571
def MeshLines(*inputobj, **options):
    """
    Build the line segments between two lists of points `startPoints` and `endPoints`.
    `startPoints` can be also passed in the form ``[[point1, point2], ...]``.

    A dolfin ``Mesh`` that was deformed/modified by a function can be
    passed together as inputs.

    :param float scale: apply a rescaling factor to the length
    :param float lw: line width (default 1)
    :param c: line color (default None)
    :param float alpha: line opacity (default 1)
    """
    scale = options.pop("scale", 1)
    lw = options.pop("lw", 1)
    c = options.pop("c", None)
    alpha = options.pop("alpha", 1)

    mesh, u = _inputsort(inputobj)
    startPoints = mesh.coordinates()
    # Evaluate u at every mesh vertex; lines are drawn from each vertex to
    # vertex + u(vertex).
    u_values = np.array([u(p) for p in mesh.coordinates()])
    if not utils.isSequence(u_values[0]):
        printc("~times Error: cannot show Lines for 1D scalar values!", c=1)
        # NOTE(review): exit() aborts the whole interpreter; raising an
        # exception would be friendlier to library users — confirm intent.
        exit()
    endPoints = mesh.coordinates() + u_values
    if u_values.shape[1] == 2:  # u_values is 2D
        u_values = np.insert(u_values, 2, 0, axis=1)  # make it 3d
        startPoints = np.insert(startPoints, 2, 0, axis=1)  # make it 3d
        endPoints = np.insert(endPoints, 2, 0, axis=1)  # make it 3d

    actor = shapes.Lines(
        startPoints, endPoints, scale=scale, lw=lw, c=c, alpha=alpha
    )
    # Attach the dolfin objects so callers can inspect them later.
    actor.mesh = mesh
    actor.u = u
    actor.u_values = u_values
    return actor
[ "def", "MeshLines", "(", "*", "inputobj", ",", "*", "*", "options", ")", ":", "scale", "=", "options", ".", "pop", "(", "\"scale\"", ",", "1", ")", "lw", "=", "options", ".", "pop", "(", "\"lw\"", ",", "1", ")", "c", "=", "options", ".", "pop", "(", "\"c\"", ",", "None", ")", "alpha", "=", "options", ".", "pop", "(", "\"alpha\"", ",", "1", ")", "mesh", ",", "u", "=", "_inputsort", "(", "inputobj", ")", "startPoints", "=", "mesh", ".", "coordinates", "(", ")", "u_values", "=", "np", ".", "array", "(", "[", "u", "(", "p", ")", "for", "p", "in", "mesh", ".", "coordinates", "(", ")", "]", ")", "if", "not", "utils", ".", "isSequence", "(", "u_values", "[", "0", "]", ")", ":", "printc", "(", "\"~times Error: cannot show Lines for 1D scalar values!\"", ",", "c", "=", "1", ")", "exit", "(", ")", "endPoints", "=", "mesh", ".", "coordinates", "(", ")", "+", "u_values", "if", "u_values", ".", "shape", "[", "1", "]", "==", "2", ":", "# u_values is 2D", "u_values", "=", "np", ".", "insert", "(", "u_values", ",", "2", ",", "0", ",", "axis", "=", "1", ")", "# make it 3d", "startPoints", "=", "np", ".", "insert", "(", "startPoints", ",", "2", ",", "0", ",", "axis", "=", "1", ")", "# make it 3d", "endPoints", "=", "np", ".", "insert", "(", "endPoints", ",", "2", ",", "0", ",", "axis", "=", "1", ")", "# make it 3d", "actor", "=", "shapes", ".", "Lines", "(", "startPoints", ",", "endPoints", ",", "scale", "=", "scale", ",", "lw", "=", "lw", ",", "c", "=", "c", ",", "alpha", "=", "alpha", ")", "actor", ".", "mesh", "=", "mesh", "actor", ".", "u", "=", "u", "actor", ".", "u_values", "=", "u_values", "return", "actor" ]
35.457143
20.771429
def get_scalar_value(self, name):
    """
    Get scalar value by name

    Raises ``InvalidServiceConfiguration`` when ``name`` is not a known
    scalar.
    """
    if name in self.scalars:
        return self.scalars.get(name)
    raise InvalidServiceConfiguration(
        'Invalid Service Argument Scalar "%s" (not found)' % name
    )
[ "def", "get_scalar_value", "(", "self", ",", "name", ")", ":", "if", "name", "not", "in", "self", ".", "scalars", ":", "raise", "InvalidServiceConfiguration", "(", "'Invalid Service Argument Scalar \"%s\" (not found)'", "%", "name", ")", "new_value", "=", "self", ".", "scalars", ".", "get", "(", "name", ")", "return", "new_value" ]
38.375
11.875
def schemas_map(add_generics=False):
    """
    Returns a dictionary of H₂O schemas, indexed by their name.

    :param add_generics: when True, annotate base schemas with their generic
        type parameters ("generics") and derived schemas with the concrete
        types they bind ("super_generics").
    """
    m = {}
    for schema in schemas():
        if schema["name"].startswith('AutoML'):
            continue  # Generation code doesn't know how to deal with defaults for complex objects yet
        if schema["name"].startswith('UserFeedback'):
            continue  # UserFeedback schema contains an AutoMLKeyV3
        m[schema["name"]] = schema

    def find_field(fields, field_name):
        """Finds a field with the given `field_name` among the list of fields."""
        for f in fields:
            if f["is_inherited"] and f["name"] == field_name:
                return f
        raise RuntimeError("Unable to find field %s" % (field_name))

    # Add information about the generics. This is rather hacky at the moment.
    if add_generics:
        for base, generics in [
            # Note: derived classes must come before base classes here
            ("SharedTreeModelV3", [("P", "ModelParametersSchemaV3"), ("O", "ModelOutputSchemaV3")]),
            ("ModelSchemaV3", [("P", "ModelParametersSchemaV3"), ("O", "ModelOutputSchemaV3")]),
            ("SharedTreeV3", [("P", "ModelParametersSchemaV3")]),
            ("ModelBuilderSchema", [("P", "ModelParametersSchemaV3")]),
        ]:
            # Write the generic information about the base class
            schema = m[base]
            schema["generics"] = generics
            # Map each concrete type name back to its generic parameter letter,
            # and each letter to its position in the generics list.
            generic_map = {long_type: gen_type for gen_type, long_type in generics}
            generic_index = {geninfo[0]: i for i, geninfo in enumerate(generics)}
            mapped_fields = {}
            for field in schema["fields"]:
                ftype = field["schema_name"]
                if ftype in generic_map:
                    gen_type = generic_map[ftype]
                    # Replace the concrete type with the generic parameter
                    # in the base class's field list.
                    field["schema_name"] = gen_type
                    mapped_fields[field["name"]] = generic_index[gen_type]
            assert len(mapped_fields) == len(generics), (
                "Unable to find generic types %r in base class %s. Schema: %r"
                % (generic_map, base, {f["name"]: f["schema_name"] for f in schema["fields"]}))

            # Find all the derived classes, and fill in their derived information
            for schema_name, schema in m.items():
                if schema["superclass"] == base:
                    base_generics = [None] * len(generics)
                    for mapped_field_name, generic_index in mapped_fields.items():
                        field = find_field(schema["fields"], mapped_field_name)
                        base_generics[generic_index] = field["schema_name"]
                    assert None not in base_generics, (
                        "Unable to find mapped super types in schema %s: base = %r, map = %r"
                        % (schema_name, base_generics, mapped_fields))
                    schema["super_generics"] = base_generics
    return m
[ "def", "schemas_map", "(", "add_generics", "=", "False", ")", ":", "m", "=", "{", "}", "for", "schema", "in", "schemas", "(", ")", ":", "if", "schema", "[", "\"name\"", "]", ".", "startswith", "(", "'AutoML'", ")", ":", "continue", "# Generation code doesn't know how to deal with defaults for complex objects yet", "if", "schema", "[", "\"name\"", "]", ".", "startswith", "(", "'UserFeedback'", ")", ":", "continue", "# UserFeedback schema contains an AutoMLKeyV3", "m", "[", "schema", "[", "\"name\"", "]", "]", "=", "schema", "def", "find_field", "(", "fields", ",", "field_name", ")", ":", "\"\"\"Finds a field with the given `field_name` among the list of fields.\"\"\"", "for", "f", "in", "fields", ":", "if", "f", "[", "\"is_inherited\"", "]", "and", "f", "[", "\"name\"", "]", "==", "field_name", ":", "return", "f", "raise", "RuntimeError", "(", "\"Unable to find field %s\"", "%", "(", "field_name", ")", ")", "# Add information about the generics. This is rather hacky at the moment.", "if", "add_generics", ":", "for", "base", ",", "generics", "in", "[", "# Note: derived classes must come before base classes here", "(", "\"SharedTreeModelV3\"", ",", "[", "(", "\"P\"", ",", "\"ModelParametersSchemaV3\"", ")", ",", "(", "\"O\"", ",", "\"ModelOutputSchemaV3\"", ")", "]", ")", ",", "(", "\"ModelSchemaV3\"", ",", "[", "(", "\"P\"", ",", "\"ModelParametersSchemaV3\"", ")", ",", "(", "\"O\"", ",", "\"ModelOutputSchemaV3\"", ")", "]", ")", ",", "(", "\"SharedTreeV3\"", ",", "[", "(", "\"P\"", ",", "\"ModelParametersSchemaV3\"", ")", "]", ")", ",", "(", "\"ModelBuilderSchema\"", ",", "[", "(", "\"P\"", ",", "\"ModelParametersSchemaV3\"", ")", "]", ")", ",", "]", ":", "# Write the generic information about the base class", "schema", "=", "m", "[", "base", "]", "schema", "[", "\"generics\"", "]", "=", "generics", "generic_map", "=", "{", "long_type", ":", "gen_type", "for", "gen_type", ",", "long_type", "in", "generics", "}", "generic_index", "=", "{", "geninfo", "[", "0", "]", 
":", "i", "for", "i", ",", "geninfo", "in", "enumerate", "(", "generics", ")", "}", "mapped_fields", "=", "{", "}", "for", "field", "in", "schema", "[", "\"fields\"", "]", ":", "ftype", "=", "field", "[", "\"schema_name\"", "]", "if", "ftype", "in", "generic_map", ":", "gen_type", "=", "generic_map", "[", "ftype", "]", "field", "[", "\"schema_name\"", "]", "=", "gen_type", "mapped_fields", "[", "field", "[", "\"name\"", "]", "]", "=", "generic_index", "[", "gen_type", "]", "assert", "len", "(", "mapped_fields", ")", "==", "len", "(", "generics", ")", ",", "(", "\"Unable to find generic types %r in base class %s. Schema: %r\"", "%", "(", "generic_map", ",", "base", ",", "{", "f", "[", "\"name\"", "]", ":", "f", "[", "\"schema_name\"", "]", "for", "f", "in", "schema", "[", "\"fields\"", "]", "}", ")", ")", "# Find all the derived classes, and fill in their derived information", "for", "schema_name", ",", "schema", "in", "m", ".", "items", "(", ")", ":", "if", "schema", "[", "\"superclass\"", "]", "==", "base", ":", "base_generics", "=", "[", "None", "]", "*", "len", "(", "generics", ")", "for", "mapped_field_name", ",", "generic_index", "in", "mapped_fields", ".", "items", "(", ")", ":", "field", "=", "find_field", "(", "schema", "[", "\"fields\"", "]", ",", "mapped_field_name", ")", "base_generics", "[", "generic_index", "]", "=", "field", "[", "\"schema_name\"", "]", "assert", "None", "not", "in", "base_generics", ",", "(", "\"Unable to find mapped super types in schema %s: base = %r, map = %r\"", "%", "(", "schema_name", ",", "base_generics", ",", "mapped_fields", ")", ")", "schema", "[", "\"super_generics\"", "]", "=", "base_generics", "return", "m" ]
52.509091
25.836364
def filter_from_query(query, id_field="id", field_map=None):
    """This returns a filter which actually filters out everything, unlike
    the preview filter which includes excluded_ids for UI purposes.

    Args:
        query: mapping describing the filter groups; may also carry
            ``included_ids`` / ``excluded_ids`` lists of pks.
        id_field: unused here; kept for interface compatibility.
        field_map: optional mapping of query field names to index field
            names, forwarded to ``groups_filter_from_query``.

    Returns:
        A filter object, or None when the query produces no filter at all.
    """
    # The default used to be a shared mutable ``{}`` (a classic Python
    # pitfall); None + fallback keeps behavior while avoiding sharing.
    if field_map is None:
        field_map = {}
    f = groups_filter_from_query(query, field_map=field_map)

    excluded_ids = query.get("excluded_ids")
    included_ids = query.get("included_ids")

    if included_ids:
        # include these, please
        if f is None:
            f = Terms(pk=included_ids)
        else:
            f |= Terms(pk=included_ids)

    if excluded_ids:
        # exclude these
        if f is None:
            f = MatchAll()
        f &= ~Terms(pk=excluded_ids)

    return f
[ "def", "filter_from_query", "(", "query", ",", "id_field", "=", "\"id\"", ",", "field_map", "=", "{", "}", ")", ":", "f", "=", "groups_filter_from_query", "(", "query", ",", "field_map", "=", "field_map", ")", "excluded_ids", "=", "query", ".", "get", "(", "\"excluded_ids\"", ")", "included_ids", "=", "query", ".", "get", "(", "\"included_ids\"", ")", "if", "included_ids", ":", "# include these, please", "if", "f", "is", "None", ":", "f", "=", "Terms", "(", "pk", "=", "included_ids", ")", "else", ":", "f", "|=", "Terms", "(", "pk", "=", "included_ids", ")", "if", "excluded_ids", ":", "# exclude these", "if", "f", "is", "None", ":", "f", "=", "MatchAll", "(", ")", "f", "&=", "~", "Terms", "(", "pk", "=", "excluded_ids", ")", "return", "f" ]
32.1
15.55
def request_add(self, req, x, y):
    """Add two numbers"""
    total = x + y
    # Publish the result on the sensor before replying.
    self._add_result.set_value(total)
    return ("ok", total)
[ "def", "request_add", "(", "self", ",", "req", ",", "x", ",", "y", ")", ":", "r", "=", "x", "+", "y", "self", ".", "_add_result", ".", "set_value", "(", "r", ")", "return", "(", "\"ok\"", ",", "r", ")" ]
28
9.8
def getmessage(self) -> str:
    """Parse self into a unicode string usable as message content.

    Collects the public class attributes (skipping ``_``-prefixed names)
    into a dict, formatting date/datetime values with the configured
    ``_dateformat`` / ``_datetimeformat``, and returns ``str(dict)``.
    """
    image = {}
    for key, default in vars(self.__class__).items():
        # NOTE(review): ``key in vars(QueueMessage).items()`` compares a
        # string against (name, value) tuples and is therefore always
        # False; kept as-is pending confirmation of the intended filter.
        if not key.startswith('_') and key != '' and (not key in vars(QueueMessage).items()):
            # BUGFIX: the original used two independent ``if`` statements,
            # so plain dates were formatted and then overwritten by the
            # raw value in the second check's ``else`` branch. Check
            # datetime first (datetime is a subclass of date), then date.
            if isinstance(default, datetime.datetime):
                image[key] = safe_cast(getattr(self, key, default), str, dformat=self._datetimeformat)
            elif isinstance(default, datetime.date):
                image[key] = safe_cast(getattr(self, key, default), str, dformat=self._dateformat)
            else:
                image[key] = getattr(self, key, default)
    return str(image)
[ "def", "getmessage", "(", "self", ")", "->", "str", ":", "image", "=", "{", "}", "for", "key", ",", "default", "in", "vars", "(", "self", ".", "__class__", ")", ".", "items", "(", ")", ":", "if", "not", "key", ".", "startswith", "(", "'_'", ")", "and", "key", "!=", "''", "and", "(", "not", "key", "in", "vars", "(", "QueueMessage", ")", ".", "items", "(", ")", ")", ":", "if", "isinstance", "(", "default", ",", "datetime", ".", "date", ")", ":", "image", "[", "key", "]", "=", "safe_cast", "(", "getattr", "(", "self", ",", "key", ",", "default", ")", ",", "str", ",", "dformat", "=", "self", ".", "_dateformat", ")", "if", "isinstance", "(", "default", ",", "datetime", ".", "datetime", ")", ":", "image", "[", "key", "]", "=", "safe_cast", "(", "getattr", "(", "self", ",", "key", ",", "default", ")", ",", "str", ",", "dformat", "=", "self", ".", "_datetimeformat", ")", "else", ":", "image", "[", "key", "]", "=", "getattr", "(", "self", ",", "key", ",", "default", ")", "return", "str", "(", "image", ")" ]
63.416667
32.416667
def lpc(x, N=None):
    """Linear Predictor Coefficients.

    :param x: input time series (assumed to support ``len`` and, when
        zero-padding is needed, ``resize`` — presumably a numpy array;
        TODO confirm)
    :param int N: default is length(X) - 1

    :Details:

    Finds the coefficients :math:`A=(1, a(2), \\dots a(N+1))`, of an Nth order
    forward linear predictor that predicts the current value value of the
    real-valued time series x based on past samples:

    .. math:: \\hat{x}(n) = -a(2)*x(n-1) - a(3)*x(n-2) - ... - a(N+1)*x(n-N)

    such that the sum of the squares of the errors

    .. math:: err(n) = X(n) - Xp(n)

    is minimized. This function  uses the Levinson-Durbin recursion to
    solve the normal equations that arise from the least-squares formulation.

    .. seealso:: :func:`levinson`, :func:`aryule`, :func:`prony`, :func:`stmcb`

    .. todo:: matrix case, references

    :Example: The figure below shows the original signal (blue line) and
        the estimated signal (red line).

    .. plot::
        :width: 80%
        :include-source:

        from scipy.signal import lfilter
        noise = randn(50000,1);  % Normalized white Gaussian noise
        x = filter([1], [1 1/2 1/3 1/4], noise)
        x = x[45904:50000]
        x.reshape(4096, 1)
        x = x[0]

    Compute the predictor coefficients, estimated signal, prediction error,
    and autocorrelation sequence of the prediction error:

    1.00000 + 0.00000i   0.51711 - 0.00000i   0.33908 - 0.00000i   0.24410 - 0.00000i

    ::

        a = lpc(x, 3)
        est_x = lfilter([0 -a(2:end)],1,x);    % Estimated signal
        e = x - est_x;                        % Prediction error
        [acs,lags] = xcorr(e,'coeff');   % ACS of prediction error
    """
    m = len(x)
    if N is None:
        N = m - 1  #default value if N is not provided
    elif N > m-1:
        #disp('Warning: zero-padding short input sequence')
        x.resize(N+1)
        #todo: check this zero-padding.

    # Autocorrelation via FFT: pad to the next power of two covering the
    # full (2*len(x)-1)-sample linear correlation to avoid circular overlap.
    X = fft(x, 2**nextpow2(2.*len(x)-1))
    R = real(ifft(abs(X)**2))
    R = R/(m-1.)  #Biased autocorrelation estimate
    # Levinson-Durbin recursion solves the Toeplitz normal equations.
    a, e, ref = LEVINSON(R, N)
    return a, e
[ "def", "lpc", "(", "x", ",", "N", "=", "None", ")", ":", "m", "=", "len", "(", "x", ")", "if", "N", "is", "None", ":", "N", "=", "m", "-", "1", "#default value if N is not provided", "elif", "N", ">", "m", "-", "1", ":", "#disp('Warning: zero-padding short input sequence')", "x", ".", "resize", "(", "N", "+", "1", ")", "#todo: check this zero-padding.", "X", "=", "fft", "(", "x", ",", "2", "**", "nextpow2", "(", "2.", "*", "len", "(", "x", ")", "-", "1", ")", ")", "R", "=", "real", "(", "ifft", "(", "abs", "(", "X", ")", "**", "2", ")", ")", "R", "=", "R", "/", "(", "m", "-", "1.", ")", "#Biased autocorrelation estimate", "a", ",", "e", ",", "ref", "=", "LEVINSON", "(", "R", ",", "N", ")", "return", "a", ",", "e" ]
28.809524
27.063492
def get_answer_id_for_question(self, question):
    """Get the answer_id corresponding to the answer given for question by
    looking at this :class:`~.UserQuestion`'s answer_options.

    The given :class:`~.Question` instance must have the same id as this
    :class:`~.UserQuestion`. That this method exists is admittedly somewhat
    weird. Unfortunately, it seems to be the only way to retrieve this
    information.

    Returns None when no answer option matches the given answer text.
    """
    assert question.id == self.id
    wanted_text = question.their_answer
    for option in self.answer_options:
        if option.text == wanted_text:
            return option.id
[ "def", "get_answer_id_for_question", "(", "self", ",", "question", ")", ":", "assert", "question", ".", "id", "==", "self", ".", "id", "for", "answer_option", "in", "self", ".", "answer_options", ":", "if", "answer_option", ".", "text", "==", "question", ".", "their_answer", ":", "return", "answer_option", ".", "id" ]
48.769231
16.307692
def make_fields_unique(self, fields): """ iterates over the row and make each field unique """ for i in range(0, len(fields)): for j in range(i+1, len(fields)): if fields[i] == fields[j]: fields[j] += "'"
[ "def", "make_fields_unique", "(", "self", ",", "fields", ")", ":", "for", "i", "in", "range", "(", "0", ",", "len", "(", "fields", ")", ")", ":", "for", "j", "in", "range", "(", "i", "+", "1", ",", "len", "(", "fields", ")", ")", ":", "if", "fields", "[", "i", "]", "==", "fields", "[", "j", "]", ":", "fields", "[", "j", "]", "+=", "\"'\"" ]
34.625
3.875
def commit(self): """Commit dirty records to the server. This method is automatically called when the `auto_commit` option is set to `True` (default). It can be useful to set the former option to `False` to get better performance by reducing the number of RPC requests generated. With `auto_commit` set to `True` (default behaviour), each time a value is set on a record field a RPC request is sent to the server to update the record: .. doctest:: >>> user = odoo.env.user >>> user.name = "Joe" # write({'name': "Joe"}) >>> user.email = "joe@odoo.net" # write({'email': "joe@odoo.net"}) With `auto_commit` set to `False`, changes on a record are sent all at once when calling the :func:`commit` method: .. doctest:: >>> odoo.config['auto_commit'] = False >>> user = odoo.env.user >>> user.name = "Joe" >>> user.email = "joe@odoo.net" >>> user in odoo.env.dirty True >>> odoo.env.commit() # write({'name': "Joe", 'email': "joe@odoo.net"}) >>> user in odoo.env.dirty False Only one RPC request is generated in the last case. """ # Iterate on a new set, as we remove record during iteration from the # original one for record in set(self.dirty): values = {} for field in record._values_to_write: if record.id in record._values_to_write[field]: value = record._values_to_write[field].pop(record.id) values[field] = value # Store the value in the '_values' dictionary. This # operation is delegated to each field descriptor as some # values can not be stored "as is" (e.g. magic tuples of # 2many fields need to be converted) record.__class__.__dict__[field].store(record, value) record.write(values) self.dirty.remove(record)
[ "def", "commit", "(", "self", ")", ":", "# Iterate on a new set, as we remove record during iteration from the", "# original one", "for", "record", "in", "set", "(", "self", ".", "dirty", ")", ":", "values", "=", "{", "}", "for", "field", "in", "record", ".", "_values_to_write", ":", "if", "record", ".", "id", "in", "record", ".", "_values_to_write", "[", "field", "]", ":", "value", "=", "record", ".", "_values_to_write", "[", "field", "]", ".", "pop", "(", "record", ".", "id", ")", "values", "[", "field", "]", "=", "value", "# Store the value in the '_values' dictionary. This", "# operation is delegated to each field descriptor as some", "# values can not be stored \"as is\" (e.g. magic tuples of", "# 2many fields need to be converted)", "record", ".", "__class__", ".", "__dict__", "[", "field", "]", ".", "store", "(", "record", ",", "value", ")", "record", ".", "write", "(", "values", ")", "self", ".", "dirty", ".", "remove", "(", "record", ")" ]
43.270833
23.0625
def register(self, new_formulas, *args, **kwargs): """ Register formula and meta data. * ``islinear`` - ``True`` if formula is linear, ``False`` if non-linear. * ``args`` - position of arguments * ``units`` - units of returns and arguments as pair of tuples * ``isconstant`` - constant arguments not included in covariance :param new_formulas: new formulas to add to registry. """ kwargs.update(zip(self.meta_names, args)) # call super method, meta must be passed as kwargs! super(FormulaRegistry, self).register(new_formulas, **kwargs)
[ "def", "register", "(", "self", ",", "new_formulas", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "zip", "(", "self", ".", "meta_names", ",", "args", ")", ")", "# call super method, meta must be passed as kwargs!", "super", "(", "FormulaRegistry", ",", "self", ")", ".", "register", "(", "new_formulas", ",", "*", "*", "kwargs", ")" ]
43.785714
19.5
def decode(string, base): """ Given a string (string) and a numeric base (base), decode the string into an integer. Returns the integer """ base = int(base) code_string = get_code_string(base) result = 0 if base == 16: string = string.lower() while len(string) > 0: result *= base result += code_string.find(string[0]) string = string[1:] return result
[ "def", "decode", "(", "string", ",", "base", ")", ":", "base", "=", "int", "(", "base", ")", "code_string", "=", "get_code_string", "(", "base", ")", "result", "=", "0", "if", "base", "==", "16", ":", "string", "=", "string", ".", "lower", "(", ")", "while", "len", "(", "string", ")", ">", "0", ":", "result", "*=", "base", "result", "+=", "code_string", ".", "find", "(", "string", "[", "0", "]", ")", "string", "=", "string", "[", "1", ":", "]", "return", "result" ]
22.944444
15.5
def get_resource_form_for_create(self, resource_record_types): """Gets the resource form for creating new resources. A new form should be requested for each create transaction. arg: resource_record_types (osid.type.Type[]): array of resource record types return: (osid.resource.ResourceForm) - the resource form raise: NullArgument - ``resource_record_types`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - unable to get form with requested record types *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceAdminSession.get_resource_form_for_create_template for arg in resource_record_types: if not isinstance(arg, ABCType): raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type') if resource_record_types == []: obj_form = objects.ResourceForm( bin_id=self._catalog_id, runtime=self._runtime, effective_agent_id=self.get_effective_agent_id(), proxy=self._proxy) else: obj_form = objects.ResourceForm( bin_id=self._catalog_id, record_types=resource_record_types, runtime=self._runtime, effective_agent_id=self.get_effective_agent_id(), proxy=self._proxy) self._forms[obj_form.get_id().get_identifier()] = not CREATED return obj_form
[ "def", "get_resource_form_for_create", "(", "self", ",", "resource_record_types", ")", ":", "# Implemented from template for", "# osid.resource.ResourceAdminSession.get_resource_form_for_create_template", "for", "arg", "in", "resource_record_types", ":", "if", "not", "isinstance", "(", "arg", ",", "ABCType", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", "'one or more argument array elements is not a valid OSID Type'", ")", "if", "resource_record_types", "==", "[", "]", ":", "obj_form", "=", "objects", ".", "ResourceForm", "(", "bin_id", "=", "self", ".", "_catalog_id", ",", "runtime", "=", "self", ".", "_runtime", ",", "effective_agent_id", "=", "self", ".", "get_effective_agent_id", "(", ")", ",", "proxy", "=", "self", ".", "_proxy", ")", "else", ":", "obj_form", "=", "objects", ".", "ResourceForm", "(", "bin_id", "=", "self", ".", "_catalog_id", ",", "record_types", "=", "resource_record_types", ",", "runtime", "=", "self", ".", "_runtime", ",", "effective_agent_id", "=", "self", ".", "get_effective_agent_id", "(", ")", ",", "proxy", "=", "self", ".", "_proxy", ")", "self", ".", "_forms", "[", "obj_form", ".", "get_id", "(", ")", ".", "get_identifier", "(", ")", "]", "=", "not", "CREATED", "return", "obj_form" ]
46.138889
17.694444
def convert_windows_path_to_cygwin(path): """Convert a Windows path to a Cygwin path. Just handles the basic case.""" if len(path) > 2 and path[1] == ":" and path[2] == "\\": newpath = cygwin_full_path_prefix + "/" + path[0] if len(path) > 3: newpath += "/" + path[3:] path = newpath path = path.replace("\\", "/") return path
[ "def", "convert_windows_path_to_cygwin", "(", "path", ")", ":", "if", "len", "(", "path", ")", ">", "2", "and", "path", "[", "1", "]", "==", "\":\"", "and", "path", "[", "2", "]", "==", "\"\\\\\"", ":", "newpath", "=", "cygwin_full_path_prefix", "+", "\"/\"", "+", "path", "[", "0", "]", "if", "len", "(", "path", ")", ">", "3", ":", "newpath", "+=", "\"/\"", "+", "path", "[", "3", ":", "]", "path", "=", "newpath", "path", "=", "path", ".", "replace", "(", "\"\\\\\"", ",", "\"/\"", ")", "return", "path" ]
45
12.25
def search_images(q, start=1, count=10, wait=10, asynchronous=False, cached=False): """ Returns a Yahoo images query formatted as a YahooSearch list object. """ service = YAHOO_IMAGES return YahooSearch(q, start, count, service, None, wait, asynchronous, cached)
[ "def", "search_images", "(", "q", ",", "start", "=", "1", ",", "count", "=", "10", ",", "wait", "=", "10", ",", "asynchronous", "=", "False", ",", "cached", "=", "False", ")", ":", "service", "=", "YAHOO_IMAGES", "return", "YahooSearch", "(", "q", ",", "start", ",", "count", ",", "service", ",", "None", ",", "wait", ",", "asynchronous", ",", "cached", ")" ]
40.285714
24.428571
def transform(self, attrs): """Perform all actions on a given attribute dict.""" self.collect(attrs) self.add_missing_implementations() self.fill_attrs(attrs)
[ "def", "transform", "(", "self", ",", "attrs", ")", ":", "self", ".", "collect", "(", "attrs", ")", "self", ".", "add_missing_implementations", "(", ")", "self", ".", "fill_attrs", "(", "attrs", ")" ]
37.2
7.6
def update_org(cls, module): """ Adds the `users` field to the organization model """ try: cls.module_registry[module]["OrgModel"]._meta.get_field("users") except FieldDoesNotExist: cls.module_registry[module]["OrgModel"].add_to_class( "users", models.ManyToManyField( USER_MODEL, through=cls.module_registry[module]["OrgUserModel"].__name__, related_name="%(app_label)s_%(class)s", ), ) cls.module_registry[module]["OrgModel"].invitation_model = cls.module_registry[ module ][ "OrgInviteModel" ]
[ "def", "update_org", "(", "cls", ",", "module", ")", ":", "try", ":", "cls", ".", "module_registry", "[", "module", "]", "[", "\"OrgModel\"", "]", ".", "_meta", ".", "get_field", "(", "\"users\"", ")", "except", "FieldDoesNotExist", ":", "cls", ".", "module_registry", "[", "module", "]", "[", "\"OrgModel\"", "]", ".", "add_to_class", "(", "\"users\"", ",", "models", ".", "ManyToManyField", "(", "USER_MODEL", ",", "through", "=", "cls", ".", "module_registry", "[", "module", "]", "[", "\"OrgUserModel\"", "]", ".", "__name__", ",", "related_name", "=", "\"%(app_label)s_%(class)s\"", ",", ")", ",", ")", "cls", ".", "module_registry", "[", "module", "]", "[", "\"OrgModel\"", "]", ".", "invitation_model", "=", "cls", ".", "module_registry", "[", "module", "]", "[", "\"OrgInviteModel\"", "]" ]
33.761905
21
def is_population_germline(rec): """Identify a germline calls based on annoations with ExAC or other population databases. """ min_count = 50 for k in population_keys: if k in rec.info: val = rec.info.get(k) if "," in val: val = val.split(",")[0] if isinstance(val, (list, tuple)): val = max(val) if int(val) > min_count: return True return False
[ "def", "is_population_germline", "(", "rec", ")", ":", "min_count", "=", "50", "for", "k", "in", "population_keys", ":", "if", "k", "in", "rec", ".", "info", ":", "val", "=", "rec", ".", "info", ".", "get", "(", "k", ")", "if", "\",\"", "in", "val", ":", "val", "=", "val", ".", "split", "(", "\",\"", ")", "[", "0", "]", "if", "isinstance", "(", "val", ",", "(", "list", ",", "tuple", ")", ")", ":", "val", "=", "max", "(", "val", ")", "if", "int", "(", "val", ")", ">", "min_count", ":", "return", "True", "return", "False" ]
32.642857
9.642857
def require_app(app_name, api_style=False): """ Request the application to be automatically loaded. If this is used for "api" style modules, which is imported by a client application, set api_style=True. If this is used for client application module, set api_style=False. """ iterable = (inspect.getmodule(frame[0]) for frame in inspect.stack()) modules = [module for module in iterable if module is not None] if api_style: m = modules[2] # skip a frame for "api" module else: m = modules[1] m._REQUIRED_APP = getattr(m, '_REQUIRED_APP', []) m._REQUIRED_APP.append(app_name) LOG.debug('require_app: %s is required by %s', app_name, m.__name__)
[ "def", "require_app", "(", "app_name", ",", "api_style", "=", "False", ")", ":", "iterable", "=", "(", "inspect", ".", "getmodule", "(", "frame", "[", "0", "]", ")", "for", "frame", "in", "inspect", ".", "stack", "(", ")", ")", "modules", "=", "[", "module", "for", "module", "in", "iterable", "if", "module", "is", "not", "None", "]", "if", "api_style", ":", "m", "=", "modules", "[", "2", "]", "# skip a frame for \"api\" module", "else", ":", "m", "=", "modules", "[", "1", "]", "m", ".", "_REQUIRED_APP", "=", "getattr", "(", "m", ",", "'_REQUIRED_APP'", ",", "[", "]", ")", "m", ".", "_REQUIRED_APP", ".", "append", "(", "app_name", ")", "LOG", ".", "debug", "(", "'require_app: %s is required by %s'", ",", "app_name", ",", "m", ".", "__name__", ")" ]
38.722222
20.166667
def combinePlinkBinaryFiles(prefixes, outPrefix): """Combine Plink binary files. :param prefixes: a list of the prefix of the files that need to be combined. :param outPrefix: the prefix of the output file (the combined file). :type prefixes: list :type outPrefix: str It uses Plink to merge a list of binary files (which is a list of prefixes (strings)), and create the final data set which as ``outPrefix`` as the prefix. """ # The first file is the bfile, the others are the ones to merge outputFile = None try: outputFile = open(outPrefix + ".files_to_merge", "w") except IOError: msg = "%(outPrefix)s.filesToMerge: can't write file" % locals() raise ProgramError(msg) for prefix in prefixes[1:]: print >>outputFile, " ".join([ prefix + i for i in [".bed", ".bim", ".fam"] ]) # Closing the output files outputFile.close() # Runing plink plinkCommand = ["plink", "--noweb", "--bfile", prefixes[0], "--merge-list", outPrefix + ".files_to_merge", "--make-bed", "--out", outPrefix] runCommand(plinkCommand)
[ "def", "combinePlinkBinaryFiles", "(", "prefixes", ",", "outPrefix", ")", ":", "# The first file is the bfile, the others are the ones to merge", "outputFile", "=", "None", "try", ":", "outputFile", "=", "open", "(", "outPrefix", "+", "\".files_to_merge\"", ",", "\"w\"", ")", "except", "IOError", ":", "msg", "=", "\"%(outPrefix)s.filesToMerge: can't write file\"", "%", "locals", "(", ")", "raise", "ProgramError", "(", "msg", ")", "for", "prefix", "in", "prefixes", "[", "1", ":", "]", ":", "print", ">>", "outputFile", ",", "\" \"", ".", "join", "(", "[", "prefix", "+", "i", "for", "i", "in", "[", "\".bed\"", ",", "\".bim\"", ",", "\".fam\"", "]", "]", ")", "# Closing the output files", "outputFile", ".", "close", "(", ")", "# Runing plink", "plinkCommand", "=", "[", "\"plink\"", ",", "\"--noweb\"", ",", "\"--bfile\"", ",", "prefixes", "[", "0", "]", ",", "\"--merge-list\"", ",", "outPrefix", "+", "\".files_to_merge\"", ",", "\"--make-bed\"", ",", "\"--out\"", ",", "outPrefix", "]", "runCommand", "(", "plinkCommand", ")" ]
32.416667
23.222222
def watch_async(self, limit=None, timeout=None): """Non-block method to watch the clipboard changing.""" return self.watch(limit=limit, timeout=timeout)
[ "def", "watch_async", "(", "self", ",", "limit", "=", "None", ",", "timeout", "=", "None", ")", ":", "return", "self", ".", "watch", "(", "limit", "=", "limit", ",", "timeout", "=", "timeout", ")" ]
55.333333
7.666667
def tag2id(self, xs): """Map tag(s) to id(s) Parameters ---------- xs : str or list tag or tags Returns ------- int or list id(s) of tag(s) """ if isinstance(xs, list): return [self._tag2id.get(x, self.UNK) for x in xs] return self._tag2id.get(xs, self.UNK)
[ "def", "tag2id", "(", "self", ",", "xs", ")", ":", "if", "isinstance", "(", "xs", ",", "list", ")", ":", "return", "[", "self", ".", "_tag2id", ".", "get", "(", "x", ",", "self", ".", "UNK", ")", "for", "x", "in", "xs", "]", "return", "self", ".", "_tag2id", ".", "get", "(", "xs", ",", "self", ".", "UNK", ")" ]
22.5
18.4375
def object(self, o_type, o_name=None): """Get a monitored object from the arbiter. Indeed, the arbiter requires the object from its schedulers. It will iterate in its schedulers list until a matching object is found. Else it will return a Json structure containing _status and _message properties. When found, the result is a serialized object which is a Json structure containing: - content: the serialized object content - __sys_python_module__: the python class of the returned object The Alignak unserialize function of the alignak.misc.serialization package allows to restore the initial object. .. code-block:: python from alignak.misc.serialization import unserialize from alignak.objects.hostgroup import Hostgroup raw_data = req.get("http://127.0.0.1:7768/object/hostgroup/allhosts") print("Got: %s / %s" % (raw_data.status_code, raw_data.content)) assert raw_data.status_code == 200 object = raw_data.json() group = unserialize(object, True) assert group.__class__ == Hostgroup assert group.get_name() == 'allhosts' As an example: { "__sys_python_module__": "alignak.objects.hostgroup.Hostgroup", "content": { "uuid": "32248642-97dd-4f39-aaa2-5120112a765d", "name": "", "hostgroup_name": "allhosts", "use": [], "tags": [], "alias": "All Hosts", "notes": "", "definition_order": 100, "register": true, "unknown_members": [], "notes_url": "", "action_url": "", "imported_from": "unknown", "conf_is_correct": true, "configuration_errors": [], "configuration_warnings": [], "realm": "", "downtimes": {}, "hostgroup_members": [], "members": [ "553d47bc-27aa-426c-a664-49c4c0c4a249", "f88093ca-e61b-43ff-a41e-613f7ad2cea2", "df1e2e13-552d-43de-ad2a-fe80ad4ba979", "d3d667dd-f583-4668-9f44-22ef3dcb53ad" ] } } :param o_type: searched object type :type o_type: str :param o_name: searched object name (or uuid) :type o_name: str :return: serialized object information :rtype: str """ for scheduler_link in self.app.conf.schedulers: sched_res = scheduler_link.con.get('object', {'o_type': o_type, 
'o_name': o_name}, wait=True) if isinstance(sched_res, dict) and 'content' in sched_res: return sched_res return {'_status': u'ERR', '_message': u'Required %s not found.' % o_type}
[ "def", "object", "(", "self", ",", "o_type", ",", "o_name", "=", "None", ")", ":", "for", "scheduler_link", "in", "self", ".", "app", ".", "conf", ".", "schedulers", ":", "sched_res", "=", "scheduler_link", ".", "con", ".", "get", "(", "'object'", ",", "{", "'o_type'", ":", "o_type", ",", "'o_name'", ":", "o_name", "}", ",", "wait", "=", "True", ")", "if", "isinstance", "(", "sched_res", ",", "dict", ")", "and", "'content'", "in", "sched_res", ":", "return", "sched_res", "return", "{", "'_status'", ":", "u'ERR'", ",", "'_message'", ":", "u'Required %s not found.'", "%", "o_type", "}" ]
40.805556
19.222222
def find_point_bin(self, chi_coords): """ Given a set of coordinates in the chi parameter space, identify the indices of the chi1 and chi2 bins that the point occurs in. Returns these indices. Parameters ----------- chi_coords : numpy.array The position of the point in the chi coordinates. Returns -------- chi1_bin : int Index of the chi_1 bin. chi2_bin : int Index of the chi_2 bin. """ # Identify bin chi1_bin = int((chi_coords[0] - self.chi1_min) // self.bin_spacing) chi2_bin = int((chi_coords[1] - self.chi2_min) // self.bin_spacing) self.check_bin_existence(chi1_bin, chi2_bin) return chi1_bin, chi2_bin
[ "def", "find_point_bin", "(", "self", ",", "chi_coords", ")", ":", "# Identify bin", "chi1_bin", "=", "int", "(", "(", "chi_coords", "[", "0", "]", "-", "self", ".", "chi1_min", ")", "//", "self", ".", "bin_spacing", ")", "chi2_bin", "=", "int", "(", "(", "chi_coords", "[", "1", "]", "-", "self", ".", "chi2_min", ")", "//", "self", ".", "bin_spacing", ")", "self", ".", "check_bin_existence", "(", "chi1_bin", ",", "chi2_bin", ")", "return", "chi1_bin", ",", "chi2_bin" ]
33.173913
19.347826
def _colorize(output): """ Return `output` colorized with Pygments, if available. """ if not pygments: return output # Available styles # ['monokai', 'manni', 'rrt', 'perldoc', 'borland', 'colorful', 'default', # 'murphy', 'vs', 'trac', 'tango', 'fruity', 'autumn', 'bw', 'emacs', # 'vim', 'pastie', 'friendly', 'native'] return pygments.highlight(output, pygments.lexers.PythonLexer(), pygments.formatters.Terminal256Formatter(style='monokai'))
[ "def", "_colorize", "(", "output", ")", ":", "if", "not", "pygments", ":", "return", "output", "# Available styles", "# ['monokai', 'manni', 'rrt', 'perldoc', 'borland', 'colorful', 'default',", "# 'murphy', 'vs', 'trac', 'tango', 'fruity', 'autumn', 'bw', 'emacs',", "# 'vim', 'pastie', 'friendly', 'native']", "return", "pygments", ".", "highlight", "(", "output", ",", "pygments", ".", "lexers", ".", "PythonLexer", "(", ")", ",", "pygments", ".", "formatters", ".", "Terminal256Formatter", "(", "style", "=", "'monokai'", ")", ")" ]
35.785714
17.357143
def get_command(self, name): """ Gets a single command by its unique name. :param str name: Either a fully-qualified XTCE name or an alias in the format ``NAMESPACE/NAME``. :rtype: .Command """ name = adapt_name_for_rest(name) url = '/mdb/{}/commands{}'.format(self._instance, name) response = self._client.get_proto(url) message = mdb_pb2.CommandInfo() message.ParseFromString(response.content) return Command(message)
[ "def", "get_command", "(", "self", ",", "name", ")", ":", "name", "=", "adapt_name_for_rest", "(", "name", ")", "url", "=", "'/mdb/{}/commands{}'", ".", "format", "(", "self", ".", "_instance", ",", "name", ")", "response", "=", "self", ".", "_client", ".", "get_proto", "(", "url", ")", "message", "=", "mdb_pb2", ".", "CommandInfo", "(", ")", "message", ".", "ParseFromString", "(", "response", ".", "content", ")", "return", "Command", "(", "message", ")" ]
37.142857
12.428571
def _b64_to_bstr(b64str): """Deserialize base64 encoded string into string. :param str b64str: response string to be deserialized. :rtype: bytearray :raises: TypeError if string format invalid. """ padding = '=' * (3 - (len(b64str) + 3) % 4) b64str = b64str + padding encoded = b64str.replace('-', '+').replace('_', '/') return b64decode(encoded)
[ "def", "_b64_to_bstr", "(", "b64str", ")", ":", "padding", "=", "'='", "*", "(", "3", "-", "(", "len", "(", "b64str", ")", "+", "3", ")", "%", "4", ")", "b64str", "=", "b64str", "+", "padding", "encoded", "=", "b64str", ".", "replace", "(", "'-'", ",", "'+'", ")", ".", "replace", "(", "'_'", ",", "'/'", ")", "return", "b64decode", "(", "encoded", ")" ]
37.3
10.5
def allocate_sync_ensembles(dynamic, tolerance = 0.1, threshold = 1.0, ignore = None): """! @brief Allocate clusters in line with ensembles of synchronous oscillators where each synchronous ensemble corresponds to only one cluster. @param[in] dynamic (dynamic): Dynamic of each oscillator. @param[in] tolerance (double): Maximum error for allocation of synchronous ensemble oscillators. @param[in] threshold (double): Amlitude trigger when spike is taken into account. @param[in] ignore (bool): Set of indexes that shouldn't be taken into account. @return (list) Grours (lists) of indexes of synchronous oscillators, for example, [ [index_osc1, index_osc3], [index_osc2], [index_osc4, index_osc5] ]. """ descriptors = [ [] for _ in range(len(dynamic[0])) ]; # Check from the end for obtaining result for index_dyn in range(0, len(dynamic[0]), 1): if ((ignore is not None) and (index_dyn in ignore)): continue; time_stop_simulation = len(dynamic) - 1; active_state = False; if (dynamic[time_stop_simulation][index_dyn] > threshold): active_state = True; # if active state is detected, it means we don't have whole oscillatory period for the considered oscillator, should be skipped. 
if (active_state is True): while ( (dynamic[time_stop_simulation][index_dyn] > threshold) and (time_stop_simulation > 0) ): time_stop_simulation -= 1; # if there are no any oscillation than let's consider it like noise if (time_stop_simulation == 0): continue; # reset active_state = False; desc = [0, 0, 0]; # end, start, average time of oscillation for t in range(time_stop_simulation, 0, -1): if ( (dynamic[t][index_dyn] > threshold) and (active_state is False) ): desc[0] = t; active_state = True; elif ( (dynamic[t][index_dyn] < threshold) and (active_state is True) ): desc[1] = t; active_state = False; break; if (desc == [0, 0, 0]): continue; desc[2] = desc[1] + (desc[0] - desc[1]) / 2.0; descriptors[index_dyn] = desc; # Cluster allocation sync_ensembles = []; desc_sync_ensembles = []; for index_desc in range(0, len(descriptors), 1): if (descriptors[index_desc] == []): continue; if (len(sync_ensembles) == 0): desc_ensemble = descriptors[index_desc]; reducer = (desc_ensemble[0] - desc_ensemble[1]) * tolerance; desc_ensemble[0] = desc_ensemble[2] + reducer; desc_ensemble[1] = desc_ensemble[2] - reducer; desc_sync_ensembles.append(desc_ensemble); sync_ensembles.append([ index_desc ]); else: oscillator_captured = False; for index_ensemble in range(0, len(sync_ensembles), 1): if ( (desc_sync_ensembles[index_ensemble][0] > descriptors[index_desc][2]) and (desc_sync_ensembles[index_ensemble][1] < descriptors[index_desc][2])): sync_ensembles[index_ensemble].append(index_desc); oscillator_captured = True; break; if (oscillator_captured is False): desc_ensemble = descriptors[index_desc]; reducer = (desc_ensemble[0] - desc_ensemble[1]) * tolerance; desc_ensemble[0] = desc_ensemble[2] + reducer; desc_ensemble[1] = desc_ensemble[2] - reducer; desc_sync_ensembles.append(desc_ensemble); sync_ensembles.append([ index_desc ]); return sync_ensembles;
[ "def", "allocate_sync_ensembles", "(", "dynamic", ",", "tolerance", "=", "0.1", ",", "threshold", "=", "1.0", ",", "ignore", "=", "None", ")", ":", "descriptors", "=", "[", "[", "]", "for", "_", "in", "range", "(", "len", "(", "dynamic", "[", "0", "]", ")", ")", "]", "# Check from the end for obtaining result\r", "for", "index_dyn", "in", "range", "(", "0", ",", "len", "(", "dynamic", "[", "0", "]", ")", ",", "1", ")", ":", "if", "(", "(", "ignore", "is", "not", "None", ")", "and", "(", "index_dyn", "in", "ignore", ")", ")", ":", "continue", "time_stop_simulation", "=", "len", "(", "dynamic", ")", "-", "1", "active_state", "=", "False", "if", "(", "dynamic", "[", "time_stop_simulation", "]", "[", "index_dyn", "]", ">", "threshold", ")", ":", "active_state", "=", "True", "# if active state is detected, it means we don't have whole oscillatory period for the considered oscillator, should be skipped.\r", "if", "(", "active_state", "is", "True", ")", ":", "while", "(", "(", "dynamic", "[", "time_stop_simulation", "]", "[", "index_dyn", "]", ">", "threshold", ")", "and", "(", "time_stop_simulation", ">", "0", ")", ")", ":", "time_stop_simulation", "-=", "1", "# if there are no any oscillation than let's consider it like noise\r", "if", "(", "time_stop_simulation", "==", "0", ")", ":", "continue", "# reset\r", "active_state", "=", "False", "desc", "=", "[", "0", ",", "0", ",", "0", "]", "# end, start, average time of oscillation\r", "for", "t", "in", "range", "(", "time_stop_simulation", ",", "0", ",", "-", "1", ")", ":", "if", "(", "(", "dynamic", "[", "t", "]", "[", "index_dyn", "]", ">", "threshold", ")", "and", "(", "active_state", "is", "False", ")", ")", ":", "desc", "[", "0", "]", "=", "t", "active_state", "=", "True", "elif", "(", "(", "dynamic", "[", "t", "]", "[", "index_dyn", "]", "<", "threshold", ")", "and", "(", "active_state", "is", "True", ")", ")", ":", "desc", "[", "1", "]", "=", "t", "active_state", "=", "False", "break", "if", "(", "desc", "==", 
"[", "0", ",", "0", ",", "0", "]", ")", ":", "continue", "desc", "[", "2", "]", "=", "desc", "[", "1", "]", "+", "(", "desc", "[", "0", "]", "-", "desc", "[", "1", "]", ")", "/", "2.0", "descriptors", "[", "index_dyn", "]", "=", "desc", "# Cluster allocation\r", "sync_ensembles", "=", "[", "]", "desc_sync_ensembles", "=", "[", "]", "for", "index_desc", "in", "range", "(", "0", ",", "len", "(", "descriptors", ")", ",", "1", ")", ":", "if", "(", "descriptors", "[", "index_desc", "]", "==", "[", "]", ")", ":", "continue", "if", "(", "len", "(", "sync_ensembles", ")", "==", "0", ")", ":", "desc_ensemble", "=", "descriptors", "[", "index_desc", "]", "reducer", "=", "(", "desc_ensemble", "[", "0", "]", "-", "desc_ensemble", "[", "1", "]", ")", "*", "tolerance", "desc_ensemble", "[", "0", "]", "=", "desc_ensemble", "[", "2", "]", "+", "reducer", "desc_ensemble", "[", "1", "]", "=", "desc_ensemble", "[", "2", "]", "-", "reducer", "desc_sync_ensembles", ".", "append", "(", "desc_ensemble", ")", "sync_ensembles", ".", "append", "(", "[", "index_desc", "]", ")", "else", ":", "oscillator_captured", "=", "False", "for", "index_ensemble", "in", "range", "(", "0", ",", "len", "(", "sync_ensembles", ")", ",", "1", ")", ":", "if", "(", "(", "desc_sync_ensembles", "[", "index_ensemble", "]", "[", "0", "]", ">", "descriptors", "[", "index_desc", "]", "[", "2", "]", ")", "and", "(", "desc_sync_ensembles", "[", "index_ensemble", "]", "[", "1", "]", "<", "descriptors", "[", "index_desc", "]", "[", "2", "]", ")", ")", ":", "sync_ensembles", "[", "index_ensemble", "]", ".", "append", "(", "index_desc", ")", "oscillator_captured", "=", "True", "break", "if", "(", "oscillator_captured", "is", "False", ")", ":", "desc_ensemble", "=", "descriptors", "[", "index_desc", "]", "reducer", "=", "(", "desc_ensemble", "[", "0", "]", "-", "desc_ensemble", "[", "1", "]", ")", "*", "tolerance", "desc_ensemble", "[", "0", "]", "=", "desc_ensemble", "[", "2", "]", "+", "reducer", "desc_ensemble", "[", 
"1", "]", "=", "desc_ensemble", "[", "2", "]", "-", "reducer", "desc_sync_ensembles", ".", "append", "(", "desc_ensemble", ")", "sync_ensembles", ".", "append", "(", "[", "index_desc", "]", ")", "return", "sync_ensembles" ]
42.489362
24.414894
def from_point_record(cls, other_point_record, new_point_format): """ Construct a new PackedPointRecord from an existing one with the ability to change to point format while doing so """ array = np.zeros_like(other_point_record.array, dtype=new_point_format.dtype) new_record = cls(array, new_point_format) new_record.copy_fields_from(other_point_record) return new_record
[ "def", "from_point_record", "(", "cls", ",", "other_point_record", ",", "new_point_format", ")", ":", "array", "=", "np", ".", "zeros_like", "(", "other_point_record", ".", "array", ",", "dtype", "=", "new_point_format", ".", "dtype", ")", "new_record", "=", "cls", "(", "array", ",", "new_point_format", ")", "new_record", ".", "copy_fields_from", "(", "other_point_record", ")", "return", "new_record" ]
52.75
13.875
def export_agent(self, parent, agent_uri=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ Exports the specified agent to a ZIP file. Operation <response: ``ExportAgentResponse``, metadata: [google.protobuf.Struct][google.protobuf.Struct]> Example: >>> import dialogflow_v2 >>> >>> client = dialogflow_v2.AgentsClient() >>> >>> parent = client.project_path('[PROJECT]') >>> >>> response = client.export_agent(parent) >>> >>> def callback(operation_future): ... # Handle result. ... result = operation_future.result() >>> >>> response.add_done_callback(callback) >>> >>> # Handle metadata. >>> metadata = response.metadata() Args: parent (str): Required. The project that the agent to export is associated with. Format: ``projects/<Project ID>``. agent_uri (str): Optional. The Google Cloud Storage URI to export the agent to. Note: The URI must start with \"gs://\". If left unspecified, the serialized agent is returned inline. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.dialogflow_v2.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
if 'export_agent' not in self._inner_api_calls: self._inner_api_calls[ 'export_agent'] = google.api_core.gapic_v1.method.wrap_method( self.transport.export_agent, default_retry=self._method_configs['ExportAgent'].retry, default_timeout=self._method_configs['ExportAgent'] .timeout, client_info=self._client_info, ) request = agent_pb2.ExportAgentRequest( parent=parent, agent_uri=agent_uri, ) operation = self._inner_api_calls['export_agent']( request, retry=retry, timeout=timeout, metadata=metadata) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, agent_pb2.ExportAgentResponse, metadata_type=struct_pb2.Struct, )
[ "def", "export_agent", "(", "self", ",", "parent", ",", "agent_uri", "=", "None", ",", "retry", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "timeout", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "metadata", "=", "None", ")", ":", "# Wrap the transport method to add retry and timeout logic.", "if", "'export_agent'", "not", "in", "self", ".", "_inner_api_calls", ":", "self", ".", "_inner_api_calls", "[", "'export_agent'", "]", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "wrap_method", "(", "self", ".", "transport", ".", "export_agent", ",", "default_retry", "=", "self", ".", "_method_configs", "[", "'ExportAgent'", "]", ".", "retry", ",", "default_timeout", "=", "self", ".", "_method_configs", "[", "'ExportAgent'", "]", ".", "timeout", ",", "client_info", "=", "self", ".", "_client_info", ",", ")", "request", "=", "agent_pb2", ".", "ExportAgentRequest", "(", "parent", "=", "parent", ",", "agent_uri", "=", "agent_uri", ",", ")", "operation", "=", "self", ".", "_inner_api_calls", "[", "'export_agent'", "]", "(", "request", ",", "retry", "=", "retry", ",", "timeout", "=", "timeout", ",", "metadata", "=", "metadata", ")", "return", "google", ".", "api_core", ".", "operation", ".", "from_gapic", "(", "operation", ",", "self", ".", "transport", ".", "_operations_client", ",", "agent_pb2", ".", "ExportAgentResponse", ",", "metadata_type", "=", "struct_pb2", ".", "Struct", ",", ")" ]
42.858974
21.474359
def parse_docstring(self, content): """ Parse the given full docstring, and extract method description, arguments, and return documentation. This method try to find arguments description and types, and put the information in "args_doc" and "signature" members. Also parse return type and description, and put the information in "return_doc" member. All other lines are added to the returned string :param content: The full docstring :type content: str :return: The parsed method description :rtype: str """ if not content: return raw_docstring = '' # We use the helper defined in django admindocs app to remove indentation chars from docstring, # and parse it as title, body, metadata. We don't use metadata for now. docstring = trim_docstring(content) for line in docstring.split('\n'): # Empty line if not line: raw_docstring += '\n' continue param_match = PARAM_REXP.match(line) if param_match: param_name, description = param_match.group(1, 2) if param_name == 'kwargs': continue doc = self.args_doc.setdefault(param_name, {}) doc['text'] = description continue param_type_match = PARAM_TYPE_REXP.match(line) if param_type_match: param_name, param_type = param_type_match.group(1, 2) if param_name == 'kwargs': continue doc = self.args_doc.setdefault(param_name, {}) doc['type'] = param_type self.signature.append(param_type) continue return_match = RETURN_REXP.match(line) if return_match: return_description = return_match.group(1) self.return_doc['text'] = return_description continue return_type_match = RETURN_TYPE_REXP.match(line) if return_type_match: return_description = return_type_match.group(1) self.return_doc['type'] = return_description self.signature.insert(0, return_description) continue # Line doesn't match with known args/return regular expressions, # add the line to raw help text raw_docstring += line + '\n' return raw_docstring
[ "def", "parse_docstring", "(", "self", ",", "content", ")", ":", "if", "not", "content", ":", "return", "raw_docstring", "=", "''", "# We use the helper defined in django admindocs app to remove indentation chars from docstring,", "# and parse it as title, body, metadata. We don't use metadata for now.", "docstring", "=", "trim_docstring", "(", "content", ")", "for", "line", "in", "docstring", ".", "split", "(", "'\\n'", ")", ":", "# Empty line", "if", "not", "line", ":", "raw_docstring", "+=", "'\\n'", "continue", "param_match", "=", "PARAM_REXP", ".", "match", "(", "line", ")", "if", "param_match", ":", "param_name", ",", "description", "=", "param_match", ".", "group", "(", "1", ",", "2", ")", "if", "param_name", "==", "'kwargs'", ":", "continue", "doc", "=", "self", ".", "args_doc", ".", "setdefault", "(", "param_name", ",", "{", "}", ")", "doc", "[", "'text'", "]", "=", "description", "continue", "param_type_match", "=", "PARAM_TYPE_REXP", ".", "match", "(", "line", ")", "if", "param_type_match", ":", "param_name", ",", "param_type", "=", "param_type_match", ".", "group", "(", "1", ",", "2", ")", "if", "param_name", "==", "'kwargs'", ":", "continue", "doc", "=", "self", ".", "args_doc", ".", "setdefault", "(", "param_name", ",", "{", "}", ")", "doc", "[", "'type'", "]", "=", "param_type", "self", ".", "signature", ".", "append", "(", "param_type", ")", "continue", "return_match", "=", "RETURN_REXP", ".", "match", "(", "line", ")", "if", "return_match", ":", "return_description", "=", "return_match", ".", "group", "(", "1", ")", "self", ".", "return_doc", "[", "'text'", "]", "=", "return_description", "continue", "return_type_match", "=", "RETURN_TYPE_REXP", ".", "match", "(", "line", ")", "if", "return_type_match", ":", "return_description", "=", "return_type_match", ".", "group", "(", "1", ")", "self", ".", "return_doc", "[", "'type'", "]", "=", "return_description", "self", ".", "signature", ".", "insert", "(", "0", ",", "return_description", ")", 
"continue", "# Line doesn't match with known args/return regular expressions,", "# add the line to raw help text", "raw_docstring", "+=", "line", "+", "'\\n'", "return", "raw_docstring" ]
36.279412
21.955882
def values(self): """Gets the user enter max and min values of where the raster points should appear on the y-axis :returns: (float, float) -- (min, max) y-values to bound the raster plot by """ lower = float(self.lowerSpnbx.value()) upper = float(self.upperSpnbx.value()) return (lower, upper)
[ "def", "values", "(", "self", ")", ":", "lower", "=", "float", "(", "self", ".", "lowerSpnbx", ".", "value", "(", ")", ")", "upper", "=", "float", "(", "self", ".", "upperSpnbx", ".", "value", "(", ")", ")", "return", "(", "lower", ",", "upper", ")" ]
38.222222
15.333333
def _unpack(c, tmp, package, version, git_url=None): """ Download + unpack given package into temp dir ``tmp``. Return ``(real_version, source)`` where ``real_version`` is the "actual" version downloaded (e.g. if a Git master was indicated, it will be the SHA of master HEAD) and ``source`` is the source directory (relative to unpacked source) to import into ``<project>/vendor``. """ real_version = version[:] source = None if git_url: pass # git clone into tempdir # git checkout <version> # set target to checkout # if version does not look SHA-ish: # in the checkout, obtain SHA from that branch # set real_version to that value else: cwd = os.getcwd() print("Moving into temp dir %s" % tmp) os.chdir(tmp) try: # Nab from index. Skip wheels; we want to unpack an sdist. flags = "--download=. --build=build --no-use-wheel" cmd = "pip install %s %s==%s" % (flags, package, version) c.run(cmd) # Identify basename # TODO: glob is bad here because pip install --download gets all # dependencies too! ugh. Figure out best approach for that. globs = [] globexpr = "" for extension, opener in ( ("zip", "unzip"), ("tgz", "tar xzvf"), ("tar.gz", "tar xzvf"), ): globexpr = "*.{0}".format(extension) globs = glob(globexpr) if globs: break archive = os.path.basename(globs[0]) source, _, _ = archive.rpartition(".{0}".format(extension)) c.run("{0} {1}".format(opener, globexpr)) finally: os.chdir(cwd) return real_version, source
[ "def", "_unpack", "(", "c", ",", "tmp", ",", "package", ",", "version", ",", "git_url", "=", "None", ")", ":", "real_version", "=", "version", "[", ":", "]", "source", "=", "None", "if", "git_url", ":", "pass", "# git clone into tempdir", "# git checkout <version>", "# set target to checkout", "# if version does not look SHA-ish:", "# in the checkout, obtain SHA from that branch", "# set real_version to that value", "else", ":", "cwd", "=", "os", ".", "getcwd", "(", ")", "print", "(", "\"Moving into temp dir %s\"", "%", "tmp", ")", "os", ".", "chdir", "(", "tmp", ")", "try", ":", "# Nab from index. Skip wheels; we want to unpack an sdist.", "flags", "=", "\"--download=. --build=build --no-use-wheel\"", "cmd", "=", "\"pip install %s %s==%s\"", "%", "(", "flags", ",", "package", ",", "version", ")", "c", ".", "run", "(", "cmd", ")", "# Identify basename", "# TODO: glob is bad here because pip install --download gets all", "# dependencies too! ugh. Figure out best approach for that.", "globs", "=", "[", "]", "globexpr", "=", "\"\"", "for", "extension", ",", "opener", "in", "(", "(", "\"zip\"", ",", "\"unzip\"", ")", ",", "(", "\"tgz\"", ",", "\"tar xzvf\"", ")", ",", "(", "\"tar.gz\"", ",", "\"tar xzvf\"", ")", ",", ")", ":", "globexpr", "=", "\"*.{0}\"", ".", "format", "(", "extension", ")", "globs", "=", "glob", "(", "globexpr", ")", "if", "globs", ":", "break", "archive", "=", "os", ".", "path", ".", "basename", "(", "globs", "[", "0", "]", ")", "source", ",", "_", ",", "_", "=", "archive", ".", "rpartition", "(", "\".{0}\"", ".", "format", "(", "extension", ")", ")", "c", ".", "run", "(", "\"{0} {1}\"", ".", "format", "(", "opener", ",", "globexpr", ")", ")", "finally", ":", "os", ".", "chdir", "(", "cwd", ")", "return", "real_version", ",", "source" ]
37.791667
17.083333
def draw_best_fit(X, y, ax, estimator='linear', **kwargs): """ Uses Scikit-Learn to fit a model to X and y then uses the resulting model to predict the curve based on the X values. This curve is drawn to the ax (matplotlib axis) which must be passed as the third variable. The estimator function can be one of the following: - ``'linear'``: Uses OLS to fit the regression - ``'quadratic'``: Uses OLS with Polynomial order 2 - ``'exponential'``: Not implemented yet - ``'log'``: Not implemented yet - ``'select_best'``: Selects the best fit via MSE The remaining keyword arguments are passed to ax.plot to define and describe the line of best fit. Parameters ---------- X : ndarray or DataFrame of shape n x m A matrix of n instances with m features y : ndarray or Series of length n An array or series of target or class values ax : matplotlib Axes, default: None The axis to plot the figure on. If None is passed in the current axes will be used (or generated if required). estimator : string, default: 'linear' The name of the estimator function used to draw the best fit line. The estimator can currently be one of linear, quadratic, exponential, log, or select_best. The select best method uses the minimum MSE to select the best fit line. kwargs : dict Keyword arguments to pass to the matplotlib plot function to style and label the line of best fit. By default, the standard line color is used unless the color keyword argument is passed in. Returns ------- ax : matplotlib Axes The axes with the line drawn on it. """ # Estimators are the types of best fit lines that can be drawn. estimators = { LINEAR: fit_linear, # Uses OLS to fit the regression QUADRATIC: fit_quadratic, # Uses OLS with Polynomial order 2 EXPONENTIAL: fit_exponential, # Not implemented yet LOG: fit_log, # Not implemented yet SELECT_BEST: fit_select_best, # Selects the best fit via MSE } # Check to make sure that a correct estimator value was passed in. 
if estimator not in estimators: raise YellowbrickValueError( "'{}' not a valid type of estimator; choose from {}".format( estimator, ", ".join(estimators.keys()) ) ) # Then collect the estimator function from the mapping. estimator = estimators[estimator] # Ensure that X and y are the same length if len(X) != len(y): raise YellowbrickValueError(( "X and y must have same length:" " X len {} doesn't match y len {}!" ).format(len(X), len(y))) # Ensure that X and y are np.arrays X = np.array(X) y = np.array(y) # Verify that X is a two dimensional array for Scikit-Learn esitmators # and that its dimensions are (n, 1) where n is the number of rows. if X.ndim < 2: X = X[:,np.newaxis] # Reshape X into the correct dimensions if X.ndim > 2: raise YellowbrickValueError( "X must be a (1,) or (n,1) dimensional array not {}".format(X.shape) ) # Verify that y is a (n,) dimensional array if y.ndim > 1: raise YellowbrickValueError( "y must be a (1,) dimensional array not {}".format(y.shape) ) # Uses the estimator to fit the data and get the model back. model = estimator(X, y) # Set the color if not passed in. if 'c' not in kwargs and 'color' not in kwargs: kwargs['color'] = LINE_COLOR # Get the current working axes ax = ax or plt.gca() # Plot line of best fit onto the axes that were passed in. # TODO: determine if xlim or X.min(), X.max() are better params xr = np.linspace(*ax.get_xlim(), num=100) ax.plot(xr, model.predict(xr[:,np.newaxis]), **kwargs) return ax
[ "def", "draw_best_fit", "(", "X", ",", "y", ",", "ax", ",", "estimator", "=", "'linear'", ",", "*", "*", "kwargs", ")", ":", "# Estimators are the types of best fit lines that can be drawn.", "estimators", "=", "{", "LINEAR", ":", "fit_linear", ",", "# Uses OLS to fit the regression", "QUADRATIC", ":", "fit_quadratic", ",", "# Uses OLS with Polynomial order 2", "EXPONENTIAL", ":", "fit_exponential", ",", "# Not implemented yet", "LOG", ":", "fit_log", ",", "# Not implemented yet", "SELECT_BEST", ":", "fit_select_best", ",", "# Selects the best fit via MSE", "}", "# Check to make sure that a correct estimator value was passed in.", "if", "estimator", "not", "in", "estimators", ":", "raise", "YellowbrickValueError", "(", "\"'{}' not a valid type of estimator; choose from {}\"", ".", "format", "(", "estimator", ",", "\", \"", ".", "join", "(", "estimators", ".", "keys", "(", ")", ")", ")", ")", "# Then collect the estimator function from the mapping.", "estimator", "=", "estimators", "[", "estimator", "]", "# Ensure that X and y are the same length", "if", "len", "(", "X", ")", "!=", "len", "(", "y", ")", ":", "raise", "YellowbrickValueError", "(", "(", "\"X and y must have same length:\"", "\" X len {} doesn't match y len {}!\"", ")", ".", "format", "(", "len", "(", "X", ")", ",", "len", "(", "y", ")", ")", ")", "# Ensure that X and y are np.arrays", "X", "=", "np", ".", "array", "(", "X", ")", "y", "=", "np", ".", "array", "(", "y", ")", "# Verify that X is a two dimensional array for Scikit-Learn esitmators", "# and that its dimensions are (n, 1) where n is the number of rows.", "if", "X", ".", "ndim", "<", "2", ":", "X", "=", "X", "[", ":", ",", "np", ".", "newaxis", "]", "# Reshape X into the correct dimensions", "if", "X", ".", "ndim", ">", "2", ":", "raise", "YellowbrickValueError", "(", "\"X must be a (1,) or (n,1) dimensional array not {}\"", ".", "format", "(", "X", ".", "shape", ")", ")", "# Verify that y is a (n,) dimensional array", "if", "y", ".", 
"ndim", ">", "1", ":", "raise", "YellowbrickValueError", "(", "\"y must be a (1,) dimensional array not {}\"", ".", "format", "(", "y", ".", "shape", ")", ")", "# Uses the estimator to fit the data and get the model back.", "model", "=", "estimator", "(", "X", ",", "y", ")", "# Set the color if not passed in.", "if", "'c'", "not", "in", "kwargs", "and", "'color'", "not", "in", "kwargs", ":", "kwargs", "[", "'color'", "]", "=", "LINE_COLOR", "# Get the current working axes", "ax", "=", "ax", "or", "plt", ".", "gca", "(", ")", "# Plot line of best fit onto the axes that were passed in.", "# TODO: determine if xlim or X.min(), X.max() are better params", "xr", "=", "np", ".", "linspace", "(", "*", "ax", ".", "get_xlim", "(", ")", ",", "num", "=", "100", ")", "ax", ".", "plot", "(", "xr", ",", "model", ".", "predict", "(", "xr", "[", ":", ",", "np", ".", "newaxis", "]", ")", ",", "*", "*", "kwargs", ")", "return", "ax" ]
35.752294
22.908257
def silhouette_score(X, labels, metric='euclidean', sample_size=None, random_state=None, **kwds): """Compute the mean Silhouette Coefficient of all samples. The Silhouette Coefficient is calculated using the mean intra-cluster distance (``a``) and the mean nearest-cluster distance (``b``) for each sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a, b)``. To clarify, ``b`` is the distance between a sample and the nearest cluster that the sample is not a part of. Note that Silhouette Coefficient is only defined if number of labels is 2 <= n_labels <= n_samples - 1. This function returns the mean Silhouette Coefficient over all samples. To obtain the values for each sample, use :func:`silhouette_samples`. The best value is 1 and the worst value is -1. Values near 0 indicate overlapping clusters. Negative values generally indicate that a sample has been assigned to the wrong cluster, as a different cluster is more similar. Read more in the :ref:`User Guide <silhouette_coefficient>`. Parameters ---------- X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \ [n_samples_a, n_features] otherwise Array of pairwise distances between samples, or a feature array. labels : array, shape = [n_samples] Predicted labels for each sample. metric : string, or callable The metric to use when calculating distance between instances in a feature array. If metric is a string, it must be one of the options allowed by :func:`metrics.pairwise.pairwise_distances <sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance array itself, use ``metric="precomputed"``. sample_size : int or None The size of the sample to use when computing the Silhouette Coefficient on a random subset of the data. If ``sample_size is None``, no sampling is used. random_state : int, RandomState instance or None, optional (default=None) The generator used to randomly select a subset of samples. 
If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Used when ``sample_size is not None``. **kwds : optional keyword parameters Any further parameters are passed directly to the distance function. If using a scipy.spatial.distance metric, the parameters are still metric dependent. See the scipy docs for usage examples. Returns ------- silhouette : float Mean Silhouette Coefficient for all samples. References ---------- .. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the Interpretation and Validation of Cluster Analysis". Computational and Applied Mathematics 20: 53-65. <http://www.sciencedirect.com/science/article/pii/0377042787901257>`_ .. [2] `Wikipedia entry on the Silhouette Coefficient <https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_ """ if sample_size is not None: X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr']) random_state = check_random_state(random_state) indices = random_state.permutation(X.shape[0])[:sample_size] if metric == "precomputed": X, labels = X[indices].T[indices].T, labels[indices] else: X, labels = X[indices], labels[indices] return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
[ "def", "silhouette_score", "(", "X", ",", "labels", ",", "metric", "=", "'euclidean'", ",", "sample_size", "=", "None", ",", "random_state", "=", "None", ",", "*", "*", "kwds", ")", ":", "if", "sample_size", "is", "not", "None", ":", "X", ",", "labels", "=", "check_X_y", "(", "X", ",", "labels", ",", "accept_sparse", "=", "[", "'csc'", ",", "'csr'", "]", ")", "random_state", "=", "check_random_state", "(", "random_state", ")", "indices", "=", "random_state", ".", "permutation", "(", "X", ".", "shape", "[", "0", "]", ")", "[", ":", "sample_size", "]", "if", "metric", "==", "\"precomputed\"", ":", "X", ",", "labels", "=", "X", "[", "indices", "]", ".", "T", "[", "indices", "]", ".", "T", ",", "labels", "[", "indices", "]", "else", ":", "X", ",", "labels", "=", "X", "[", "indices", "]", ",", "labels", "[", "indices", "]", "return", "np", ".", "mean", "(", "silhouette_samples", "(", "X", ",", "labels", ",", "metric", "=", "metric", ",", "*", "*", "kwds", ")", ")" ]
45.25
26.4875
def run_autoapi(app): """ Load AutoAPI data from the filesystem. """ if not app.config.autoapi_dirs: raise ExtensionError("You must configure an autoapi_dirs setting") # Make sure the paths are full normalized_dirs = [] autoapi_dirs = app.config.autoapi_dirs if isinstance(autoapi_dirs, str): autoapi_dirs = [autoapi_dirs] for path in autoapi_dirs: if os.path.isabs(path): normalized_dirs.append(path) else: normalized_dirs.append(os.path.normpath(os.path.join(app.confdir, path))) for _dir in normalized_dirs: if not os.path.exists(_dir): raise ExtensionError( "AutoAPI Directory `{dir}` not found. " "Please check your `autoapi_dirs` setting.".format(dir=_dir) ) normalized_root = os.path.normpath( os.path.join(app.confdir, app.config.autoapi_root) ) url_root = os.path.join("/", app.config.autoapi_root) sphinx_mapper = default_backend_mapping[app.config.autoapi_type] sphinx_mapper_obj = sphinx_mapper( app, template_dir=app.config.autoapi_template_dir, url_root=url_root ) app.env.autoapi_mapper = sphinx_mapper_obj if app.config.autoapi_file_patterns: file_patterns = app.config.autoapi_file_patterns else: file_patterns = default_file_mapping.get(app.config.autoapi_type, []) if app.config.autoapi_ignore: ignore_patterns = app.config.autoapi_ignore else: ignore_patterns = default_ignore_patterns.get(app.config.autoapi_type, []) if ".rst" in app.config.source_suffix: out_suffix = ".rst" elif ".txt" in app.config.source_suffix: out_suffix = ".txt" else: # Fallback to first suffix listed out_suffix = app.config.source_suffix[0] # Actual meat of the run. 
LOGGER.info(bold("[AutoAPI] ") + darkgreen("Loading Data")) sphinx_mapper_obj.load( patterns=file_patterns, dirs=normalized_dirs, ignore=ignore_patterns ) LOGGER.info(bold("[AutoAPI] ") + darkgreen("Mapping Data")) sphinx_mapper_obj.map(options=app.config.autoapi_options) if app.config.autoapi_generate_api_docs: LOGGER.info(bold("[AutoAPI] ") + darkgreen("Rendering Data")) sphinx_mapper_obj.output_rst(root=normalized_root, source_suffix=out_suffix)
[ "def", "run_autoapi", "(", "app", ")", ":", "if", "not", "app", ".", "config", ".", "autoapi_dirs", ":", "raise", "ExtensionError", "(", "\"You must configure an autoapi_dirs setting\"", ")", "# Make sure the paths are full", "normalized_dirs", "=", "[", "]", "autoapi_dirs", "=", "app", ".", "config", ".", "autoapi_dirs", "if", "isinstance", "(", "autoapi_dirs", ",", "str", ")", ":", "autoapi_dirs", "=", "[", "autoapi_dirs", "]", "for", "path", "in", "autoapi_dirs", ":", "if", "os", ".", "path", ".", "isabs", "(", "path", ")", ":", "normalized_dirs", ".", "append", "(", "path", ")", "else", ":", "normalized_dirs", ".", "append", "(", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "app", ".", "confdir", ",", "path", ")", ")", ")", "for", "_dir", "in", "normalized_dirs", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "_dir", ")", ":", "raise", "ExtensionError", "(", "\"AutoAPI Directory `{dir}` not found. \"", "\"Please check your `autoapi_dirs` setting.\"", ".", "format", "(", "dir", "=", "_dir", ")", ")", "normalized_root", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "app", ".", "confdir", ",", "app", ".", "config", ".", "autoapi_root", ")", ")", "url_root", "=", "os", ".", "path", ".", "join", "(", "\"/\"", ",", "app", ".", "config", ".", "autoapi_root", ")", "sphinx_mapper", "=", "default_backend_mapping", "[", "app", ".", "config", ".", "autoapi_type", "]", "sphinx_mapper_obj", "=", "sphinx_mapper", "(", "app", ",", "template_dir", "=", "app", ".", "config", ".", "autoapi_template_dir", ",", "url_root", "=", "url_root", ")", "app", ".", "env", ".", "autoapi_mapper", "=", "sphinx_mapper_obj", "if", "app", ".", "config", ".", "autoapi_file_patterns", ":", "file_patterns", "=", "app", ".", "config", ".", "autoapi_file_patterns", "else", ":", "file_patterns", "=", "default_file_mapping", ".", "get", "(", "app", ".", "config", ".", "autoapi_type", ",", "[", "]", ")", "if", "app", ".", 
"config", ".", "autoapi_ignore", ":", "ignore_patterns", "=", "app", ".", "config", ".", "autoapi_ignore", "else", ":", "ignore_patterns", "=", "default_ignore_patterns", ".", "get", "(", "app", ".", "config", ".", "autoapi_type", ",", "[", "]", ")", "if", "\".rst\"", "in", "app", ".", "config", ".", "source_suffix", ":", "out_suffix", "=", "\".rst\"", "elif", "\".txt\"", "in", "app", ".", "config", ".", "source_suffix", ":", "out_suffix", "=", "\".txt\"", "else", ":", "# Fallback to first suffix listed", "out_suffix", "=", "app", ".", "config", ".", "source_suffix", "[", "0", "]", "# Actual meat of the run.", "LOGGER", ".", "info", "(", "bold", "(", "\"[AutoAPI] \"", ")", "+", "darkgreen", "(", "\"Loading Data\"", ")", ")", "sphinx_mapper_obj", ".", "load", "(", "patterns", "=", "file_patterns", ",", "dirs", "=", "normalized_dirs", ",", "ignore", "=", "ignore_patterns", ")", "LOGGER", ".", "info", "(", "bold", "(", "\"[AutoAPI] \"", ")", "+", "darkgreen", "(", "\"Mapping Data\"", ")", ")", "sphinx_mapper_obj", ".", "map", "(", "options", "=", "app", ".", "config", ".", "autoapi_options", ")", "if", "app", ".", "config", ".", "autoapi_generate_api_docs", ":", "LOGGER", ".", "info", "(", "bold", "(", "\"[AutoAPI] \"", ")", "+", "darkgreen", "(", "\"Rendering Data\"", ")", ")", "sphinx_mapper_obj", ".", "output_rst", "(", "root", "=", "normalized_root", ",", "source_suffix", "=", "out_suffix", ")" ]
34.492537
20.641791
def patchURL(self, url, headers, body): """ Request a URL using the HTTP method PATCH. """ return self._load_resource("PATCH", url, headers, body)
[ "def", "patchURL", "(", "self", ",", "url", ",", "headers", ",", "body", ")", ":", "return", "self", ".", "_load_resource", "(", "\"PATCH\"", ",", "url", ",", "headers", ",", "body", ")" ]
34.8
6.8
def setData(self, index, value, role=Qt.EditRole): """ Reimplements the :meth:`QAbstractItemModel.setData` method. :param index: Index. :type index: QModelIndex :param value: Value. :type value: QVariant :param role: Role. :type role: int :return: Method success. :rtype: bool """ if not index.isValid(): return False node = self.get_node(index) if role == Qt.DisplayRole or role == Qt.EditRole: value = foundations.strings.to_string(value.toString()) roles = {Qt.DisplayRole: value, Qt.EditRole: value} else: roles = {role: value} if index.column() == 0: if (node and hasattr(node, "roles")): node.roles.update(roles) node.name = value else: attribute = self.get_attribute(node, index.column()) if (attribute and hasattr(attribute, "roles")): attribute.roles.update(roles) attribute.value = value self.dataChanged.emit(index, index) return True
[ "def", "setData", "(", "self", ",", "index", ",", "value", ",", "role", "=", "Qt", ".", "EditRole", ")", ":", "if", "not", "index", ".", "isValid", "(", ")", ":", "return", "False", "node", "=", "self", ".", "get_node", "(", "index", ")", "if", "role", "==", "Qt", ".", "DisplayRole", "or", "role", "==", "Qt", ".", "EditRole", ":", "value", "=", "foundations", ".", "strings", ".", "to_string", "(", "value", ".", "toString", "(", ")", ")", "roles", "=", "{", "Qt", ".", "DisplayRole", ":", "value", ",", "Qt", ".", "EditRole", ":", "value", "}", "else", ":", "roles", "=", "{", "role", ":", "value", "}", "if", "index", ".", "column", "(", ")", "==", "0", ":", "if", "(", "node", "and", "hasattr", "(", "node", ",", "\"roles\"", ")", ")", ":", "node", ".", "roles", ".", "update", "(", "roles", ")", "node", ".", "name", "=", "value", "else", ":", "attribute", "=", "self", ".", "get_attribute", "(", "node", ",", "index", ".", "column", "(", ")", ")", "if", "(", "attribute", "and", "hasattr", "(", "attribute", ",", "\"roles\"", ")", ")", ":", "attribute", ".", "roles", ".", "update", "(", "roles", ")", "attribute", ".", "value", "=", "value", "self", ".", "dataChanged", ".", "emit", "(", "index", ",", "index", ")", "return", "True" ]
30.972222
16.527778
def generate_words(files): """ Transform list of files to list of words, removing new line character and replace name entity '<NE>...</NE>' and abbreviation '<AB>...</AB>' symbol """ repls = {'<NE>' : '','</NE>' : '','<AB>': '','</AB>': ''} words_all = [] for i, file in enumerate(files): lines = open(file, 'r') for line in lines: line = reduce(lambda a, kv: a.replace(*kv), repls.items(), line) words = [word for word in line.split("|") if word is not '\n'] words_all.extend(words) return words_all
[ "def", "generate_words", "(", "files", ")", ":", "repls", "=", "{", "'<NE>'", ":", "''", ",", "'</NE>'", ":", "''", ",", "'<AB>'", ":", "''", ",", "'</AB>'", ":", "''", "}", "words_all", "=", "[", "]", "for", "i", ",", "file", "in", "enumerate", "(", "files", ")", ":", "lines", "=", "open", "(", "file", ",", "'r'", ")", "for", "line", "in", "lines", ":", "line", "=", "reduce", "(", "lambda", "a", ",", "kv", ":", "a", ".", "replace", "(", "*", "kv", ")", ",", "repls", ".", "items", "(", ")", ",", "line", ")", "words", "=", "[", "word", "for", "word", "in", "line", ".", "split", "(", "\"|\"", ")", "if", "word", "is", "not", "'\\n'", "]", "words_all", ".", "extend", "(", "words", ")", "return", "words_all" ]
33.764706
18.470588
def check_venv(self): """ Ensure we're inside a virtualenv. """ if self.zappa: venv = self.zappa.get_current_venv() else: # Just for `init`, when we don't have settings yet. venv = Zappa.get_current_venv() if not venv: raise ClickException( click.style("Zappa", bold=True) + " requires an " + click.style("active virtual environment", bold=True, fg="red") + "!\n" + "Learn more about virtual environments here: " + click.style("http://docs.python-guide.org/en/latest/dev/virtualenvs/", bold=False, fg="cyan"))
[ "def", "check_venv", "(", "self", ")", ":", "if", "self", ".", "zappa", ":", "venv", "=", "self", ".", "zappa", ".", "get_current_venv", "(", ")", "else", ":", "# Just for `init`, when we don't have settings yet.", "venv", "=", "Zappa", ".", "get_current_venv", "(", ")", "if", "not", "venv", ":", "raise", "ClickException", "(", "click", ".", "style", "(", "\"Zappa\"", ",", "bold", "=", "True", ")", "+", "\" requires an \"", "+", "click", ".", "style", "(", "\"active virtual environment\"", ",", "bold", "=", "True", ",", "fg", "=", "\"red\"", ")", "+", "\"!\\n\"", "+", "\"Learn more about virtual environments here: \"", "+", "click", ".", "style", "(", "\"http://docs.python-guide.org/en/latest/dev/virtualenvs/\"", ",", "bold", "=", "False", ",", "fg", "=", "\"cyan\"", ")", ")" ]
55.545455
31.272727
def clearScreen(cls): """Clear the screen""" if "win32" in sys.platform: os.system('cls') elif "linux" in sys.platform: os.system('clear') elif 'darwin' in sys.platform: os.system('clear') else: cit.err("No clearScreen for " + sys.platform)
[ "def", "clearScreen", "(", "cls", ")", ":", "if", "\"win32\"", "in", "sys", ".", "platform", ":", "os", ".", "system", "(", "'cls'", ")", "elif", "\"linux\"", "in", "sys", ".", "platform", ":", "os", ".", "system", "(", "'clear'", ")", "elif", "'darwin'", "in", "sys", ".", "platform", ":", "os", ".", "system", "(", "'clear'", ")", "else", ":", "cit", ".", "err", "(", "\"No clearScreen for \"", "+", "sys", ".", "platform", ")" ]
31.9
10.5
def clone_scope(self, source_scope, name=None, status=None, start_date=None, due_date=None, description=None, tags=None, team=None, asynchronous=False): """ Clone a Scope. This will clone a scope if the client has the right to do so. Sufficient permissions to clone a scope are a superuser, a user in the `GG:Configurators` group and a user that is Scope manager of the scope to be clone and member of the `GG:Managers` group as well. If no additional arguments are provided, the values of the `source_scope` are used for the new scope. .. versionadded: 2.6 :param source_scope: Scope object to be cloned itself :type source_scope: :class:`models.Scope` :param name: (optional) new name of the scope :type name: basestring or None :param status: (optional) statis of the new scope :type status: one of :class:`enums.ScopeStatus` :param tags: (optional) list of new scope tags :type tags: list or None :param start_date: (optional) start date of the to be cloned scope :type start_date: datetime or None :param due_date: (optional) due data of the to be cloned scope :type due_date: datetime or None :param description: (optional) description of the new scope :type description: basestring or None :param team: (optional) team_id or Team object to assign membership of scope to a team. :type team: basestring or :class:`models.Team` or None # :param scope_options: (optional) dictionary with scope options (NO EFFECT) # :type scope_options: dict or None :param asynchronous: (optional) option to use asynchronous cloning of the scope, default to False. :type asynchronous: bool or None :return: New scope that is cloned :rtype: :class:`models.Scope` :raises IllegalArgumentError: When the provided arguments are incorrect :raises APIError: When the server is unable to clone the scope (eg. 
permissions) """ if not isinstance(source_scope, Scope): raise IllegalArgumentError('`source_scope` should be a `Scope` object') data_dict = {'id': source_scope.id, 'async': asynchronous} if name is not None: if not isinstance(name, (string_types, text_type)): raise IllegalArgumentError("`name` should be a string") data_dict['name'] = str(name) if start_date is not None: if isinstance(start_date, datetime.datetime): if not start_date.tzinfo: warnings.warn("The duedate '{}' is naive and not timezone aware, use pytz.timezone info. " "This date is interpreted as UTC time.".format(start_date.isoformat(sep=' '))) data_dict['start_date'] = start_date.isoformat(sep='T') else: raise IllegalArgumentError('Start date should be a datetime.datetime() object') if due_date is not None: if isinstance(due_date, datetime.datetime): if not due_date.tzinfo: warnings.warn("The duedate '{}' is naive and not timezone aware, use pytz.timezone info. " "This date is interpreted as UTC time.".format(due_date.isoformat(sep=' '))) data_dict['due_date'] = due_date.isoformat(sep='T') else: raise IllegalArgumentError('Due date should be a datetime.datetime() object') if description is not None: if not isinstance(description, (text_type, string_types)): raise IllegalArgumentError("`description` should be a string") else: data_dict['text'] = description if status is not None: if status not in ScopeStatus.values(): raise IllegalArgumentError("`status` should be one of '{}'".format(ScopeStatus.values())) else: data_dict['status'] = str(status) if tags is not None: if not isinstance(tags, list): raise IllegalArgumentError("'Tags' should be provided as a list, was provided as '{}'". 
format(type(tags))) if not (all([isinstance(t, (str, text_type)) for t in tags])): raise IllegalArgumentError("Each tag in the list of tags should be provided as a string") data_dict['tags'] = tags if team is not None: if isinstance(team, Team): team_id = team.id elif is_uuid(team): team_id = team elif isinstance(team, (text_type, string_types)): team_id = self.team(name=team).id else: raise IllegalArgumentError("`team` should be a name of an existing team or UUID of a team") data_dict['team'] = team_id url = self._build_url('scopes') query_params = dict(select_action='clone') response = self._request('POST', url, params=query_params, json=data_dict) if response.status_code != requests.codes.created: # pragma: no cover if response.status_code == requests.codes.forbidden: raise ForbiddenError("Could not clone scope, {}: {}".format(str(response), response.content)) else: raise APIError("Could not clone scope, {}: {}".format(str(response), response.content)) return Scope(response.json()['results'][0], client=source_scope._client)
[ "def", "clone_scope", "(", "self", ",", "source_scope", ",", "name", "=", "None", ",", "status", "=", "None", ",", "start_date", "=", "None", ",", "due_date", "=", "None", ",", "description", "=", "None", ",", "tags", "=", "None", ",", "team", "=", "None", ",", "asynchronous", "=", "False", ")", ":", "if", "not", "isinstance", "(", "source_scope", ",", "Scope", ")", ":", "raise", "IllegalArgumentError", "(", "'`source_scope` should be a `Scope` object'", ")", "data_dict", "=", "{", "'id'", ":", "source_scope", ".", "id", ",", "'async'", ":", "asynchronous", "}", "if", "name", "is", "not", "None", ":", "if", "not", "isinstance", "(", "name", ",", "(", "string_types", ",", "text_type", ")", ")", ":", "raise", "IllegalArgumentError", "(", "\"`name` should be a string\"", ")", "data_dict", "[", "'name'", "]", "=", "str", "(", "name", ")", "if", "start_date", "is", "not", "None", ":", "if", "isinstance", "(", "start_date", ",", "datetime", ".", "datetime", ")", ":", "if", "not", "start_date", ".", "tzinfo", ":", "warnings", ".", "warn", "(", "\"The duedate '{}' is naive and not timezone aware, use pytz.timezone info. \"", "\"This date is interpreted as UTC time.\"", ".", "format", "(", "start_date", ".", "isoformat", "(", "sep", "=", "' '", ")", ")", ")", "data_dict", "[", "'start_date'", "]", "=", "start_date", ".", "isoformat", "(", "sep", "=", "'T'", ")", "else", ":", "raise", "IllegalArgumentError", "(", "'Start date should be a datetime.datetime() object'", ")", "if", "due_date", "is", "not", "None", ":", "if", "isinstance", "(", "due_date", ",", "datetime", ".", "datetime", ")", ":", "if", "not", "due_date", ".", "tzinfo", ":", "warnings", ".", "warn", "(", "\"The duedate '{}' is naive and not timezone aware, use pytz.timezone info. 
\"", "\"This date is interpreted as UTC time.\"", ".", "format", "(", "due_date", ".", "isoformat", "(", "sep", "=", "' '", ")", ")", ")", "data_dict", "[", "'due_date'", "]", "=", "due_date", ".", "isoformat", "(", "sep", "=", "'T'", ")", "else", ":", "raise", "IllegalArgumentError", "(", "'Due date should be a datetime.datetime() object'", ")", "if", "description", "is", "not", "None", ":", "if", "not", "isinstance", "(", "description", ",", "(", "text_type", ",", "string_types", ")", ")", ":", "raise", "IllegalArgumentError", "(", "\"`description` should be a string\"", ")", "else", ":", "data_dict", "[", "'text'", "]", "=", "description", "if", "status", "is", "not", "None", ":", "if", "status", "not", "in", "ScopeStatus", ".", "values", "(", ")", ":", "raise", "IllegalArgumentError", "(", "\"`status` should be one of '{}'\"", ".", "format", "(", "ScopeStatus", ".", "values", "(", ")", ")", ")", "else", ":", "data_dict", "[", "'status'", "]", "=", "str", "(", "status", ")", "if", "tags", "is", "not", "None", ":", "if", "not", "isinstance", "(", "tags", ",", "list", ")", ":", "raise", "IllegalArgumentError", "(", "\"'Tags' should be provided as a list, was provided as '{}'\"", ".", "format", "(", "type", "(", "tags", ")", ")", ")", "if", "not", "(", "all", "(", "[", "isinstance", "(", "t", ",", "(", "str", ",", "text_type", ")", ")", "for", "t", "in", "tags", "]", ")", ")", ":", "raise", "IllegalArgumentError", "(", "\"Each tag in the list of tags should be provided as a string\"", ")", "data_dict", "[", "'tags'", "]", "=", "tags", "if", "team", "is", "not", "None", ":", "if", "isinstance", "(", "team", ",", "Team", ")", ":", "team_id", "=", "team", ".", "id", "elif", "is_uuid", "(", "team", ")", ":", "team_id", "=", "team", "elif", "isinstance", "(", "team", ",", "(", "text_type", ",", "string_types", ")", ")", ":", "team_id", "=", "self", ".", "team", "(", "name", "=", "team", ")", ".", "id", "else", ":", "raise", "IllegalArgumentError", "(", "\"`team` should 
be a name of an existing team or UUID of a team\"", ")", "data_dict", "[", "'team'", "]", "=", "team_id", "url", "=", "self", ".", "_build_url", "(", "'scopes'", ")", "query_params", "=", "dict", "(", "select_action", "=", "'clone'", ")", "response", "=", "self", ".", "_request", "(", "'POST'", ",", "url", ",", "params", "=", "query_params", ",", "json", "=", "data_dict", ")", "if", "response", ".", "status_code", "!=", "requests", ".", "codes", ".", "created", ":", "# pragma: no cover", "if", "response", ".", "status_code", "==", "requests", ".", "codes", ".", "forbidden", ":", "raise", "ForbiddenError", "(", "\"Could not clone scope, {}: {}\"", ".", "format", "(", "str", "(", "response", ")", ",", "response", ".", "content", ")", ")", "else", ":", "raise", "APIError", "(", "\"Could not clone scope, {}: {}\"", ".", "format", "(", "str", "(", "response", ")", ",", "response", ".", "content", ")", ")", "return", "Scope", "(", "response", ".", "json", "(", ")", "[", "'results'", "]", "[", "0", "]", ",", "client", "=", "source_scope", ".", "_client", ")" ]
50.672727
27.218182
def weight_from_comm(self, v, comm): """ The total number of edges (or sum of weights) to node ``v`` from community ``comm``. See Also -------- :func:`~VertexPartition.MutableVertexPartition.weight_to_comm` """ return _c_leiden._MutableVertexPartition_weight_from_comm(self._partition, v, comm)
[ "def", "weight_from_comm", "(", "self", ",", "v", ",", "comm", ")", ":", "return", "_c_leiden", ".", "_MutableVertexPartition_weight_from_comm", "(", "self", ".", "_partition", ",", "v", ",", "comm", ")" ]
35
21.111111
def is_permitted(self, permission_s): """ :param permission_s: a collection of 1..N permissions :type permission_s: List of authz_abcs.Permission object(s) or String(s) :returns: a List of tuple(s), containing the authz_abcs.Permission and a Boolean indicating whether the permission is granted """ if self.authorized: self.check_security_manager() return (self.security_manager.is_permitted( self.identifiers, permission_s)) msg = 'Cannot check permission when user isn\'t authenticated nor remembered' raise ValueError(msg)
[ "def", "is_permitted", "(", "self", ",", "permission_s", ")", ":", "if", "self", ".", "authorized", ":", "self", ".", "check_security_manager", "(", ")", "return", "(", "self", ".", "security_manager", ".", "is_permitted", "(", "self", ".", "identifiers", ",", "permission_s", ")", ")", "msg", "=", "'Cannot check permission when user isn\\'t authenticated nor remembered'", "raise", "ValueError", "(", "msg", ")" ]
42.6
20.733333
def audit_1_1(self): """1.1 Avoid the use of the "root" account (Scored)""" for row in self.credential_report: if row["user"] == "<root_account>": for field in "password_last_used", "access_key_1_last_used_date", "access_key_2_last_used_date": if row[field] != "N/A" and self.parse_date(row[field]) > datetime.now(tzutc()) - timedelta(days=1): raise Exception("Root account last used less than a day ago ({})".format(field))
[ "def", "audit_1_1", "(", "self", ")", ":", "for", "row", "in", "self", ".", "credential_report", ":", "if", "row", "[", "\"user\"", "]", "==", "\"<root_account>\"", ":", "for", "field", "in", "\"password_last_used\"", ",", "\"access_key_1_last_used_date\"", ",", "\"access_key_2_last_used_date\"", ":", "if", "row", "[", "field", "]", "!=", "\"N/A\"", "and", "self", ".", "parse_date", "(", "row", "[", "field", "]", ")", ">", "datetime", ".", "now", "(", "tzutc", "(", ")", ")", "-", "timedelta", "(", "days", "=", "1", ")", ":", "raise", "Exception", "(", "\"Root account last used less than a day ago ({})\"", ".", "format", "(", "field", ")", ")" ]
72.285714
34.857143
def get_open_spaces(board): """Given a representation of the board, returns a list of open spaces.""" open_spaces = [] for i in range(3): for j in range(3): if board[i][j] == 0: open_spaces.append(encode_pos(i, j)) return open_spaces
[ "def", "get_open_spaces", "(", "board", ")", ":", "open_spaces", "=", "[", "]", "for", "i", "in", "range", "(", "3", ")", ":", "for", "j", "in", "range", "(", "3", ")", ":", "if", "board", "[", "i", "]", "[", "j", "]", "==", "0", ":", "open_spaces", ".", "append", "(", "encode_pos", "(", "i", ",", "j", ")", ")", "return", "open_spaces" ]
31.5
13.875
def find_minimum_spanning_tree_as_subgraph(graph): """Calculates a minimum spanning tree and returns a graph representation.""" edge_list = find_minimum_spanning_tree(graph) subgraph = get_subgraph_from_edge_list(graph, edge_list) return subgraph
[ "def", "find_minimum_spanning_tree_as_subgraph", "(", "graph", ")", ":", "edge_list", "=", "find_minimum_spanning_tree", "(", "graph", ")", "subgraph", "=", "get_subgraph_from_edge_list", "(", "graph", ",", "edge_list", ")", "return", "subgraph" ]
43
16.666667
def get_recently_played_games(self, steamID, count=0, format=None): """Request a list of recently played games by a given steam id. steamID: The users ID count: Number of games to return. (0 is all recent games.) format: Return format. None defaults to json. (json, xml, vdf) """ parameters = {'steamid' : steamID, 'count' : count} if format is not None: parameters['format'] = format url = self.create_request_url(self.interface, 'GetRecentlyPlayedGames', 1, parameters) data = self.retrieve_request(url) return self.return_data(data, format=format)
[ "def", "get_recently_played_games", "(", "self", ",", "steamID", ",", "count", "=", "0", ",", "format", "=", "None", ")", ":", "parameters", "=", "{", "'steamid'", ":", "steamID", ",", "'count'", ":", "count", "}", "if", "format", "is", "not", "None", ":", "parameters", "[", "'format'", "]", "=", "format", "url", "=", "self", ".", "create_request_url", "(", "self", ".", "interface", ",", "'GetRecentlyPlayedGames'", ",", "1", ",", "parameters", ")", "data", "=", "self", ".", "retrieve_request", "(", "url", ")", "return", "self", ".", "return_data", "(", "data", ",", "format", "=", "format", ")" ]
42.8
18.4
def get_parameter_definitions(self): """Get the parameter definitions to submit to CloudFormation. Any variable definition whose `type` is an instance of `CFNType` will be returned as a CloudFormation Parameter. Returns: dict: parameter definitions. Keys are parameter names, the values are dicts containing key/values for various parameter properties. """ output = {} for var_name, attrs in self.defined_variables().items(): var_type = attrs.get("type") if isinstance(var_type, CFNType): cfn_attrs = copy.deepcopy(attrs) cfn_attrs["type"] = var_type.parameter_type output[var_name] = cfn_attrs return output
[ "def", "get_parameter_definitions", "(", "self", ")", ":", "output", "=", "{", "}", "for", "var_name", ",", "attrs", "in", "self", ".", "defined_variables", "(", ")", ".", "items", "(", ")", ":", "var_type", "=", "attrs", ".", "get", "(", "\"type\"", ")", "if", "isinstance", "(", "var_type", ",", "CFNType", ")", ":", "cfn_attrs", "=", "copy", ".", "deepcopy", "(", "attrs", ")", "cfn_attrs", "[", "\"type\"", "]", "=", "var_type", ".", "parameter_type", "output", "[", "var_name", "]", "=", "cfn_attrs", "return", "output" ]
38.6
18.7
def remove(self, label): """Remove a label. Args: label (gkeepapi.node.Label): The Label object. """ if label.id in self._labels: self._labels[label.id] = None self._dirty = True
[ "def", "remove", "(", "self", ",", "label", ")", ":", "if", "label", ".", "id", "in", "self", ".", "_labels", ":", "self", ".", "_labels", "[", "label", ".", "id", "]", "=", "None", "self", ".", "_dirty", "=", "True" ]
26.111111
13.333333
def age(self, year, month=2, day=1): """Returns the age of the player on a given date. :year: int representing the year. :month: int representing the month (1-12). :day: int representing the day within the month (1-31). :returns: Age in years as a float. """ doc = self.get_main_doc() date_string = doc('span[itemprop="birthDate"]').attr('data-birth') regex = r'(\d{4})\-(\d{2})\-(\d{2})' date_args = map(int, re.match(regex, date_string).groups()) birth_date = datetime.date(*date_args) age_date = datetime.date(year=year, month=month, day=day) delta = age_date - birth_date age = delta.days / 365. return age
[ "def", "age", "(", "self", ",", "year", ",", "month", "=", "2", ",", "day", "=", "1", ")", ":", "doc", "=", "self", ".", "get_main_doc", "(", ")", "date_string", "=", "doc", "(", "'span[itemprop=\"birthDate\"]'", ")", ".", "attr", "(", "'data-birth'", ")", "regex", "=", "r'(\\d{4})\\-(\\d{2})\\-(\\d{2})'", "date_args", "=", "map", "(", "int", ",", "re", ".", "match", "(", "regex", ",", "date_string", ")", ".", "groups", "(", ")", ")", "birth_date", "=", "datetime", ".", "date", "(", "*", "date_args", ")", "age_date", "=", "datetime", ".", "date", "(", "year", "=", "year", ",", "month", "=", "month", ",", "day", "=", "day", ")", "delta", "=", "age_date", "-", "birth_date", "age", "=", "delta", ".", "days", "/", "365.", "return", "age" ]
42.058824
12.764706
def help(rest): """Help (this command)""" rs = rest.strip() if rs: # give help for matching commands for handler in Handler._registry: if handler.name == rs.lower(): yield '!%s: %s' % (handler.name, handler.doc) break else: yield "command not found" return # give help for all commands def mk_entries(): handlers = ( handler for handler in Handler._registry if type(handler) is pmxbot.core.CommandHandler ) handlers = sorted(handlers, key=operator.attrgetter('name')) for handler in handlers: res = "!" + handler.name if handler.aliases: alias_names = (alias.name for alias in handler.aliases) res += " (%s)" % ', '.join(alias_names) yield res o = io.StringIO(" ".join(mk_entries())) more = o.read(160) while more: yield more time.sleep(0.3) more = o.read(160)
[ "def", "help", "(", "rest", ")", ":", "rs", "=", "rest", ".", "strip", "(", ")", "if", "rs", ":", "# give help for matching commands", "for", "handler", "in", "Handler", ".", "_registry", ":", "if", "handler", ".", "name", "==", "rs", ".", "lower", "(", ")", ":", "yield", "'!%s: %s'", "%", "(", "handler", ".", "name", ",", "handler", ".", "doc", ")", "break", "else", ":", "yield", "\"command not found\"", "return", "# give help for all commands", "def", "mk_entries", "(", ")", ":", "handlers", "=", "(", "handler", "for", "handler", "in", "Handler", ".", "_registry", "if", "type", "(", "handler", ")", "is", "pmxbot", ".", "core", ".", "CommandHandler", ")", "handlers", "=", "sorted", "(", "handlers", ",", "key", "=", "operator", ".", "attrgetter", "(", "'name'", ")", ")", "for", "handler", "in", "handlers", ":", "res", "=", "\"!\"", "+", "handler", ".", "name", "if", "handler", ".", "aliases", ":", "alias_names", "=", "(", "alias", ".", "name", "for", "alias", "in", "handler", ".", "aliases", ")", "res", "+=", "\" (%s)\"", "%", "', '", ".", "join", "(", "alias_names", ")", "yield", "res", "o", "=", "io", ".", "StringIO", "(", "\" \"", ".", "join", "(", "mk_entries", "(", ")", ")", ")", "more", "=", "o", ".", "read", "(", "160", ")", "while", "more", ":", "yield", "more", "time", ".", "sleep", "(", "0.3", ")", "more", "=", "o", ".", "read", "(", "160", ")" ]
24.212121
19.121212
def schema_key_for(self, seq_no: int) -> SchemaKey: """ Get schema key for schema by sequence number if known, None for no such schema in cache. :param seq_no: sequence number :return: corresponding schema key or None """ LOGGER.debug('SchemaCache.schema_key_for >>> seq_no: %s', seq_no) rv = self._seq_no2schema_key.get(seq_no, None) LOGGER.debug('SchemaCache.schema_key_for <<< %s', rv) return rv
[ "def", "schema_key_for", "(", "self", ",", "seq_no", ":", "int", ")", "->", "SchemaKey", ":", "LOGGER", ".", "debug", "(", "'SchemaCache.schema_key_for >>> seq_no: %s'", ",", "seq_no", ")", "rv", "=", "self", ".", "_seq_no2schema_key", ".", "get", "(", "seq_no", ",", "None", ")", "LOGGER", ".", "debug", "(", "'SchemaCache.schema_key_for <<< %s'", ",", "rv", ")", "return", "rv" ]
32.928571
23.5
def resolve(self, dispatcher, node): """ For the given node, resolve it into the scope it was declared at, and if one was found, return its value. """ scope = self.identifiers.get(node) if not scope: return node.value return scope.resolve(node.value)
[ "def", "resolve", "(", "self", ",", "dispatcher", ",", "node", ")", ":", "scope", "=", "self", ".", "identifiers", ".", "get", "(", "node", ")", "if", "not", "scope", ":", "return", "node", ".", "value", "return", "scope", ".", "resolve", "(", "node", ".", "value", ")" ]
31
11.6
def __remove_dashboard_menu(self, kibiter_major): """Remove existing menu for dashboard, if any. Usually, we remove the menu before creating a new one. :param kibiter_major: major version of kibiter """ logger.info("Removing old dashboard menu, if any") if kibiter_major == "6": metadashboard = ".kibana/doc/metadashboard" else: metadashboard = ".kibana/metadashboard/main" menu_url = urijoin(self.conf['es_enrichment']['url'], metadashboard) self.grimoire_con.delete(menu_url)
[ "def", "__remove_dashboard_menu", "(", "self", ",", "kibiter_major", ")", ":", "logger", ".", "info", "(", "\"Removing old dashboard menu, if any\"", ")", "if", "kibiter_major", "==", "\"6\"", ":", "metadashboard", "=", "\".kibana/doc/metadashboard\"", "else", ":", "metadashboard", "=", "\".kibana/metadashboard/main\"", "menu_url", "=", "urijoin", "(", "self", ".", "conf", "[", "'es_enrichment'", "]", "[", "'url'", "]", ",", "metadashboard", ")", "self", ".", "grimoire_con", ".", "delete", "(", "menu_url", ")" ]
40.142857
17.642857
def _check_generic_pos(self, *tokens): """Check if the different tokens were logged in one record, any level.""" for record in self.records: if all(token in record.message for token in tokens): return # didn't exit, all tokens are not present in the same record msgs = ["Tokens {} not found, all was logged is...".format(tokens)] for record in self.records: msgs.append(" {:9s} {!r}".format(record.levelname, record.message)) self.test_instance.fail("\n".join(msgs))
[ "def", "_check_generic_pos", "(", "self", ",", "*", "tokens", ")", ":", "for", "record", "in", "self", ".", "records", ":", "if", "all", "(", "token", "in", "record", ".", "message", "for", "token", "in", "tokens", ")", ":", "return", "# didn't exit, all tokens are not present in the same record", "msgs", "=", "[", "\"Tokens {} not found, all was logged is...\"", ".", "format", "(", "tokens", ")", "]", "for", "record", "in", "self", ".", "records", ":", "msgs", ".", "append", "(", "\" {:9s} {!r}\"", ".", "format", "(", "record", ".", "levelname", ",", "record", ".", "message", ")", ")", "self", ".", "test_instance", ".", "fail", "(", "\"\\n\"", ".", "join", "(", "msgs", ")", ")" ]
49.818182
18.818182
def _xfer_file(self, source_file=None, source_config=None, dest_file=None, file_system=None, TransferClass=FileTransfer): """Transfer file to remote device. By default, this will use Secure Copy if self.inline_transfer is set, then will use Netmiko InlineTransfer method to transfer inline using either SSH or telnet (plus TCL onbox). Return (status, msg) status = boolean msg = details on what happened """ if not source_file and not source_config: raise ValueError("File source not specified for transfer.") if not dest_file or not file_system: raise ValueError("Destination file or file system not specified.") if source_file: kwargs = dict(ssh_conn=self.device, source_file=source_file, dest_file=dest_file, direction='put', file_system=file_system) elif source_config: kwargs = dict(ssh_conn=self.device, source_config=source_config, dest_file=dest_file, direction='put', file_system=file_system) enable_scp = True if self.inline_transfer: enable_scp = False with TransferClass(**kwargs) as transfer: # Check if file already exists and has correct MD5 if transfer.check_file_exists() and transfer.compare_md5(): msg = "File already exists and has correct MD5: no SCP needed" return (True, msg) if not transfer.verify_space_available(): msg = "Insufficient space available on remote device" return (False, msg) if enable_scp: transfer.enable_scp() # Transfer file transfer.transfer_file() # Compares MD5 between local-remote files if transfer.verify_file(): msg = "File successfully transferred to remote device" return (True, msg) else: msg = "File transfer to remote device failed" return (False, msg) return (False, '')
[ "def", "_xfer_file", "(", "self", ",", "source_file", "=", "None", ",", "source_config", "=", "None", ",", "dest_file", "=", "None", ",", "file_system", "=", "None", ",", "TransferClass", "=", "FileTransfer", ")", ":", "if", "not", "source_file", "and", "not", "source_config", ":", "raise", "ValueError", "(", "\"File source not specified for transfer.\"", ")", "if", "not", "dest_file", "or", "not", "file_system", ":", "raise", "ValueError", "(", "\"Destination file or file system not specified.\"", ")", "if", "source_file", ":", "kwargs", "=", "dict", "(", "ssh_conn", "=", "self", ".", "device", ",", "source_file", "=", "source_file", ",", "dest_file", "=", "dest_file", ",", "direction", "=", "'put'", ",", "file_system", "=", "file_system", ")", "elif", "source_config", ":", "kwargs", "=", "dict", "(", "ssh_conn", "=", "self", ".", "device", ",", "source_config", "=", "source_config", ",", "dest_file", "=", "dest_file", ",", "direction", "=", "'put'", ",", "file_system", "=", "file_system", ")", "enable_scp", "=", "True", "if", "self", ".", "inline_transfer", ":", "enable_scp", "=", "False", "with", "TransferClass", "(", "*", "*", "kwargs", ")", "as", "transfer", ":", "# Check if file already exists and has correct MD5", "if", "transfer", ".", "check_file_exists", "(", ")", "and", "transfer", ".", "compare_md5", "(", ")", ":", "msg", "=", "\"File already exists and has correct MD5: no SCP needed\"", "return", "(", "True", ",", "msg", ")", "if", "not", "transfer", ".", "verify_space_available", "(", ")", ":", "msg", "=", "\"Insufficient space available on remote device\"", "return", "(", "False", ",", "msg", ")", "if", "enable_scp", ":", "transfer", ".", "enable_scp", "(", ")", "# Transfer file", "transfer", ".", "transfer_file", "(", ")", "# Compares MD5 between local-remote files", "if", "transfer", ".", "verify_file", "(", ")", ":", "msg", "=", "\"File successfully transferred to remote device\"", "return", "(", "True", ",", "msg", ")", "else", ":", "msg", "=", 
"\"File transfer to remote device failed\"", "return", "(", "False", ",", "msg", ")", "return", "(", "False", ",", "''", ")" ]
41.98
22.08
def wait(self): """Waits and returns received messages. If running in compatibility mode for older uWSGI versions, it also sends messages that have been queued by send(). A return value of None means that connection was closed. This must be called repeatedly. For uWSGI < 2.1.x it must be called from the main greenlet.""" while True: if self._req_ctx is not None: try: msg = uwsgi.websocket_recv(request_context=self._req_ctx) except IOError: # connection closed return None return self._decode_received(msg) else: # we wake up at least every 3 seconds to let uWSGI # do its ping/ponging event_set = self._event.wait(timeout=3) if event_set: self._event.clear() # maybe there is something to send msgs = [] while True: try: msgs.append(self._send_queue.get(block=False)) except gevent.queue.Empty: break for msg in msgs: self._send(msg) # maybe there is something to receive, if not, at least # ensure uWSGI does its ping/ponging try: msg = uwsgi.websocket_recv_nb() except IOError: # connection closed self._select_greenlet.kill() return None if msg: # message available return self._decode_received(msg)
[ "def", "wait", "(", "self", ")", ":", "while", "True", ":", "if", "self", ".", "_req_ctx", "is", "not", "None", ":", "try", ":", "msg", "=", "uwsgi", ".", "websocket_recv", "(", "request_context", "=", "self", ".", "_req_ctx", ")", "except", "IOError", ":", "# connection closed", "return", "None", "return", "self", ".", "_decode_received", "(", "msg", ")", "else", ":", "# we wake up at least every 3 seconds to let uWSGI", "# do its ping/ponging", "event_set", "=", "self", ".", "_event", ".", "wait", "(", "timeout", "=", "3", ")", "if", "event_set", ":", "self", ".", "_event", ".", "clear", "(", ")", "# maybe there is something to send", "msgs", "=", "[", "]", "while", "True", ":", "try", ":", "msgs", ".", "append", "(", "self", ".", "_send_queue", ".", "get", "(", "block", "=", "False", ")", ")", "except", "gevent", ".", "queue", ".", "Empty", ":", "break", "for", "msg", "in", "msgs", ":", "self", ".", "_send", "(", "msg", ")", "# maybe there is something to receive, if not, at least", "# ensure uWSGI does its ping/ponging", "try", ":", "msg", "=", "uwsgi", ".", "websocket_recv_nb", "(", ")", "except", "IOError", ":", "# connection closed", "self", ".", "_select_greenlet", ".", "kill", "(", ")", "return", "None", "if", "msg", ":", "# message available", "return", "self", ".", "_decode_received", "(", "msg", ")" ]
44.526316
14.026316
def privatekey_to_publickey(private_key_bin: bytes) -> bytes: """ Returns public key in bitcoins 'bin' encoding. """ if not ishash(private_key_bin): raise ValueError('private_key_bin format mismatch. maybe hex encoded?') return keys.PrivateKey(private_key_bin).public_key.to_bytes()
[ "def", "privatekey_to_publickey", "(", "private_key_bin", ":", "bytes", ")", "->", "bytes", ":", "if", "not", "ishash", "(", "private_key_bin", ")", ":", "raise", "ValueError", "(", "'private_key_bin format mismatch. maybe hex encoded?'", ")", "return", "keys", ".", "PrivateKey", "(", "private_key_bin", ")", ".", "public_key", ".", "to_bytes", "(", ")" ]
59.6
18
def source(source_id=None, **kwargs): """Get a source of economic data.""" if source_id is not None: kwargs['source_id'] = source_id elif 'id' in kwargs: source_id = kwargs.pop('id') kwargs['source_id'] = source_id if 'releases' in kwargs: kwargs.pop('releases') path = 'releases' else: path = None return Fred().source(path, **kwargs)
[ "def", "source", "(", "source_id", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "source_id", "is", "not", "None", ":", "kwargs", "[", "'source_id'", "]", "=", "source_id", "elif", "'id'", "in", "kwargs", ":", "source_id", "=", "kwargs", ".", "pop", "(", "'id'", ")", "kwargs", "[", "'source_id'", "]", "=", "source_id", "if", "'releases'", "in", "kwargs", ":", "kwargs", ".", "pop", "(", "'releases'", ")", "path", "=", "'releases'", "else", ":", "path", "=", "None", "return", "Fred", "(", ")", ".", "source", "(", "path", ",", "*", "*", "kwargs", ")" ]
30.384615
9.615385
def push_external_commands_to_schedulers(self): # pragma: no cover - not used! """Send external commands to schedulers :return: None """ # Now get all external commands and push them to the schedulers for external_command in self.external_commands: self.external_commands_manager.resolve_command(external_command) # Now for all reachable schedulers, send the commands sent = False for scheduler_link in self.conf.schedulers: ext_cmds = scheduler_link.external_commands if ext_cmds and scheduler_link.reachable: logger.debug("Sending %d commands to the scheduler %s", len(ext_cmds), scheduler_link.name) if scheduler_link.push_external_commands(ext_cmds): statsmgr.counter('external-commands.pushed.count', len(ext_cmds)) sent = True if sent: # Clean the pushed commands scheduler_link.external_commands.clear()
[ "def", "push_external_commands_to_schedulers", "(", "self", ")", ":", "# pragma: no cover - not used!", "# Now get all external commands and push them to the schedulers", "for", "external_command", "in", "self", ".", "external_commands", ":", "self", ".", "external_commands_manager", ".", "resolve_command", "(", "external_command", ")", "# Now for all reachable schedulers, send the commands", "sent", "=", "False", "for", "scheduler_link", "in", "self", ".", "conf", ".", "schedulers", ":", "ext_cmds", "=", "scheduler_link", ".", "external_commands", "if", "ext_cmds", "and", "scheduler_link", ".", "reachable", ":", "logger", ".", "debug", "(", "\"Sending %d commands to the scheduler %s\"", ",", "len", "(", "ext_cmds", ")", ",", "scheduler_link", ".", "name", ")", "if", "scheduler_link", ".", "push_external_commands", "(", "ext_cmds", ")", ":", "statsmgr", ".", "counter", "(", "'external-commands.pushed.count'", ",", "len", "(", "ext_cmds", ")", ")", "sent", "=", "True", "if", "sent", ":", "# Clean the pushed commands", "scheduler_link", ".", "external_commands", ".", "clear", "(", ")" ]
47.136364
21.590909
def send(self, sender: PytgbotApiBot): """ Send the message via pytgbot. :param sender: The bot instance to send with. :type sender: pytgbot.bot.Bot :rtype: PytgbotApiMessage """ return sender.send_video_note( # receiver, self.media, disable_notification=self.disable_notification, reply_to_message_id=reply_id video_note=self.video_note, chat_id=self.receiver, reply_to_message_id=self.reply_id, duration=self.duration, length=self.length, thumb=self.thumb, disable_notification=self.disable_notification, reply_markup=self.reply_markup )
[ "def", "send", "(", "self", ",", "sender", ":", "PytgbotApiBot", ")", ":", "return", "sender", ".", "send_video_note", "(", "# receiver, self.media, disable_notification=self.disable_notification, reply_to_message_id=reply_id", "video_note", "=", "self", ".", "video_note", ",", "chat_id", "=", "self", ".", "receiver", ",", "reply_to_message_id", "=", "self", ".", "reply_id", ",", "duration", "=", "self", ".", "duration", ",", "length", "=", "self", ".", "length", ",", "thumb", "=", "self", ".", "thumb", ",", "disable_notification", "=", "self", ".", "disable_notification", ",", "reply_markup", "=", "self", ".", "reply_markup", ")" ]
47.538462
31.538462
def addGene( self, gene_id, gene_label, gene_type=None, gene_description=None ): ''' genes are classes ''' if gene_type is None: gene_type = self.globaltt['gene'] self.model.addClassToGraph(gene_id, gene_label, gene_type, gene_description) return
[ "def", "addGene", "(", "self", ",", "gene_id", ",", "gene_label", ",", "gene_type", "=", "None", ",", "gene_description", "=", "None", ")", ":", "if", "gene_type", "is", "None", ":", "gene_type", "=", "self", ".", "globaltt", "[", "'gene'", "]", "self", ".", "model", ".", "addClassToGraph", "(", "gene_id", ",", "gene_label", ",", "gene_type", ",", "gene_description", ")", "return" ]
33.222222
25.666667
def search(signal='', action='', signals=SIGNALS): """ Search the signals DB for signal named *signal*, and which action matches *action* in a case insensitive way. :param signal: Regex for signal name. :param action: Regex for default action. :param signals: Database of signals. """ sig_re = re.compile(signal, re.IGNORECASE) act_re = re.compile(action, re.IGNORECASE) res = [] for code in signals: sig, act, _ = signals[code] if sig_re.match(sig) and act_re.match(act): res.append(explain(code, signals=signals)) return res
[ "def", "search", "(", "signal", "=", "''", ",", "action", "=", "''", ",", "signals", "=", "SIGNALS", ")", ":", "sig_re", "=", "re", ".", "compile", "(", "signal", ",", "re", ".", "IGNORECASE", ")", "act_re", "=", "re", ".", "compile", "(", "action", ",", "re", ".", "IGNORECASE", ")", "res", "=", "[", "]", "for", "code", "in", "signals", ":", "sig", ",", "act", ",", "_", "=", "signals", "[", "code", "]", "if", "sig_re", ".", "match", "(", "sig", ")", "and", "act_re", ".", "match", "(", "act", ")", ":", "res", ".", "append", "(", "explain", "(", "code", ",", "signals", "=", "signals", ")", ")", "return", "res" ]
32.777778
11.444444
def title(self, value): """ Setter for **self.__title** attribute. :param value: Attribute value. :type value: unicode """ if value is not None: assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format( "title", value) self.__title = value
[ "def", "title", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "assert", "type", "(", "value", ")", "is", "unicode", ",", "\"'{0}' attribute: '{1}' type is not 'unicode'!\"", ".", "format", "(", "\"title\"", ",", "value", ")", "self", ".", "__title", "=", "value" ]
28.583333
17.25
def _write_new_tag_to_init(self): """ Write version to __init__.py by editing in place """ for line in fileinput.input(self.init_file, inplace=1): if line.strip().startswith("__version__"): line = "__version__ = \"" + self.tag + "\"" print(line.strip("\n"))
[ "def", "_write_new_tag_to_init", "(", "self", ")", ":", "for", "line", "in", "fileinput", ".", "input", "(", "self", ".", "init_file", ",", "inplace", "=", "1", ")", ":", "if", "line", ".", "strip", "(", ")", ".", "startswith", "(", "\"__version__\"", ")", ":", "line", "=", "\"__version__ = \\\"\"", "+", "self", ".", "tag", "+", "\"\\\"\"", "print", "(", "line", ".", "strip", "(", "\"\\n\"", ")", ")" ]
40.25
10.5
def shutdown(self): """Disconnect all connections and end the loop :returns: None :rtype: None :raises: None """ log.debug('Shutting down %s' % self) self.disconnect_all() self._looping.clear()
[ "def", "shutdown", "(", "self", ")", ":", "log", ".", "debug", "(", "'Shutting down %s'", "%", "self", ")", "self", ".", "disconnect_all", "(", ")", "self", ".", "_looping", ".", "clear", "(", ")" ]
24.9
14.4
def cli(ctx, **kwargs): """ A powerful spider system in python. """ if kwargs['add_sys_path']: sys.path.append(os.getcwd()) logging.config.fileConfig(kwargs['logging_config']) # get db from env for db in ('taskdb', 'projectdb', 'resultdb'): if kwargs[db] is not None: continue if os.environ.get('MYSQL_NAME'): kwargs[db] = utils.Get(lambda db=db: connect_database( 'sqlalchemy+mysql+%s://%s:%s/%s' % ( db, os.environ['MYSQL_PORT_3306_TCP_ADDR'], os.environ['MYSQL_PORT_3306_TCP_PORT'], db))) elif os.environ.get('MONGODB_NAME'): kwargs[db] = utils.Get(lambda db=db: connect_database( 'mongodb+%s://%s:%s/%s' % ( db, os.environ['MONGODB_PORT_27017_TCP_ADDR'], os.environ['MONGODB_PORT_27017_TCP_PORT'], db))) elif ctx.invoked_subcommand == 'bench': if kwargs['data_path'] == './data': kwargs['data_path'] += '/bench' shutil.rmtree(kwargs['data_path'], ignore_errors=True) os.mkdir(kwargs['data_path']) if db in ('taskdb', 'resultdb'): kwargs[db] = utils.Get(lambda db=db: connect_database('sqlite+%s://' % (db))) elif db in ('projectdb', ): kwargs[db] = utils.Get(lambda db=db: connect_database('local+%s://%s' % ( db, os.path.join(os.path.dirname(__file__), 'libs/bench.py')))) else: if not os.path.exists(kwargs['data_path']): os.mkdir(kwargs['data_path']) kwargs[db] = utils.Get(lambda db=db: connect_database('sqlite+%s:///%s/%s.db' % ( db, kwargs['data_path'], db[:-2]))) kwargs['is_%s_default' % db] = True # create folder for counter.dump if not os.path.exists(kwargs['data_path']): os.mkdir(kwargs['data_path']) # message queue, compatible with old version if kwargs.get('message_queue'): pass elif kwargs.get('amqp_url'): kwargs['message_queue'] = kwargs['amqp_url'] elif os.environ.get('RABBITMQ_NAME'): kwargs['message_queue'] = ("amqp://guest:guest@%(RABBITMQ_PORT_5672_TCP_ADDR)s" ":%(RABBITMQ_PORT_5672_TCP_PORT)s/%%2F" % os.environ) elif kwargs.get('beanstalk'): kwargs['message_queue'] = "beanstalk://%s/" % kwargs['beanstalk'] for name in ('newtask_queue', 'status_queue', 
'scheduler2fetcher', 'fetcher2processor', 'processor2result'): if kwargs.get('message_queue'): kwargs[name] = utils.Get(lambda name=name: connect_message_queue( name, kwargs.get('message_queue'), kwargs['queue_maxsize'])) else: kwargs[name] = connect_message_queue(name, kwargs.get('message_queue'), kwargs['queue_maxsize']) # phantomjs-proxy if kwargs.get('phantomjs_proxy'): pass elif os.environ.get('PHANTOMJS_NAME'): kwargs['phantomjs_proxy'] = os.environ['PHANTOMJS_PORT_25555_TCP'][len('tcp://'):] # puppeteer-proxy if kwargs.get('puppeteer_proxy'): pass elif os.environ.get('PUPPETEER_NAME'): kwargs['puppeteer_proxy'] = os.environ['PUPPETEER_PORT_22222_TCP'][len('tcp://'):] ctx.obj = utils.ObjectDict(ctx.obj or {}) ctx.obj['instances'] = [] ctx.obj.update(kwargs) if ctx.invoked_subcommand is None and not ctx.obj.get('testing_mode'): ctx.invoke(all) return ctx
[ "def", "cli", "(", "ctx", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", "[", "'add_sys_path'", "]", ":", "sys", ".", "path", ".", "append", "(", "os", ".", "getcwd", "(", ")", ")", "logging", ".", "config", ".", "fileConfig", "(", "kwargs", "[", "'logging_config'", "]", ")", "# get db from env", "for", "db", "in", "(", "'taskdb'", ",", "'projectdb'", ",", "'resultdb'", ")", ":", "if", "kwargs", "[", "db", "]", "is", "not", "None", ":", "continue", "if", "os", ".", "environ", ".", "get", "(", "'MYSQL_NAME'", ")", ":", "kwargs", "[", "db", "]", "=", "utils", ".", "Get", "(", "lambda", "db", "=", "db", ":", "connect_database", "(", "'sqlalchemy+mysql+%s://%s:%s/%s'", "%", "(", "db", ",", "os", ".", "environ", "[", "'MYSQL_PORT_3306_TCP_ADDR'", "]", ",", "os", ".", "environ", "[", "'MYSQL_PORT_3306_TCP_PORT'", "]", ",", "db", ")", ")", ")", "elif", "os", ".", "environ", ".", "get", "(", "'MONGODB_NAME'", ")", ":", "kwargs", "[", "db", "]", "=", "utils", ".", "Get", "(", "lambda", "db", "=", "db", ":", "connect_database", "(", "'mongodb+%s://%s:%s/%s'", "%", "(", "db", ",", "os", ".", "environ", "[", "'MONGODB_PORT_27017_TCP_ADDR'", "]", ",", "os", ".", "environ", "[", "'MONGODB_PORT_27017_TCP_PORT'", "]", ",", "db", ")", ")", ")", "elif", "ctx", ".", "invoked_subcommand", "==", "'bench'", ":", "if", "kwargs", "[", "'data_path'", "]", "==", "'./data'", ":", "kwargs", "[", "'data_path'", "]", "+=", "'/bench'", "shutil", ".", "rmtree", "(", "kwargs", "[", "'data_path'", "]", ",", "ignore_errors", "=", "True", ")", "os", ".", "mkdir", "(", "kwargs", "[", "'data_path'", "]", ")", "if", "db", "in", "(", "'taskdb'", ",", "'resultdb'", ")", ":", "kwargs", "[", "db", "]", "=", "utils", ".", "Get", "(", "lambda", "db", "=", "db", ":", "connect_database", "(", "'sqlite+%s://'", "%", "(", "db", ")", ")", ")", "elif", "db", "in", "(", "'projectdb'", ",", ")", ":", "kwargs", "[", "db", "]", "=", "utils", ".", "Get", "(", "lambda", "db", "=", "db", ":", "connect_database", 
"(", "'local+%s://%s'", "%", "(", "db", ",", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'libs/bench.py'", ")", ")", ")", ")", "else", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "kwargs", "[", "'data_path'", "]", ")", ":", "os", ".", "mkdir", "(", "kwargs", "[", "'data_path'", "]", ")", "kwargs", "[", "db", "]", "=", "utils", ".", "Get", "(", "lambda", "db", "=", "db", ":", "connect_database", "(", "'sqlite+%s:///%s/%s.db'", "%", "(", "db", ",", "kwargs", "[", "'data_path'", "]", ",", "db", "[", ":", "-", "2", "]", ")", ")", ")", "kwargs", "[", "'is_%s_default'", "%", "db", "]", "=", "True", "# create folder for counter.dump", "if", "not", "os", ".", "path", ".", "exists", "(", "kwargs", "[", "'data_path'", "]", ")", ":", "os", ".", "mkdir", "(", "kwargs", "[", "'data_path'", "]", ")", "# message queue, compatible with old version", "if", "kwargs", ".", "get", "(", "'message_queue'", ")", ":", "pass", "elif", "kwargs", ".", "get", "(", "'amqp_url'", ")", ":", "kwargs", "[", "'message_queue'", "]", "=", "kwargs", "[", "'amqp_url'", "]", "elif", "os", ".", "environ", ".", "get", "(", "'RABBITMQ_NAME'", ")", ":", "kwargs", "[", "'message_queue'", "]", "=", "(", "\"amqp://guest:guest@%(RABBITMQ_PORT_5672_TCP_ADDR)s\"", "\":%(RABBITMQ_PORT_5672_TCP_PORT)s/%%2F\"", "%", "os", ".", "environ", ")", "elif", "kwargs", ".", "get", "(", "'beanstalk'", ")", ":", "kwargs", "[", "'message_queue'", "]", "=", "\"beanstalk://%s/\"", "%", "kwargs", "[", "'beanstalk'", "]", "for", "name", "in", "(", "'newtask_queue'", ",", "'status_queue'", ",", "'scheduler2fetcher'", ",", "'fetcher2processor'", ",", "'processor2result'", ")", ":", "if", "kwargs", ".", "get", "(", "'message_queue'", ")", ":", "kwargs", "[", "name", "]", "=", "utils", ".", "Get", "(", "lambda", "name", "=", "name", ":", "connect_message_queue", "(", "name", ",", "kwargs", ".", "get", "(", "'message_queue'", ")", ",", "kwargs", "[", "'queue_maxsize'", 
"]", ")", ")", "else", ":", "kwargs", "[", "name", "]", "=", "connect_message_queue", "(", "name", ",", "kwargs", ".", "get", "(", "'message_queue'", ")", ",", "kwargs", "[", "'queue_maxsize'", "]", ")", "# phantomjs-proxy", "if", "kwargs", ".", "get", "(", "'phantomjs_proxy'", ")", ":", "pass", "elif", "os", ".", "environ", ".", "get", "(", "'PHANTOMJS_NAME'", ")", ":", "kwargs", "[", "'phantomjs_proxy'", "]", "=", "os", ".", "environ", "[", "'PHANTOMJS_PORT_25555_TCP'", "]", "[", "len", "(", "'tcp://'", ")", ":", "]", "# puppeteer-proxy", "if", "kwargs", ".", "get", "(", "'puppeteer_proxy'", ")", ":", "pass", "elif", "os", ".", "environ", ".", "get", "(", "'PUPPETEER_NAME'", ")", ":", "kwargs", "[", "'puppeteer_proxy'", "]", "=", "os", ".", "environ", "[", "'PUPPETEER_PORT_22222_TCP'", "]", "[", "len", "(", "'tcp://'", ")", ":", "]", "ctx", ".", "obj", "=", "utils", ".", "ObjectDict", "(", "ctx", ".", "obj", "or", "{", "}", ")", "ctx", ".", "obj", "[", "'instances'", "]", "=", "[", "]", "ctx", ".", "obj", ".", "update", "(", "kwargs", ")", "if", "ctx", ".", "invoked_subcommand", "is", "None", "and", "not", "ctx", ".", "obj", ".", "get", "(", "'testing_mode'", ")", ":", "ctx", ".", "invoke", "(", "all", ")", "return", "ctx" ]
42.506024
20.554217
def ask_question(self, field_name, pattern=NAME_PATTERN, is_required=False, password=False): """Ask a question and get the input values. This method will validade the input values. Args: field_name(string): Field name used to ask for input value. pattern(tuple): Pattern to validate the input value. is_required(bool): Boolean value if the input value is required. password(bool): Boolean value to get input password with mask. Returns: input_value(string): Input value validated. """ input_value = "" question = ("Insert the field using the pattern below:" "\n{}\n{}: ".format(pattern[0], field_name)) while not input_value: input_value = getpass(question) if password else input(question) if not (input_value or is_required): break if password: confirm_password = getpass('Confirm your password: ') if confirm_password != input_value: print("Password does not match") input_value = "" if not self.valid_attribute(input_value, pattern[1]): error_message = "The content must fit the pattern: {}\n" print(error_message.format(pattern[0])) input_value = "" return input_value
[ "def", "ask_question", "(", "self", ",", "field_name", ",", "pattern", "=", "NAME_PATTERN", ",", "is_required", "=", "False", ",", "password", "=", "False", ")", ":", "input_value", "=", "\"\"", "question", "=", "(", "\"Insert the field using the pattern below:\"", "\"\\n{}\\n{}: \"", ".", "format", "(", "pattern", "[", "0", "]", ",", "field_name", ")", ")", "while", "not", "input_value", ":", "input_value", "=", "getpass", "(", "question", ")", "if", "password", "else", "input", "(", "question", ")", "if", "not", "(", "input_value", "or", "is_required", ")", ":", "break", "if", "password", ":", "confirm_password", "=", "getpass", "(", "'Confirm your password: '", ")", "if", "confirm_password", "!=", "input_value", ":", "print", "(", "\"Password does not match\"", ")", "input_value", "=", "\"\"", "if", "not", "self", ".", "valid_attribute", "(", "input_value", ",", "pattern", "[", "1", "]", ")", ":", "error_message", "=", "\"The content must fit the pattern: {}\\n\"", "print", "(", "error_message", ".", "format", "(", "pattern", "[", "0", "]", ")", ")", "input_value", "=", "\"\"", "return", "input_value" ]
38.944444
22.833333
def copyFile(input, output, replace=None): """Copy a file whole from input to output.""" _found = findFile(output) if not _found or (_found and replace): shutil.copy2(input, output)
[ "def", "copyFile", "(", "input", ",", "output", ",", "replace", "=", "None", ")", ":", "_found", "=", "findFile", "(", "output", ")", "if", "not", "_found", "or", "(", "_found", "and", "replace", ")", ":", "shutil", ".", "copy2", "(", "input", ",", "output", ")" ]
32.833333
10