text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def fetch_object(self, obj_name, include_meta=False, chunk_size=None):
    """
    Backwards-compatible alias that delegates to :meth:`fetch`.

    :param obj_name: object to retrieve; forwarded to ``fetch`` as ``obj``
    :param include_meta: forwarded unchanged to ``fetch``
    :param chunk_size: forwarded unchanged to ``fetch``
    :return: whatever ``self.fetch`` returns
    """
    return self.fetch(
        obj=obj_name,
        include_meta=include_meta,
        chunk_size=chunk_size,
    )
[ "def", "fetch_object", "(", "self", ",", "obj_name", ",", "include_meta", "=", "False", ",", "chunk_size", "=", "None", ")", ":", "return", "self", ".", "fetch", "(", "obj", "=", "obj_name", ",", "include_meta", "=", "include_meta", ",", "chunk_size", "=", "chunk_size", ")" ]
44
14.333333
def schedule_function(queue_name, function_name, *args, **kwargs):
    """
    Schedule a function named `function_name` to be run by workers on the
    queue `queue_name`, with *args and **kwargs forwarded to that function.

    When ``BEANSTALK_DISPATCH_EXECUTE_SYNCHRONOUSLY`` is truthy in settings,
    the function is executed inline instead of being queued on SQS.
    """
    body = create_request_body(function_name, *args, **kwargs)
    run_inline = getattr(
        settings, 'BEANSTALK_DISPATCH_EXECUTE_SYNCHRONOUSLY', False)
    if run_inline:
        execute_function(json.loads(body))
        return
    # Asynchronous path: push the serialized request onto an SQS queue,
    # creating the queue on first use.
    connection = boto.connect_sqs(
        settings.BEANSTALK_DISPATCH_SQS_KEY,
        settings.BEANSTALK_DISPATCH_SQS_SECRET)
    queue = (connection.get_queue(queue_name)
             or connection.create_queue(queue_name))
    message = boto.sqs.message.Message()
    message.set_body(body)
    queue.write(message)
[ "def", "schedule_function", "(", "queue_name", ",", "function_name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "body", "=", "create_request_body", "(", "function_name", ",", "*", "args", ",", "*", "*", "kwargs", ")", "if", "getattr", "(", "settings", ",", "'BEANSTALK_DISPATCH_EXECUTE_SYNCHRONOUSLY'", ",", "False", ")", ":", "execute_function", "(", "json", ".", "loads", "(", "body", ")", ")", "else", ":", "connection", "=", "boto", ".", "connect_sqs", "(", "settings", ".", "BEANSTALK_DISPATCH_SQS_KEY", ",", "settings", ".", "BEANSTALK_DISPATCH_SQS_SECRET", ")", "queue", "=", "connection", ".", "get_queue", "(", "queue_name", ")", "if", "not", "queue", ":", "queue", "=", "connection", ".", "create_queue", "(", "queue_name", ")", "message", "=", "boto", ".", "sqs", ".", "message", ".", "Message", "(", ")", "message", ".", "set_body", "(", "body", ")", "queue", ".", "write", "(", "message", ")" ]
41.315789
15.421053
def follow(  # type: ignore
        self,
        users,
        strategies,
        total_assets=10000,
        initial_assets=None,
        adjust_sell=False,
        track_interval=10,
        trade_cmd_expire_seconds=120,
        cmd_cache=True,
        slippage: float = 0.0):
    """Follow joinquant paper-trading strategies; supports multiple users and strategies.

    :param users: easytrader user object(s); use a [] list for several users
    :param strategies: portfolio names, e.g. ZH123450
    :param total_assets: total assets backing each portfolio, as a list
        [assets_for_portfolio1, assets_for_portfolio2, ...]. E.g. with
        strategies=['ZH000001', 'ZH000002'] and total_assets=[10000, 10000]
        each portfolio is backed by 10k. If portfolio ZH000001 adds 10% of
        stock A at price p, the generated order is: buy stock A at price p,
        quantity 10000 * 10% / p rounded to a multiple of 100.
    :param adjust_sell: whether to cap the sell quantity at the user's actual
        holdings when the requested quantity exceeds them. Only tested with
        the Galaxy client so far. With multiple users, the first user's
        holdings decide.
    :type adjust_sell: bool
    :param initial_assets: initial assets per portfolio, as a list
        [assets_for_portfolio1, assets_for_portfolio2, ...]. Total assets are
        computed as initial assets x portfolio net value; overridden by
        ``total_assets``.
    :param track_interval: polling interval of the paper trades, in seconds
    :param trade_cmd_expire_seconds: expiry of a trade command, in seconds
    :param cmd_cache: whether to load previously executed commands to avoid
        re-executing them after a restart
    :param slippage: slippage; 0.0 means none, 0.05 means 5%
    """
    super().follow(users=users, strategies=strategies,
                   track_interval=track_interval,
                   trade_cmd_expire_seconds=trade_cmd_expire_seconds,
                   cmd_cache=cmd_cache, slippage=slippage)
    self._adjust_sell = adjust_sell
    self._users = self.warp_list(users)
    # Normalize all per-strategy parameters to parallel lists so they can be
    # zipped together below.
    strategies = self.warp_list(strategies)
    total_assets = self.warp_list(total_assets)
    initial_assets = self.warp_list(initial_assets)
    if cmd_cache:
        self.load_expired_cmd_cache()
    self.start_trader_thread(self._users, trade_cmd_expire_seconds)
    for strategy_url, strategy_total_assets, strategy_initial_assets in zip(
            strategies, total_assets, initial_assets):
        assets = self.calculate_assets(strategy_url, strategy_total_assets,
                                       strategy_initial_assets)
        try:
            strategy_id = self.extract_strategy_id(strategy_url)
            strategy_name = self.extract_strategy_name(strategy_url)
        except:
            # Message: "failed to extract trade id and strategy name,
            # invalid paper-trading url". Re-raised after logging.
            log.error('抽取交易id和策略名失败, 无效模拟交易url: %s', strategy_url)
            raise
        # One daemonless worker thread per strategy polls for new trades.
        strategy_worker = Thread(
            target=self.track_strategy_worker,
            args=[strategy_id, strategy_name],
            kwargs={
                'interval': track_interval,
                'assets': assets
            })
        strategy_worker.start()
        # Message: "started tracking strategy: %s"
        log.info('开始跟踪策略: %s', strategy_name)
[ "def", "follow", "(", "# type: ignore", "self", ",", "users", ",", "strategies", ",", "total_assets", "=", "10000", ",", "initial_assets", "=", "None", ",", "adjust_sell", "=", "False", ",", "track_interval", "=", "10", ",", "trade_cmd_expire_seconds", "=", "120", ",", "cmd_cache", "=", "True", ",", "slippage", ":", "float", "=", "0.0", ")", ":", "super", "(", ")", ".", "follow", "(", "users", "=", "users", ",", "strategies", "=", "strategies", ",", "track_interval", "=", "track_interval", ",", "trade_cmd_expire_seconds", "=", "trade_cmd_expire_seconds", ",", "cmd_cache", "=", "cmd_cache", ",", "slippage", "=", "slippage", ")", "self", ".", "_adjust_sell", "=", "adjust_sell", "self", ".", "_users", "=", "self", ".", "warp_list", "(", "users", ")", "strategies", "=", "self", ".", "warp_list", "(", "strategies", ")", "total_assets", "=", "self", ".", "warp_list", "(", "total_assets", ")", "initial_assets", "=", "self", ".", "warp_list", "(", "initial_assets", ")", "if", "cmd_cache", ":", "self", ".", "load_expired_cmd_cache", "(", ")", "self", ".", "start_trader_thread", "(", "self", ".", "_users", ",", "trade_cmd_expire_seconds", ")", "for", "strategy_url", ",", "strategy_total_assets", ",", "strategy_initial_assets", "in", "zip", "(", "strategies", ",", "total_assets", ",", "initial_assets", ")", ":", "assets", "=", "self", ".", "calculate_assets", "(", "strategy_url", ",", "strategy_total_assets", ",", "strategy_initial_assets", ")", "try", ":", "strategy_id", "=", "self", ".", "extract_strategy_id", "(", "strategy_url", ")", "strategy_name", "=", "self", ".", "extract_strategy_name", "(", "strategy_url", ")", "except", ":", "log", ".", "error", "(", "'抽取交易id和策略名失败, 无效模拟交易url: %s', strategy_url)", "", "", "", "raise", "strategy_worker", "=", "Thread", "(", "target", "=", "self", ".", "track_strategy_worker", ",", "args", "=", "[", "strategy_id", ",", "strategy_name", "]", ",", "kwargs", "=", "{", "'interval'", ":", "track_interval", ",", "'assets'", ":", 
"assets", "}", ")", "strategy_worker", ".", "start", "(", ")", "log", ".", "info", "(", "'开始跟踪策略: %s', strategy_n", "a", "e)", "" ]
40.2
15.557143
def set_ftp_proxy(server,
                  port,
                  user=None,
                  password=None,
                  network_service="Ethernet",
                  bypass_hosts=None):
    '''
    Sets the ftp proxy settings

    server
        The proxy server to use

    port
        The port used by the proxy server

    user
        The username to use for the proxy server if required

    password
        The password to use if required by the server

    network_service
        The network service to apply the changes to, this only necessary on
        macOS

    bypass_hosts
        The hosts that are allowed to by pass the proxy. Only used on Windows
        for other OS's use set_proxy_bypass to edit the bypass hosts.

    CLI Example:

    .. code-block:: bash

        salt '*' proxy.set_ftp_proxy example.com 1080 user=proxy_user password=proxy_pass network_service=Ethernet
    '''
    # Dispatch on the OS grain: Windows and macOS have separate backends.
    on_windows = __grains__['os'] == 'Windows'
    if on_windows:
        return _set_proxy_windows(server=server,
                                  port=port,
                                  types=['ftp'],
                                  bypass_hosts=bypass_hosts)
    return _set_proxy_osx(cmd_function="setftpproxy",
                          server=server,
                          port=port,
                          user=user,
                          password=password,
                          network_service=network_service)
[ "def", "set_ftp_proxy", "(", "server", ",", "port", ",", "user", "=", "None", ",", "password", "=", "None", ",", "network_service", "=", "\"Ethernet\"", ",", "bypass_hosts", "=", "None", ")", ":", "if", "__grains__", "[", "'os'", "]", "==", "'Windows'", ":", "return", "_set_proxy_windows", "(", "server", "=", "server", ",", "port", "=", "port", ",", "types", "=", "[", "'ftp'", "]", ",", "bypass_hosts", "=", "bypass_hosts", ")", "return", "_set_proxy_osx", "(", "cmd_function", "=", "\"setftpproxy\"", ",", "server", "=", "server", ",", "port", "=", "port", ",", "user", "=", "user", ",", "password", "=", "password", ",", "network_service", "=", "network_service", ")" ]
29.468085
22.829787
def gaussian(x, a, b, c, d=0):
    '''
    Evaluate a Gaussian curve at x.

    a -> height of the curve's peak
    b -> position of the center of the peak
    c -> standard deviation or Gaussian RMS width
    d -> offset
    '''
    exponent = -((x - b) ** 2) / (2 * (c ** 2))
    return a * np.exp(exponent) + d
[ "def", "gaussian", "(", "x", ",", "a", ",", "b", ",", "c", ",", "d", "=", "0", ")", ":", "return", "a", "*", "np", ".", "exp", "(", "-", "(", "(", "(", "x", "-", "b", ")", "**", "2", ")", "/", "(", "2", "*", "(", "c", "**", "2", ")", ")", ")", ")", "+", "d" ]
31.5
16.5
def get_by_addr(self, address):
    """
    Lookup a set of notifications by address

    Args:
        address (UInt160 or str): hash of address for notifications

    Returns:
        list: a list of notifications

    Raises:
        Exception: if the address is neither a UInt160 nor a 34-character
            address string.
    """
    addr = address
    # A 34-character string is treated as an address string and converted
    # to its UInt160 script-hash form.
    if isinstance(address, str) and len(address) == 34:
        addr = Helper.AddrStrToScriptHash(address)
    if not isinstance(addr, UInt160):
        raise Exception("Incorrect address format")
    # Take a consistent snapshot of the address-prefixed keyspace so the
    # iteration below is not affected by concurrent writes.
    addrlist_snapshot = self.db.prefixed_db(NotificationPrefix.PREFIX_ADDR).snapshot()
    results = []
    for val in addrlist_snapshot.iterator(prefix=bytes(addr.Data), include_key=False):
        # Entries of 4 bytes or fewer are skipped — presumably
        # placeholder/count records rather than serialized events; confirm
        # against the writer side.
        if len(val) > 4:
            try:
                event = SmartContractEvent.FromByteArray(val)
                results.append(event)
            except Exception as e:
                # Best-effort: log and keep iterating rather than failing
                # the whole lookup on one corrupt record.
                logger.error("could not parse event: %s %s" % (e, val))
    return results
[ "def", "get_by_addr", "(", "self", ",", "address", ")", ":", "addr", "=", "address", "if", "isinstance", "(", "address", ",", "str", ")", "and", "len", "(", "address", ")", "==", "34", ":", "addr", "=", "Helper", ".", "AddrStrToScriptHash", "(", "address", ")", "if", "not", "isinstance", "(", "addr", ",", "UInt160", ")", ":", "raise", "Exception", "(", "\"Incorrect address format\"", ")", "addrlist_snapshot", "=", "self", ".", "db", ".", "prefixed_db", "(", "NotificationPrefix", ".", "PREFIX_ADDR", ")", ".", "snapshot", "(", ")", "results", "=", "[", "]", "for", "val", "in", "addrlist_snapshot", ".", "iterator", "(", "prefix", "=", "bytes", "(", "addr", ".", "Data", ")", ",", "include_key", "=", "False", ")", ":", "if", "len", "(", "val", ")", ">", "4", ":", "try", ":", "event", "=", "SmartContractEvent", ".", "FromByteArray", "(", "val", ")", "results", ".", "append", "(", "event", ")", "except", "Exception", "as", "e", ":", "logger", ".", "error", "(", "\"could not parse event: %s %s\"", "%", "(", "e", ",", "val", ")", ")", "return", "results" ]
35.62963
20.740741
def average_points_dist(p0, p_list):
    """
    Return the mean distance between point ``p0`` and each point in
    ``p_list`` (distances computed by ``point_dist``).
    """
    distances = [point_dist(p0, other) for other in p_list]
    return np.mean(distances)
[ "def", "average_points_dist", "(", "p0", ",", "p_list", ")", ":", "return", "np", ".", "mean", "(", "list", "(", "point_dist", "(", "p0", ",", "p1", ")", "for", "p1", "in", "p_list", ")", ")" ]
32.333333
9.666667
def pymmh3_hash128(key: Union[bytes, bytearray], seed: int = 0, x64arch: bool = True) -> int:
    """
    Implements 128bit murmur3 hash, as per ``pymmh3``.

    Args:
        key: data to hash
        seed: seed
        x64arch: is a 64-bit architecture available?

    Returns:
        integer hash
    """
    # Select the architecture-specific implementation, then delegate.
    implementation = pymmh3_hash128_x64 if x64arch else pymmh3_hash128_x86
    return implementation(key, seed)
[ "def", "pymmh3_hash128", "(", "key", ":", "Union", "[", "bytes", ",", "bytearray", "]", ",", "seed", ":", "int", "=", "0", ",", "x64arch", ":", "bool", "=", "True", ")", "->", "int", ":", "if", "x64arch", ":", "return", "pymmh3_hash128_x64", "(", "key", ",", "seed", ")", "else", ":", "return", "pymmh3_hash128_x86", "(", "key", ",", "seed", ")" ]
23.421053
18.368421
def factor_mark(field_name, markers, factors, start=0, end=None):
    ''' Create a ``DataSpec`` dict that applies a client-side
    ``CategoricalMarkerMapper`` transformation to a ``ColumnDataSource``
    column.

    .. note::
        This transform is primarily only useful with ``scatter``, which
        can be parameterized by glyph type.

    Args:
        field_name (str) : a field name to configure ``DataSpec`` with

        markers (seq[string]) : a list of markers to use to map to

        factors (seq) : a sequences of categorical factors corresponding to
            the palette

        start (int, optional) : a start slice index to apply when the column
            data has factors with multiple levels. (default: 0)

        end (int, optional) : an end slice index to apply when the column
            data has factors with multiple levels. (default: None)

    Returns:
        dict
    '''
    mapper = CategoricalMarkerMapper(markers=markers,
                                     factors=factors,
                                     start=start,
                                     end=end)
    return field(field_name, mapper)
[ "def", "factor_mark", "(", "field_name", ",", "markers", ",", "factors", ",", "start", "=", "0", ",", "end", "=", "None", ")", ":", "return", "field", "(", "field_name", ",", "CategoricalMarkerMapper", "(", "markers", "=", "markers", ",", "factors", "=", "factors", ",", "start", "=", "start", ",", "end", "=", "end", ")", ")" ]
37.193548
30.290323
def DeriveDataCopyValue(fieldName, deriveInput, overwrite, fieldVal, histObj=None):
    """
    Return new value based on value from another field

    :param string fieldName: Field name to query against
    :param dict deriveInput: Values to perform lookup against:
        {"copyField1": "copyVal1"}
    :param bool overwrite: Should an existing field value be replaced
    :param string fieldVal: Current field value
    :param dict histObj: History object to which changes should be appended
    :return: tuple of (new field value, updated history object, bool flag
        indicating whether the copy was applied)
    :raises Exception: if deriveInput contains more than one field/value
    """
    # BUG FIX: the default used to be ``histObj={}`` — a mutable default is
    # created once and shared across all calls, so history could leak
    # between unrelated invocations. Use None and create a fresh dict.
    if histObj is None:
        histObj = {}

    if len(deriveInput) > 1:
        raise Exception("more than one field/value in deriveInput")

    field_val_new = fieldVal
    row = list(deriveInput.keys())[0]

    # Copy only when the source is non-empty and either overwriting is
    # allowed or the current value is empty.
    if deriveInput[row] != '' and (overwrite or (fieldVal == '')):
        field_val_new = deriveInput[row]
        check_match = True
    else:
        check_match = False

    change = _CollectHistory_(lookupType='copyValue', fromVal=fieldVal,
                              toVal=field_val_new, using=deriveInput)
    hist_obj_upd = _CollectHistoryAgg_(contactHist=histObj,
                                       fieldHistObj=change,
                                       fieldName=fieldName)
    return field_val_new, hist_obj_upd, check_match
[ "def", "DeriveDataCopyValue", "(", "fieldName", ",", "deriveInput", ",", "overwrite", ",", "fieldVal", ",", "histObj", "=", "{", "}", ")", ":", "if", "len", "(", "deriveInput", ")", ">", "1", ":", "raise", "Exception", "(", "\"more than one field/value in deriveInput\"", ")", "field_val_new", "=", "fieldVal", "row", "=", "list", "(", "deriveInput", ".", "keys", "(", ")", ")", "[", "0", "]", "if", "deriveInput", "[", "row", "]", "!=", "''", "and", "(", "overwrite", "or", "(", "fieldVal", "==", "''", ")", ")", ":", "field_val_new", "=", "deriveInput", "[", "row", "]", "check_match", "=", "True", "else", ":", "check_match", "=", "False", "change", "=", "_CollectHistory_", "(", "lookupType", "=", "'copyValue'", ",", "fromVal", "=", "fieldVal", ",", "toVal", "=", "field_val_new", ",", "using", "=", "deriveInput", ")", "hist_obj_upd", "=", "_CollectHistoryAgg_", "(", "contactHist", "=", "histObj", ",", "fieldHistObj", "=", "change", ",", "fieldName", "=", "fieldName", ")", "return", "field_val_new", ",", "hist_obj_upd", ",", "check_match" ]
36.03125
23.59375
def incr(**vars):
    """Increments context variables

    Each keyword argument names a context variable and the amount to add
    to it.
    """
    # BUG FIX: iterating a dict directly yields only its keys, so the
    # original ``for k, v in vars:`` failed to unpack at runtime; .items()
    # yields the (key, value) pairs the loop body expects.
    for k, v in vars.items():
        current_context.vars.setdefault(k, 0)
        # NOTE(review): setdefault targets current_context.vars while the
        # increment targets current_context[k]; presumably item access on
        # current_context delegates to .vars — confirm against its class.
        current_context[k] += v
[ "def", "incr", "(", "*", "*", "vars", ")", ":", "for", "k", ",", "v", "in", "vars", ":", "current_context", ".", "vars", ".", "setdefault", "(", "k", ",", "0", ")", "current_context", "[", "k", "]", "+=", "v" ]
26
9.333333
def contains(self, key):
    """Does this configuration contain a given key?"""
    # Prefer the JVM-side configuration when one is attached; otherwise
    # fall back to the local dict.
    if self._jconf is None:
        return key in self._conf
    return self._jconf.contains(key)
[ "def", "contains", "(", "self", ",", "key", ")", ":", "if", "self", ".", "_jconf", "is", "not", "None", ":", "return", "self", ".", "_jconf", ".", "contains", "(", "key", ")", "else", ":", "return", "key", "in", "self", ".", "_conf" ]
35
9.333333
def attempt_connection(self):
    """
    Establish a multicast connection - uses 2 sockets (one for sending, the other for receiving)

    Side effects: assigns ``self.socket`` (sender) and
    ``self.receiver_socket`` (receiver, bound to MCAST_PORT and joined to
    MCAST_GRP).

    Raises:
        exception.ConnectFailedException: if either socket is falsy after
            setup.
    """
    # Sender: plain UDP socket; multicast TTL of 2 limits packets to the
    # local network plus one router hop.
    self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
    # Receiver: allow address/port reuse so multiple listeners can share
    # the multicast port on one host. NOTE(review): SO_REUSEPORT is not
    # available on all platforms (e.g. older Windows) — confirm targets.
    self.receiver_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    self.receiver_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self.receiver_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
    self.receiver_socket.bind(('', MCAST_PORT))
    # Join the multicast group on all interfaces (INADDR_ANY).
    mreq = struct.pack("4sl", socket.inet_aton(MCAST_GRP), socket.INADDR_ANY)
    self.receiver_socket.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
    # NOTE(review): socket.socket() raises on failure and socket objects
    # are always truthy, so this guard looks unreachable in practice.
    if not self.socket or not self.receiver_socket:
        raise exception.ConnectFailedException()
[ "def", "attempt_connection", "(", "self", ")", ":", "self", ".", "socket", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_DGRAM", ",", "socket", ".", "IPPROTO_UDP", ")", "self", ".", "socket", ".", "setsockopt", "(", "socket", ".", "IPPROTO_IP", ",", "socket", ".", "IP_MULTICAST_TTL", ",", "2", ")", "self", ".", "receiver_socket", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_DGRAM", ",", "socket", ".", "IPPROTO_UDP", ")", "self", ".", "receiver_socket", ".", "setsockopt", "(", "socket", ".", "SOL_SOCKET", ",", "socket", ".", "SO_REUSEADDR", ",", "1", ")", "self", ".", "receiver_socket", ".", "setsockopt", "(", "socket", ".", "SOL_SOCKET", ",", "socket", ".", "SO_REUSEPORT", ",", "1", ")", "self", ".", "receiver_socket", ".", "bind", "(", "(", "''", ",", "MCAST_PORT", ")", ")", "mreq", "=", "struct", ".", "pack", "(", "\"4sl\"", ",", "socket", ".", "inet_aton", "(", "MCAST_GRP", ")", ",", "socket", ".", "INADDR_ANY", ")", "self", ".", "receiver_socket", ".", "setsockopt", "(", "socket", ".", "IPPROTO_IP", ",", "socket", ".", "IP_ADD_MEMBERSHIP", ",", "mreq", ")", "if", "not", "self", ".", "socket", "or", "not", "self", ".", "receiver_socket", ":", "raise", "exception", ".", "ConnectFailedException", "(", ")" ]
56.875
31.875
def read_until(self, terminator):
    """Read the input stream until the terminator is found.

    Throws MissingTerminator if the terminator is not found.
    Note: This method does not read from the line buffer.

    :param terminator: bytes terminator; matched against a full line
        (terminator + newline).
    :return: the bytes read up to but excluding the terminator.
    """
    lines = []
    term = terminator + b'\n'
    while True:
        line = self.input.readline()
        if line == term:
            break
        if not line:
            # BUG FIX: readline() returns b'' forever at EOF, so the
            # original loop spun indefinitely when the terminator was
            # missing instead of raising the documented error.
            raise MissingTerminator(terminator)
        lines.append(line)
    return b''.join(lines)
[ "def", "read_until", "(", "self", ",", "terminator", ")", ":", "lines", "=", "[", "]", "term", "=", "terminator", "+", "b'\\n'", "while", "True", ":", "line", "=", "self", ".", "input", ".", "readline", "(", ")", "if", "line", "==", "term", ":", "break", "else", ":", "lines", ".", "append", "(", "line", ")", "return", "b''", ".", "join", "(", "lines", ")" ]
28.368421
18.894737
def temperature_data_to_csv(temperature_data, path_or_buf):
    """
    Write temperature data to CSV. See also :any:`pandas.DataFrame.to_csv`.

    Note: missing index/series names are filled in ("dt"/"temperature") on
    the passed series itself before writing.

    Parameters
    ----------
    temperature_data : :any:`pandas.Series`
        Temperature data series with :any:`pandas.DatetimeIndex`.
    path_or_buf : :any:`str` or file handle, default None
        File path or object, if None is provided the result is returned as a
        string.
    """
    index = temperature_data.index
    if index.name is None:
        index.name = "dt"
    if temperature_data.name is None:
        temperature_data.name = "temperature"
    frame = temperature_data.to_frame()
    return frame.to_csv(path_or_buf, index=True)
[ "def", "temperature_data_to_csv", "(", "temperature_data", ",", "path_or_buf", ")", ":", "if", "temperature_data", ".", "index", ".", "name", "is", "None", ":", "temperature_data", ".", "index", ".", "name", "=", "\"dt\"", "if", "temperature_data", ".", "name", "is", "None", ":", "temperature_data", ".", "name", "=", "\"temperature\"", "return", "temperature_data", ".", "to_frame", "(", ")", ".", "to_csv", "(", "path_or_buf", ",", "index", "=", "True", ")" ]
43.933333
16.2
def dp2hx(number, lenout=_default_len_out):
    """
    Convert a double precision number to an equivalent character string
    using base 16 "scientific notation."

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dp2hx_c.html

    :param number: D.p. number to be converted.
    :type number: float
    :param lenout: Available space for output string.
    :type lenout: int
    :return: Equivalent character string, left justified.
    :rtype: str
    """
    # Marshal the arguments into ctypes values for the CSPICE call.
    c_number = ctypes.c_double(number)
    c_lenout = ctypes.c_int(lenout)
    out_string = stypes.stringToCharP(c_lenout)
    out_length = ctypes.c_int()
    libspice.dp2hx_c(c_number, c_lenout, out_string, ctypes.byref(out_length))
    return stypes.toPythonString(out_string)
[ "def", "dp2hx", "(", "number", ",", "lenout", "=", "_default_len_out", ")", ":", "number", "=", "ctypes", ".", "c_double", "(", "number", ")", "lenout", "=", "ctypes", ".", "c_int", "(", "lenout", ")", "string", "=", "stypes", ".", "stringToCharP", "(", "lenout", ")", "length", "=", "ctypes", ".", "c_int", "(", ")", "libspice", ".", "dp2hx_c", "(", "number", ",", "lenout", ",", "string", ",", "ctypes", ".", "byref", "(", "length", ")", ")", "return", "stypes", ".", "toPythonString", "(", "string", ")" ]
34.95
14.75
def multiple_replace(string, replacements):
    # type: (str, Dict[str,str]) -> str
    """Simultaneously replace multiple strings in a string

    Args:
        string (str): Input string
        replacements (Dict[str,str]): Replacements dictionary

    Returns:
        str: String with replacements
    """
    # Sort keys longest-first so overlapping targets prefer the longest
    # match; DOTALL lets '.'-containing keys behave across newlines.
    keys = sorted(replacements, key=len, reverse=True)
    pattern = re.compile("|".join(re.escape(k) for k in keys), flags=re.DOTALL)

    def _substitute(match):
        return replacements[match.group(0)]

    return pattern.sub(_substitute, string)
[ "def", "multiple_replace", "(", "string", ",", "replacements", ")", ":", "# type: (str, Dict[str,str]) -> str", "pattern", "=", "re", ".", "compile", "(", "\"|\"", ".", "join", "(", "[", "re", ".", "escape", "(", "k", ")", "for", "k", "in", "sorted", "(", "replacements", ",", "key", "=", "len", ",", "reverse", "=", "True", ")", "]", ")", ",", "flags", "=", "re", ".", "DOTALL", ")", "return", "pattern", ".", "sub", "(", "lambda", "x", ":", "replacements", "[", "x", ".", "group", "(", "0", ")", "]", ",", "string", ")" ]
34.714286
22.785714
def no_ssl_verification(self):
    """ Requests module fails due to lets encrypt ssl encryption. Will be
    fixed in the future release.

    Generator body for a context manager: globally disables certificate
    verification on ``requests.Session.request`` and silences the resulting
    "Unverified HTTPS request" warnings, restoring both afterwards.
    """
    try:
        from functools import partialmethod
    except ImportError:
        # Python 2 fallback: https://gist.github.com/carymrobbins/8940382
        from functools import partial

        class partialmethod(partial):
            def __get__(self, instance, owner):
                if instance is None:
                    return self
                return partial(self.func, instance,
                               *(self.args or ()), **(self.keywords or {}))

    old_request = requests.Session.request
    requests.Session.request = partialmethod(old_request, verify=False)
    warnings.filterwarnings('ignore', 'Unverified HTTPS request')
    try:
        yield
    finally:
        # BUG FIX: restore the patched method and warning filters even if
        # the with-block raises; the original skipped cleanup on error,
        # leaving SSL verification disabled process-wide.
        warnings.resetwarnings()
        requests.Session.request = old_request
[ "def", "no_ssl_verification", "(", "self", ")", ":", "try", ":", "from", "functools", "import", "partialmethod", "except", "ImportError", ":", "# Python 2 fallback: https://gist.github.com/carymrobbins/8940382", "from", "functools", "import", "partial", "class", "partialmethod", "(", "partial", ")", ":", "def", "__get__", "(", "self", ",", "instance", ",", "owner", ")", ":", "if", "instance", "is", "None", ":", "return", "self", "return", "partial", "(", "self", ".", "func", ",", "instance", ",", "*", "(", "self", ".", "args", "or", "(", ")", ")", ",", "*", "*", "(", "self", ".", "keywords", "or", "{", "}", ")", ")", "old_request", "=", "requests", ".", "Session", ".", "request", "requests", ".", "Session", ".", "request", "=", "partialmethod", "(", "old_request", ",", "verify", "=", "False", ")", "warnings", ".", "filterwarnings", "(", "'ignore'", ",", "'Unverified HTTPS request'", ")", "yield", "warnings", ".", "resetwarnings", "(", ")", "requests", ".", "Session", ".", "request", "=", "old_request" ]
42.428571
19.238095
def key_set(self, predicate=None):
    """
    Transactional implementation of :func:`Map.key_set(predicate)
    <hazelcast.proxy.map.Map.key_set>`

    :param predicate: (Predicate), predicate to filter the entries (optional).
    :return: (Sequence), a list of the clone of the keys.

    .. seealso:: :class:`~hazelcast.serialization.predicate.Predicate` for more info about predicates.
    """
    # No (truthy) predicate: use the plain key-set codec.
    if not predicate:
        return self._encode_invoke(transactional_map_key_set_codec)
    return self._encode_invoke(
        transactional_map_key_set_with_predicate_codec,
        predicate=self._to_data(predicate))
[ "def", "key_set", "(", "self", ",", "predicate", "=", "None", ")", ":", "if", "predicate", ":", "return", "self", ".", "_encode_invoke", "(", "transactional_map_key_set_with_predicate_codec", ",", "predicate", "=", "self", ".", "_to_data", "(", "predicate", ")", ")", "return", "self", ".", "_encode_invoke", "(", "transactional_map_key_set_codec", ")" ]
44.533333
30.4
def readBIM(basefilename, usecols=None):
    """
    helper method for speeding up read BED

    Loads the PLINK ``.bim`` file next to ``basefilename`` as a raw bytes
    array, optionally restricted to ``usecols`` columns.
    """
    bim_path = basefilename + '.bim'
    return SP.loadtxt(bim_path, dtype=bytes, usecols=usecols)
[ "def", "readBIM", "(", "basefilename", ",", "usecols", "=", "None", ")", ":", "bim", "=", "basefilename", "+", "'.bim'", "bim", "=", "SP", ".", "loadtxt", "(", "bim", ",", "dtype", "=", "bytes", ",", "usecols", "=", "usecols", ")", "return", "bim" ]
27.428571
7.428571
def addDuplicateAnalyses(self, src_slot, dest_slot=None): """ Creates and add duplicate analyes from the src_slot to the dest_slot If no destination slot is defined, the most suitable slot will be used, typically a new slot at the end of the worksheet will be added. :param src_slot: slot that contains the analyses to duplicate :param dest_slot: slot where the duplicate analyses must be stored :return: the list of duplicate analyses added """ # Duplicate analyses can only be added if the state of the ws is open # unless we are adding a retest if api.get_workflow_status_of(self) != "open": return [] slot_from = to_int(src_slot, 0) if slot_from < 1: return [] slot_to = to_int(dest_slot, 0) if slot_to < 0: return [] if not slot_to: # Find the suitable slot to add these duplicates slot_to = self.get_suitable_slot_for_duplicate(slot_from) return self.addDuplicateAnalyses(src_slot, slot_to) processed = map(lambda an: api.get_uid(an.getAnalysis()), self.get_analyses_at(slot_to)) src_analyses = list() for analysis in self.get_analyses_at(slot_from): if api.get_uid(analysis) in processed: if api.get_workflow_status_of(analysis) != "retracted": continue src_analyses.append(analysis) ref_gid = None duplicates = list() for analysis in src_analyses: duplicate = self.add_duplicate_analysis(analysis, slot_to, ref_gid) if not duplicate: continue # All duplicates from the same slot must have the same group id ref_gid = ref_gid or duplicate.getReferenceAnalysesGroupID() duplicates.append(duplicate) return duplicates
[ "def", "addDuplicateAnalyses", "(", "self", ",", "src_slot", ",", "dest_slot", "=", "None", ")", ":", "# Duplicate analyses can only be added if the state of the ws is open", "# unless we are adding a retest", "if", "api", ".", "get_workflow_status_of", "(", "self", ")", "!=", "\"open\"", ":", "return", "[", "]", "slot_from", "=", "to_int", "(", "src_slot", ",", "0", ")", "if", "slot_from", "<", "1", ":", "return", "[", "]", "slot_to", "=", "to_int", "(", "dest_slot", ",", "0", ")", "if", "slot_to", "<", "0", ":", "return", "[", "]", "if", "not", "slot_to", ":", "# Find the suitable slot to add these duplicates", "slot_to", "=", "self", ".", "get_suitable_slot_for_duplicate", "(", "slot_from", ")", "return", "self", ".", "addDuplicateAnalyses", "(", "src_slot", ",", "slot_to", ")", "processed", "=", "map", "(", "lambda", "an", ":", "api", ".", "get_uid", "(", "an", ".", "getAnalysis", "(", ")", ")", ",", "self", ".", "get_analyses_at", "(", "slot_to", ")", ")", "src_analyses", "=", "list", "(", ")", "for", "analysis", "in", "self", ".", "get_analyses_at", "(", "slot_from", ")", ":", "if", "api", ".", "get_uid", "(", "analysis", ")", "in", "processed", ":", "if", "api", ".", "get_workflow_status_of", "(", "analysis", ")", "!=", "\"retracted\"", ":", "continue", "src_analyses", ".", "append", "(", "analysis", ")", "ref_gid", "=", "None", "duplicates", "=", "list", "(", ")", "for", "analysis", "in", "src_analyses", ":", "duplicate", "=", "self", ".", "add_duplicate_analysis", "(", "analysis", ",", "slot_to", ",", "ref_gid", ")", "if", "not", "duplicate", ":", "continue", "# All duplicates from the same slot must have the same group id", "ref_gid", "=", "ref_gid", "or", "duplicate", ".", "getReferenceAnalysesGroupID", "(", ")", "duplicates", ".", "append", "(", "duplicate", ")", "return", "duplicates" ]
42.977273
19.5
def send_to_room(self, message, room_name):
    """ Sends a given message to a given room

    Silently does nothing when the room cannot be resolved.
    """
    target = self.get_room(room_name)
    if target is None:
        return
    target.send_message(message)
[ "def", "send_to_room", "(", "self", ",", "message", ",", "room_name", ")", ":", "room", "=", "self", ".", "get_room", "(", "room_name", ")", "if", "room", "is", "not", "None", ":", "room", ".", "send_message", "(", "message", ")" ]
34.333333
9.333333
def args_to_dict(args):
    # type: (str) -> DictUpperBound[str,str]
    """Convert command line arguments in a comma separated string to a dictionary

    Args:
        args (str): Command line arguments

    Returns:
        DictUpperBound[str,str]: Dictionary of arguments
    """
    arguments = dict()
    for arg in args.split(','):
        # Split on the first '=' only so values may themselves contain '='
        # (the original split raised ValueError on e.g. 'key=a=b').
        key, value = arg.split('=', 1)
        arguments[key] = value
    return arguments
[ "def", "args_to_dict", "(", "args", ")", ":", "# type: (str) -> DictUpperBound[str,str]", "arguments", "=", "dict", "(", ")", "for", "arg", "in", "args", ".", "split", "(", "','", ")", ":", "key", ",", "value", "=", "arg", ".", "split", "(", "'='", ")", "arguments", "[", "key", "]", "=", "value", "return", "arguments" ]
25.75
17.5
def filter_publication(publication, cmp_authors=True): """ Filter publications based at data from Aleph. Args: publication (obj): :class:`.Publication` instance. Returns: obj/None: None if the publication was found in Aleph or `publication` \ if not. """ query = None isbn_query = False # there can be ISBN query or book title query if publication.optionals and publication.optionals.ISBN: query = aleph.ISBNQuery(publication.optionals.ISBN) isbn_query = True else: query = aleph.TitleQuery(publication.title) result = aleph.reactToAMQPMessage(aleph.SearchRequest(query), "") if not result.records: return publication # book is not in database # if there was results with this ISBN, compare titles of the books # (sometimes, there are different books with same ISBN because of human # errors) if isbn_query: for record in result.records: epub = record.epublication # try to match title of the book if compare_names(epub.nazev, publication.title) >= 80: return None # book already in database return publication # checks whether the details from returned EPublication match Publication's for record in result.records: epub = record.epublication # if the title doens't match, go to next record from aleph if not compare_names(epub.nazev, publication.title) >= 80: continue if not cmp_authors: return None # book already in database # compare authors names for author in epub.autori: # convert Aleph's author structure to string author_str = "%s %s %s" % ( author.firstName, author.lastName, author.title ) # normalize author data from `publication` pub_authors = map(lambda x: x.name, publication.authors) if type(pub_authors) not in [list, tuple, set]: pub_authors = [pub_authors] # try to compare authors from `publication` and Aleph for pub_author in pub_authors: if compare_names(author_str, pub_author) >= 50: return None # book already in database return publication
[ "def", "filter_publication", "(", "publication", ",", "cmp_authors", "=", "True", ")", ":", "query", "=", "None", "isbn_query", "=", "False", "# there can be ISBN query or book title query", "if", "publication", ".", "optionals", "and", "publication", ".", "optionals", ".", "ISBN", ":", "query", "=", "aleph", ".", "ISBNQuery", "(", "publication", ".", "optionals", ".", "ISBN", ")", "isbn_query", "=", "True", "else", ":", "query", "=", "aleph", ".", "TitleQuery", "(", "publication", ".", "title", ")", "result", "=", "aleph", ".", "reactToAMQPMessage", "(", "aleph", ".", "SearchRequest", "(", "query", ")", ",", "\"\"", ")", "if", "not", "result", ".", "records", ":", "return", "publication", "# book is not in database", "# if there was results with this ISBN, compare titles of the books", "# (sometimes, there are different books with same ISBN because of human", "# errors)", "if", "isbn_query", ":", "for", "record", "in", "result", ".", "records", ":", "epub", "=", "record", ".", "epublication", "# try to match title of the book", "if", "compare_names", "(", "epub", ".", "nazev", ",", "publication", ".", "title", ")", ">=", "80", ":", "return", "None", "# book already in database", "return", "publication", "# checks whether the details from returned EPublication match Publication's", "for", "record", "in", "result", ".", "records", ":", "epub", "=", "record", ".", "epublication", "# if the title doens't match, go to next record from aleph", "if", "not", "compare_names", "(", "epub", ".", "nazev", ",", "publication", ".", "title", ")", ">=", "80", ":", "continue", "if", "not", "cmp_authors", ":", "return", "None", "# book already in database", "# compare authors names", "for", "author", "in", "epub", ".", "autori", ":", "# convert Aleph's author structure to string", "author_str", "=", "\"%s %s %s\"", "%", "(", "author", ".", "firstName", ",", "author", ".", "lastName", ",", "author", ".", "title", ")", "# normalize author data from `publication`", "pub_authors", 
"=", "map", "(", "lambda", "x", ":", "x", ".", "name", ",", "publication", ".", "authors", ")", "if", "type", "(", "pub_authors", ")", "not", "in", "[", "list", ",", "tuple", ",", "set", "]", ":", "pub_authors", "=", "[", "pub_authors", "]", "# try to compare authors from `publication` and Aleph", "for", "pub_author", "in", "pub_authors", ":", "if", "compare_names", "(", "author_str", ",", "pub_author", ")", ">=", "50", ":", "return", "None", "# book already in database", "return", "publication" ]
32.828571
21.714286
def _is_connreset(self, exc: BaseException) -> bool: """Return ``True`` if exc is ECONNRESET or equivalent. May be overridden in subclasses. """ return ( isinstance(exc, (socket.error, IOError)) and errno_from_exception(exc) in _ERRNO_CONNRESET )
[ "def", "_is_connreset", "(", "self", ",", "exc", ":", "BaseException", ")", "->", "bool", ":", "return", "(", "isinstance", "(", "exc", ",", "(", "socket", ".", "error", ",", "IOError", ")", ")", "and", "errno_from_exception", "(", "exc", ")", "in", "_ERRNO_CONNRESET", ")" ]
33.666667
15.555556
def parse_timestamp(x): """Parse ISO8601 formatted timestamp.""" dt = dateutil.parser.parse(x) if dt.tzinfo is None: dt = dt.replace(tzinfo=pytz.utc) return dt
[ "def", "parse_timestamp", "(", "x", ")", ":", "dt", "=", "dateutil", ".", "parser", ".", "parse", "(", "x", ")", "if", "dt", ".", "tzinfo", "is", "None", ":", "dt", "=", "dt", ".", "replace", "(", "tzinfo", "=", "pytz", ".", "utc", ")", "return", "dt" ]
29.666667
11
def create_stream(self, stream_id, sandbox=None): """ Create the stream :param stream_id: The stream identifier :param sandbox: The sandbox for this stream :return: None :raises: NotImplementedError """ if sandbox is not None: raise NotImplementedError logging.debug("Creating asset stream {}".format(stream_id)) if stream_id in self.streams: raise StreamAlreadyExistsError("Stream with id '{}' already exists".format(stream_id)) stream = AssetStream(channel=self, stream_id=stream_id, calculated_intervals=None, last_accessed=utcnow(), last_updated=utcnow(), sandbox=sandbox) self.streams[stream_id] = stream return stream
[ "def", "create_stream", "(", "self", ",", "stream_id", ",", "sandbox", "=", "None", ")", ":", "if", "sandbox", "is", "not", "None", ":", "raise", "NotImplementedError", "logging", ".", "debug", "(", "\"Creating asset stream {}\"", ".", "format", "(", "stream_id", ")", ")", "if", "stream_id", "in", "self", ".", "streams", ":", "raise", "StreamAlreadyExistsError", "(", "\"Stream with id '{}' already exists\"", ".", "format", "(", "stream_id", ")", ")", "stream", "=", "AssetStream", "(", "channel", "=", "self", ",", "stream_id", "=", "stream_id", ",", "calculated_intervals", "=", "None", ",", "last_accessed", "=", "utcnow", "(", ")", ",", "last_updated", "=", "utcnow", "(", ")", ",", "sandbox", "=", "sandbox", ")", "self", ".", "streams", "[", "stream_id", "]", "=", "stream", "return", "stream" ]
36.761905
20.857143
def partition(pred, iterable): 'Use a predicate to partition entries into false entries and true entries' # partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9 t1, t2 = itertools.tee(iterable) try: return itertools.ifilterfalse(pred, t1), itertools.ifilter(pred, t2) except: return itertools.filterfalse(pred, t1), filter(pred, t2)
[ "def", "partition", "(", "pred", ",", "iterable", ")", ":", "# partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9", "t1", ",", "t2", "=", "itertools", ".", "tee", "(", "iterable", ")", "try", ":", "return", "itertools", ".", "ifilterfalse", "(", "pred", ",", "t1", ")", ",", "itertools", ".", "ifilter", "(", "pred", ",", "t2", ")", "except", ":", "return", "itertools", ".", "filterfalse", "(", "pred", ",", "t1", ")", ",", "filter", "(", "pred", ",", "t2", ")" ]
46
24.75
def estimateabundance(self): """ Estimate the abundance of taxonomic groups """ logging.info('Estimating abundance of taxonomic groups') # Create and start threads for i in range(self.cpus): # Send the threads to the appropriate destination function threads = Thread(target=self.estimate, args=()) # Set the daemon to true - something to do with thread management threads.setDaemon(True) # Start the threading threads.start() with progressbar(self.runmetadata.samples) as bar: for sample in bar: try: if sample.general.combined != 'NA': # Set the name of the abundance report sample.general.abundance = sample.general.combined.split('.')[0] + '_abundance.csv' # if not hasattr(sample, 'commands'): if not sample.commands.datastore: sample.commands = GenObject() # Define system calls sample.commands.target = self.targetcall sample.commands.classify = self.classifycall sample.commands.abundancecall = \ 'cd {} && ./estimate_abundance.sh -D {} -F {} > {}'.format(self.clarkpath, self.databasepath, sample.general.classification, sample.general.abundance) self.abundancequeue.put(sample) except KeyError: pass self.abundancequeue.join()
[ "def", "estimateabundance", "(", "self", ")", ":", "logging", ".", "info", "(", "'Estimating abundance of taxonomic groups'", ")", "# Create and start threads", "for", "i", "in", "range", "(", "self", ".", "cpus", ")", ":", "# Send the threads to the appropriate destination function", "threads", "=", "Thread", "(", "target", "=", "self", ".", "estimate", ",", "args", "=", "(", ")", ")", "# Set the daemon to true - something to do with thread management", "threads", ".", "setDaemon", "(", "True", ")", "# Start the threading", "threads", ".", "start", "(", ")", "with", "progressbar", "(", "self", ".", "runmetadata", ".", "samples", ")", "as", "bar", ":", "for", "sample", "in", "bar", ":", "try", ":", "if", "sample", ".", "general", ".", "combined", "!=", "'NA'", ":", "# Set the name of the abundance report", "sample", ".", "general", ".", "abundance", "=", "sample", ".", "general", ".", "combined", ".", "split", "(", "'.'", ")", "[", "0", "]", "+", "'_abundance.csv'", "# if not hasattr(sample, 'commands'):", "if", "not", "sample", ".", "commands", ".", "datastore", ":", "sample", ".", "commands", "=", "GenObject", "(", ")", "# Define system calls", "sample", ".", "commands", ".", "target", "=", "self", ".", "targetcall", "sample", ".", "commands", ".", "classify", "=", "self", ".", "classifycall", "sample", ".", "commands", ".", "abundancecall", "=", "'cd {} && ./estimate_abundance.sh -D {} -F {} > {}'", ".", "format", "(", "self", ".", "clarkpath", ",", "self", ".", "databasepath", ",", "sample", ".", "general", ".", "classification", ",", "sample", ".", "general", ".", "abundance", ")", "self", ".", "abundancequeue", ".", "put", "(", "sample", ")", "except", "KeyError", ":", "pass", "self", ".", "abundancequeue", ".", "join", "(", ")" ]
53
23.171429
def job_listener(event): '''Listens to completed job''' job_id = event.job.args[0] if event.code == events.EVENT_JOB_MISSED: db.mark_job_as_missed(job_id) elif event.exception: if isinstance(event.exception, util.JobError): error_object = event.exception.as_dict() else: error_object = "\n".join(traceback.format_tb(event.traceback) + [repr(event.exception)]) db.mark_job_as_errored(job_id, error_object) else: db.mark_job_as_completed(job_id, event.retval) api_key = db.get_job(job_id)["api_key"] result_ok = send_result(job_id, api_key) if not result_ok: db.mark_job_as_failed_to_post_result(job_id) # Optionally notify tests that job_listener() has finished. if "_TEST_CALLBACK_URL" in app.config: requests.get(app.config["_TEST_CALLBACK_URL"])
[ "def", "job_listener", "(", "event", ")", ":", "job_id", "=", "event", ".", "job", ".", "args", "[", "0", "]", "if", "event", ".", "code", "==", "events", ".", "EVENT_JOB_MISSED", ":", "db", ".", "mark_job_as_missed", "(", "job_id", ")", "elif", "event", ".", "exception", ":", "if", "isinstance", "(", "event", ".", "exception", ",", "util", ".", "JobError", ")", ":", "error_object", "=", "event", ".", "exception", ".", "as_dict", "(", ")", "else", ":", "error_object", "=", "\"\\n\"", ".", "join", "(", "traceback", ".", "format_tb", "(", "event", ".", "traceback", ")", "+", "[", "repr", "(", "event", ".", "exception", ")", "]", ")", "db", ".", "mark_job_as_errored", "(", "job_id", ",", "error_object", ")", "else", ":", "db", ".", "mark_job_as_completed", "(", "job_id", ",", "event", ".", "retval", ")", "api_key", "=", "db", ".", "get_job", "(", "job_id", ")", "[", "\"api_key\"", "]", "result_ok", "=", "send_result", "(", "job_id", ",", "api_key", ")", "if", "not", "result_ok", ":", "db", ".", "mark_job_as_failed_to_post_result", "(", "job_id", ")", "# Optionally notify tests that job_listener() has finished.", "if", "\"_TEST_CALLBACK_URL\"", "in", "app", ".", "config", ":", "requests", ".", "get", "(", "app", ".", "config", "[", "\"_TEST_CALLBACK_URL\"", "]", ")" ]
34.68
17.64
def cmd(send, msg, args): """Translate something. Syntax: {command} [--from <language code>] [--to <language code>] <text> See https://cloud.google.com/translate/v2/translate-reference#supported_languages for a list of valid language codes """ parser = arguments.ArgParser(args['config']) parser.add_argument('--lang', '--from', default=None) parser.add_argument('--to', default='en') parser.add_argument('msg', nargs='+') try: cmdargs = parser.parse_args(msg) except arguments.ArgumentException as e: send(str(e)) return send(gen_translate(' '.join(cmdargs.msg), cmdargs.lang, cmdargs.to))
[ "def", "cmd", "(", "send", ",", "msg", ",", "args", ")", ":", "parser", "=", "arguments", ".", "ArgParser", "(", "args", "[", "'config'", "]", ")", "parser", ".", "add_argument", "(", "'--lang'", ",", "'--from'", ",", "default", "=", "None", ")", "parser", ".", "add_argument", "(", "'--to'", ",", "default", "=", "'en'", ")", "parser", ".", "add_argument", "(", "'msg'", ",", "nargs", "=", "'+'", ")", "try", ":", "cmdargs", "=", "parser", ".", "parse_args", "(", "msg", ")", "except", "arguments", ".", "ArgumentException", "as", "e", ":", "send", "(", "str", "(", "e", ")", ")", "return", "send", "(", "gen_translate", "(", "' '", ".", "join", "(", "cmdargs", ".", "msg", ")", ",", "cmdargs", ".", "lang", ",", "cmdargs", ".", "to", ")", ")" ]
37.882353
20.941176
def start(self): """ Start a node """ try: # For IOU we need to send the licence everytime if self.node_type == "iou": try: licence = self._project.controller.settings["IOU"]["iourc_content"] except KeyError: raise aiohttp.web.HTTPConflict(text="IOU licence is not configured") yield from self.post("/start", timeout=240, data={"iourc_content": licence}) else: yield from self.post("/start", timeout=240) except asyncio.TimeoutError: raise aiohttp.web.HTTPRequestTimeout(text="Timeout when starting {}".format(self._name))
[ "def", "start", "(", "self", ")", ":", "try", ":", "# For IOU we need to send the licence everytime", "if", "self", ".", "node_type", "==", "\"iou\"", ":", "try", ":", "licence", "=", "self", ".", "_project", ".", "controller", ".", "settings", "[", "\"IOU\"", "]", "[", "\"iourc_content\"", "]", "except", "KeyError", ":", "raise", "aiohttp", ".", "web", ".", "HTTPConflict", "(", "text", "=", "\"IOU licence is not configured\"", ")", "yield", "from", "self", ".", "post", "(", "\"/start\"", ",", "timeout", "=", "240", ",", "data", "=", "{", "\"iourc_content\"", ":", "licence", "}", ")", "else", ":", "yield", "from", "self", ".", "post", "(", "\"/start\"", ",", "timeout", "=", "240", ")", "except", "asyncio", ".", "TimeoutError", ":", "raise", "aiohttp", ".", "web", ".", "HTTPRequestTimeout", "(", "text", "=", "\"Timeout when starting {}\"", ".", "format", "(", "self", ".", "_name", ")", ")" ]
43.6875
23.3125
def get_offset(cls, info): """Calculate the offset to the Xing header from the start of the MPEG header including sync based on the MPEG header's content. """ assert info.layer == 3 if info.version == 1: if info.mode != 3: return 36 else: return 21 else: if info.mode != 3: return 21 else: return 13
[ "def", "get_offset", "(", "cls", ",", "info", ")", ":", "assert", "info", ".", "layer", "==", "3", "if", "info", ".", "version", "==", "1", ":", "if", "info", ".", "mode", "!=", "3", ":", "return", "36", "else", ":", "return", "21", "else", ":", "if", "info", ".", "mode", "!=", "3", ":", "return", "21", "else", ":", "return", "13" ]
26.176471
17.529412
def write_hdf5_dict(flags, output, path=None, append=False, overwrite=False, **kwargs): """Write this `DataQualityFlag` to a `h5py.Group`. This allows writing to an HDF5-format file. Parameters ---------- output : `str`, :class:`h5py.Group` path to new output file, or open h5py `Group` to write to. path : `str` the HDF5 group path in which to write a new group for this flag **kwargs other keyword arguments passed to :meth:`h5py.Group.create_dataset` Returns ------- dqfgroup : :class:`h5py.Group` HDF group containing these data. This group contains 'active' and 'known' datasets, and metadata attrs. See also -------- astropy.io for details on acceptable keyword arguments when writing a :class:`~astropy.table.Table` to HDF5 """ if path: try: parent = output[path] except KeyError: parent = output.create_group(path) else: parent = output for name in flags: # handle existing group if name in parent: if not (overwrite and append): raise IOError("Group '%s' already exists, give ``append=True, " "overwrite=True`` to overwrite it" % os.path.join(parent.name, name)) del parent[name] # create group group = parent.create_group(name) # write flag write_hdf5_flag_group(flags[name], group, **kwargs)
[ "def", "write_hdf5_dict", "(", "flags", ",", "output", ",", "path", "=", "None", ",", "append", "=", "False", ",", "overwrite", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "path", ":", "try", ":", "parent", "=", "output", "[", "path", "]", "except", "KeyError", ":", "parent", "=", "output", ".", "create_group", "(", "path", ")", "else", ":", "parent", "=", "output", "for", "name", "in", "flags", ":", "# handle existing group", "if", "name", "in", "parent", ":", "if", "not", "(", "overwrite", "and", "append", ")", ":", "raise", "IOError", "(", "\"Group '%s' already exists, give ``append=True, \"", "\"overwrite=True`` to overwrite it\"", "%", "os", ".", "path", ".", "join", "(", "parent", ".", "name", ",", "name", ")", ")", "del", "parent", "[", "name", "]", "# create group", "group", "=", "parent", ".", "create_group", "(", "name", ")", "# write flag", "write_hdf5_flag_group", "(", "flags", "[", "name", "]", ",", "group", ",", "*", "*", "kwargs", ")" ]
30.632653
22
def as_xml(self,parent): """Create vcard-tmp XML representation of the field. :Parameters: - `parent`: parent node for the element :Types: - `parent`: `libxml2.xmlNode` :return: xml node with the field data. :returntype: `libxml2.xmlNode`""" name=to_utf8(self.name.upper()) content=self.value.as_utf8() return parent.newTextChild(None, name, content)
[ "def", "as_xml", "(", "self", ",", "parent", ")", ":", "name", "=", "to_utf8", "(", "self", ".", "name", ".", "upper", "(", ")", ")", "content", "=", "self", ".", "value", ".", "as_utf8", "(", ")", "return", "parent", ".", "newTextChild", "(", "None", ",", "name", ",", "content", ")" ]
32.923077
13.769231
def days_till_due(self): """ Returns the number of days till the due date. Returns a negative number of days when the due date is in the past. Returns 0 when the task has no due date. """ due = self.due_date() if due: diff = due - date.today() return diff.days return 0
[ "def", "days_till_due", "(", "self", ")", ":", "due", "=", "self", ".", "due_date", "(", ")", "if", "due", ":", "diff", "=", "due", "-", "date", ".", "today", "(", ")", "return", "diff", ".", "days", "return", "0" ]
31.545455
13.363636
def toggle_view(self, checked): """Toggle view""" if checked: self.dockwidget.show() self.dockwidget.raise_() # Start a client in case there are none shown if not self.clients: if self.main.is_setting_up: self.create_new_client(give_focus=False) else: self.create_new_client(give_focus=True) else: self.dockwidget.hide()
[ "def", "toggle_view", "(", "self", ",", "checked", ")", ":", "if", "checked", ":", "self", ".", "dockwidget", ".", "show", "(", ")", "self", ".", "dockwidget", ".", "raise_", "(", ")", "# Start a client in case there are none shown\r", "if", "not", "self", ".", "clients", ":", "if", "self", ".", "main", ".", "is_setting_up", ":", "self", ".", "create_new_client", "(", "give_focus", "=", "False", ")", "else", ":", "self", ".", "create_new_client", "(", "give_focus", "=", "True", ")", "else", ":", "self", ".", "dockwidget", ".", "hide", "(", ")" ]
36.615385
12
def remove_callback(self, name, callback): """ Remove a previously-added callback Parameters ---------- name : str The instance to remove the callback from. func : func The callback function to remove """ if self.is_callback_property(name): prop = getattr(type(self), name) try: prop.remove_callback(self, callback) except ValueError: # pragma: nocover pass # Be forgiving if callback was already removed before else: raise TypeError("attribute '{0}' is not a callback property".format(name))
[ "def", "remove_callback", "(", "self", ",", "name", ",", "callback", ")", ":", "if", "self", ".", "is_callback_property", "(", "name", ")", ":", "prop", "=", "getattr", "(", "type", "(", "self", ")", ",", "name", ")", "try", ":", "prop", ".", "remove_callback", "(", "self", ",", "callback", ")", "except", "ValueError", ":", "# pragma: nocover", "pass", "# Be forgiving if callback was already removed before", "else", ":", "raise", "TypeError", "(", "\"attribute '{0}' is not a callback property\"", ".", "format", "(", "name", ")", ")" ]
32.65
17.35
def validate_options(options): """Validates options.""" kwcase = options.get('keyword_case') if kwcase not in [None, 'upper', 'lower', 'capitalize']: raise SQLParseError('Invalid value for keyword_case: ' '{0!r}'.format(kwcase)) idcase = options.get('identifier_case') if idcase not in [None, 'upper', 'lower', 'capitalize']: raise SQLParseError('Invalid value for identifier_case: ' '{0!r}'.format(idcase)) ofrmt = options.get('output_format') if ofrmt not in [None, 'sql', 'python', 'php']: raise SQLParseError('Unknown output format: ' '{0!r}'.format(ofrmt)) strip_comments = options.get('strip_comments', False) if strip_comments not in [True, False]: raise SQLParseError('Invalid value for strip_comments: ' '{0!r}'.format(strip_comments)) space_around_operators = options.get('use_space_around_operators', False) if space_around_operators not in [True, False]: raise SQLParseError('Invalid value for use_space_around_operators: ' '{0!r}'.format(space_around_operators)) strip_ws = options.get('strip_whitespace', False) if strip_ws not in [True, False]: raise SQLParseError('Invalid value for strip_whitespace: ' '{0!r}'.format(strip_ws)) truncate_strings = options.get('truncate_strings') if truncate_strings is not None: try: truncate_strings = int(truncate_strings) except (ValueError, TypeError): raise SQLParseError('Invalid value for truncate_strings: ' '{0!r}'.format(truncate_strings)) if truncate_strings <= 1: raise SQLParseError('Invalid value for truncate_strings: ' '{0!r}'.format(truncate_strings)) options['truncate_strings'] = truncate_strings options['truncate_char'] = options.get('truncate_char', '[...]') indent_columns = options.get('indent_columns', False) if indent_columns not in [True, False]: raise SQLParseError('Invalid value for indent_columns: ' '{0!r}'.format(indent_columns)) elif indent_columns: options['reindent'] = True # enforce reindent options['indent_columns'] = indent_columns reindent = options.get('reindent', False) if reindent not in [True, False]: 
raise SQLParseError('Invalid value for reindent: ' '{0!r}'.format(reindent)) elif reindent: options['strip_whitespace'] = True reindent_aligned = options.get('reindent_aligned', False) if reindent_aligned not in [True, False]: raise SQLParseError('Invalid value for reindent_aligned: ' '{0!r}'.format(reindent)) elif reindent_aligned: options['strip_whitespace'] = True indent_after_first = options.get('indent_after_first', False) if indent_after_first not in [True, False]: raise SQLParseError('Invalid value for indent_after_first: ' '{0!r}'.format(indent_after_first)) options['indent_after_first'] = indent_after_first indent_tabs = options.get('indent_tabs', False) if indent_tabs not in [True, False]: raise SQLParseError('Invalid value for indent_tabs: ' '{0!r}'.format(indent_tabs)) elif indent_tabs: options['indent_char'] = '\t' else: options['indent_char'] = ' ' indent_width = options.get('indent_width', 2) try: indent_width = int(indent_width) except (TypeError, ValueError): raise SQLParseError('indent_width requires an integer') if indent_width < 1: raise SQLParseError('indent_width requires a positive integer') options['indent_width'] = indent_width wrap_after = options.get('wrap_after', 0) try: wrap_after = int(wrap_after) except (TypeError, ValueError): raise SQLParseError('wrap_after requires an integer') if wrap_after < 0: raise SQLParseError('wrap_after requires a positive integer') options['wrap_after'] = wrap_after comma_first = options.get('comma_first', False) if comma_first not in [True, False]: raise SQLParseError('comma_first requires a boolean value') options['comma_first'] = comma_first right_margin = options.get('right_margin') if right_margin is not None: try: right_margin = int(right_margin) except (TypeError, ValueError): raise SQLParseError('right_margin requires an integer') if right_margin < 10: raise SQLParseError('right_margin requires an integer > 10') options['right_margin'] = right_margin return options
[ "def", "validate_options", "(", "options", ")", ":", "kwcase", "=", "options", ".", "get", "(", "'keyword_case'", ")", "if", "kwcase", "not", "in", "[", "None", ",", "'upper'", ",", "'lower'", ",", "'capitalize'", "]", ":", "raise", "SQLParseError", "(", "'Invalid value for keyword_case: '", "'{0!r}'", ".", "format", "(", "kwcase", ")", ")", "idcase", "=", "options", ".", "get", "(", "'identifier_case'", ")", "if", "idcase", "not", "in", "[", "None", ",", "'upper'", ",", "'lower'", ",", "'capitalize'", "]", ":", "raise", "SQLParseError", "(", "'Invalid value for identifier_case: '", "'{0!r}'", ".", "format", "(", "idcase", ")", ")", "ofrmt", "=", "options", ".", "get", "(", "'output_format'", ")", "if", "ofrmt", "not", "in", "[", "None", ",", "'sql'", ",", "'python'", ",", "'php'", "]", ":", "raise", "SQLParseError", "(", "'Unknown output format: '", "'{0!r}'", ".", "format", "(", "ofrmt", ")", ")", "strip_comments", "=", "options", ".", "get", "(", "'strip_comments'", ",", "False", ")", "if", "strip_comments", "not", "in", "[", "True", ",", "False", "]", ":", "raise", "SQLParseError", "(", "'Invalid value for strip_comments: '", "'{0!r}'", ".", "format", "(", "strip_comments", ")", ")", "space_around_operators", "=", "options", ".", "get", "(", "'use_space_around_operators'", ",", "False", ")", "if", "space_around_operators", "not", "in", "[", "True", ",", "False", "]", ":", "raise", "SQLParseError", "(", "'Invalid value for use_space_around_operators: '", "'{0!r}'", ".", "format", "(", "space_around_operators", ")", ")", "strip_ws", "=", "options", ".", "get", "(", "'strip_whitespace'", ",", "False", ")", "if", "strip_ws", "not", "in", "[", "True", ",", "False", "]", ":", "raise", "SQLParseError", "(", "'Invalid value for strip_whitespace: '", "'{0!r}'", ".", "format", "(", "strip_ws", ")", ")", "truncate_strings", "=", "options", ".", "get", "(", "'truncate_strings'", ")", "if", "truncate_strings", "is", "not", "None", ":", "try", ":", "truncate_strings", "=", 
"int", "(", "truncate_strings", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "raise", "SQLParseError", "(", "'Invalid value for truncate_strings: '", "'{0!r}'", ".", "format", "(", "truncate_strings", ")", ")", "if", "truncate_strings", "<=", "1", ":", "raise", "SQLParseError", "(", "'Invalid value for truncate_strings: '", "'{0!r}'", ".", "format", "(", "truncate_strings", ")", ")", "options", "[", "'truncate_strings'", "]", "=", "truncate_strings", "options", "[", "'truncate_char'", "]", "=", "options", ".", "get", "(", "'truncate_char'", ",", "'[...]'", ")", "indent_columns", "=", "options", ".", "get", "(", "'indent_columns'", ",", "False", ")", "if", "indent_columns", "not", "in", "[", "True", ",", "False", "]", ":", "raise", "SQLParseError", "(", "'Invalid value for indent_columns: '", "'{0!r}'", ".", "format", "(", "indent_columns", ")", ")", "elif", "indent_columns", ":", "options", "[", "'reindent'", "]", "=", "True", "# enforce reindent", "options", "[", "'indent_columns'", "]", "=", "indent_columns", "reindent", "=", "options", ".", "get", "(", "'reindent'", ",", "False", ")", "if", "reindent", "not", "in", "[", "True", ",", "False", "]", ":", "raise", "SQLParseError", "(", "'Invalid value for reindent: '", "'{0!r}'", ".", "format", "(", "reindent", ")", ")", "elif", "reindent", ":", "options", "[", "'strip_whitespace'", "]", "=", "True", "reindent_aligned", "=", "options", ".", "get", "(", "'reindent_aligned'", ",", "False", ")", "if", "reindent_aligned", "not", "in", "[", "True", ",", "False", "]", ":", "raise", "SQLParseError", "(", "'Invalid value for reindent_aligned: '", "'{0!r}'", ".", "format", "(", "reindent", ")", ")", "elif", "reindent_aligned", ":", "options", "[", "'strip_whitespace'", "]", "=", "True", "indent_after_first", "=", "options", ".", "get", "(", "'indent_after_first'", ",", "False", ")", "if", "indent_after_first", "not", "in", "[", "True", ",", "False", "]", ":", "raise", "SQLParseError", "(", "'Invalid value 
for indent_after_first: '", "'{0!r}'", ".", "format", "(", "indent_after_first", ")", ")", "options", "[", "'indent_after_first'", "]", "=", "indent_after_first", "indent_tabs", "=", "options", ".", "get", "(", "'indent_tabs'", ",", "False", ")", "if", "indent_tabs", "not", "in", "[", "True", ",", "False", "]", ":", "raise", "SQLParseError", "(", "'Invalid value for indent_tabs: '", "'{0!r}'", ".", "format", "(", "indent_tabs", ")", ")", "elif", "indent_tabs", ":", "options", "[", "'indent_char'", "]", "=", "'\\t'", "else", ":", "options", "[", "'indent_char'", "]", "=", "' '", "indent_width", "=", "options", ".", "get", "(", "'indent_width'", ",", "2", ")", "try", ":", "indent_width", "=", "int", "(", "indent_width", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "SQLParseError", "(", "'indent_width requires an integer'", ")", "if", "indent_width", "<", "1", ":", "raise", "SQLParseError", "(", "'indent_width requires a positive integer'", ")", "options", "[", "'indent_width'", "]", "=", "indent_width", "wrap_after", "=", "options", ".", "get", "(", "'wrap_after'", ",", "0", ")", "try", ":", "wrap_after", "=", "int", "(", "wrap_after", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "SQLParseError", "(", "'wrap_after requires an integer'", ")", "if", "wrap_after", "<", "0", ":", "raise", "SQLParseError", "(", "'wrap_after requires a positive integer'", ")", "options", "[", "'wrap_after'", "]", "=", "wrap_after", "comma_first", "=", "options", ".", "get", "(", "'comma_first'", ",", "False", ")", "if", "comma_first", "not", "in", "[", "True", ",", "False", "]", ":", "raise", "SQLParseError", "(", "'comma_first requires a boolean value'", ")", "options", "[", "'comma_first'", "]", "=", "comma_first", "right_margin", "=", "options", ".", "get", "(", "'right_margin'", ")", "if", "right_margin", "is", "not", "None", ":", "try", ":", "right_margin", "=", "int", "(", "right_margin", ")", "except", "(", "TypeError", ",", 
"ValueError", ")", ":", "raise", "SQLParseError", "(", "'right_margin requires an integer'", ")", "if", "right_margin", "<", "10", ":", "raise", "SQLParseError", "(", "'right_margin requires an integer > 10'", ")", "options", "[", "'right_margin'", "]", "=", "right_margin", "return", "options" ]
40.991379
18.146552
def params_size(event_size, name=None): """The number of `params` needed to create a single distribution.""" with tf.compat.v1.name_scope(name, 'MultivariateNormalTriL_params_size', [event_size]): return event_size + event_size * (event_size + 1) // 2
[ "def", "params_size", "(", "event_size", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "'MultivariateNormalTriL_params_size'", ",", "[", "event_size", "]", ")", ":", "return", "event_size", "+", "event_size", "*", "(", "event_size", "+", "1", ")", "//", "2" ]
58.8
12.8
def vectorAngle(vec1,vec2): ''' vector (x,y,z) or vector field of shape (3,x,y) ''' if vec1.ndim == 1: assert vec2.ndim == 3 vec1 = vectorToField(vec1, vec2.shape[:2]) if vec2.ndim == 1: assert vec1.ndim == 3 vec2 = vectorToField(vec2, vec1.shape[1:]) a = np.arccos( np.einsum('ijk,ijk->jk', vec1,vec2) /( norm(vec1,axis=0) * norm(vec2,axis=0) ) ) #take smaller of both possible angles: ab = np.abs(np.pi-a) with np.errstate(invalid='ignore'): i = a>ab a[i] = ab[i] return a
[ "def", "vectorAngle", "(", "vec1", ",", "vec2", ")", ":", "if", "vec1", ".", "ndim", "==", "1", ":", "assert", "vec2", ".", "ndim", "==", "3", "vec1", "=", "vectorToField", "(", "vec1", ",", "vec2", ".", "shape", "[", ":", "2", "]", ")", "if", "vec2", ".", "ndim", "==", "1", ":", "assert", "vec1", ".", "ndim", "==", "3", "vec2", "=", "vectorToField", "(", "vec2", ",", "vec1", ".", "shape", "[", "1", ":", "]", ")", "a", "=", "np", ".", "arccos", "(", "np", ".", "einsum", "(", "'ijk,ijk->jk'", ",", "vec1", ",", "vec2", ")", "/", "(", "norm", "(", "vec1", ",", "axis", "=", "0", ")", "*", "norm", "(", "vec2", ",", "axis", "=", "0", ")", ")", ")", "#take smaller of both possible angles:", "ab", "=", "np", ".", "abs", "(", "np", ".", "pi", "-", "a", ")", "with", "np", ".", "errstate", "(", "invalid", "=", "'ignore'", ")", ":", "i", "=", "a", ">", "ab", "a", "[", "i", "]", "=", "ab", "[", "i", "]", "return", "a" ]
28.6
17.8
def deep_update(d,d_update): """ Updates the values (deep form) of a given dictionary Parameters: ----------- d : dict dictionary that contains the values to update d_update : dict dictionary to be updated """ for k,v in list(d_update.items()): if isinstance(v,dict): if k in d: deep_update(d[k],v) else: d[k]=v elif isinstance(d,list): d.append({k:v}) else: d[k]=v return d
[ "def", "deep_update", "(", "d", ",", "d_update", ")", ":", "for", "k", ",", "v", "in", "list", "(", "d_update", ".", "items", "(", ")", ")", ":", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "if", "k", "in", "d", ":", "deep_update", "(", "d", "[", "k", "]", ",", "v", ")", "else", ":", "d", "[", "k", "]", "=", "v", "elif", "isinstance", "(", "d", ",", "list", ")", ":", "d", ".", "append", "(", "{", "k", ":", "v", "}", ")", "else", ":", "d", "[", "k", "]", "=", "v", "return", "d" ]
18.045455
20.590909
def empty(self): ''' Method to empty attributes, particularly for use when object is deleted but remains as variable ''' self.resource = None self.delivery = None self.data = None self.stream = False self.mimetype = None self.location = None
[ "def", "empty", "(", "self", ")", ":", "self", ".", "resource", "=", "None", "self", ".", "delivery", "=", "None", "self", ".", "data", "=", "None", "self", ".", "stream", "=", "False", "self", ".", "mimetype", "=", "None", "self", ".", "location", "=", "None" ]
19.307692
23.461538
def recordlookup(table, key, dictionary=None): """ Load a dictionary with data from the given table, mapping to record objects. """ if dictionary is None: dictionary = dict() it = iter(table) hdr = next(it) flds = list(map(text_type, hdr)) keyindices = asindices(hdr, key) assert len(keyindices) > 0, 'no key selected' getkey = operator.itemgetter(*keyindices) for row in it: k = getkey(row) rec = Record(row, flds) if k in dictionary: # work properly with shelve l = dictionary[k] l.append(rec) dictionary[k] = l else: dictionary[k] = [rec] return dictionary
[ "def", "recordlookup", "(", "table", ",", "key", ",", "dictionary", "=", "None", ")", ":", "if", "dictionary", "is", "None", ":", "dictionary", "=", "dict", "(", ")", "it", "=", "iter", "(", "table", ")", "hdr", "=", "next", "(", "it", ")", "flds", "=", "list", "(", "map", "(", "text_type", ",", "hdr", ")", ")", "keyindices", "=", "asindices", "(", "hdr", ",", "key", ")", "assert", "len", "(", "keyindices", ")", ">", "0", ",", "'no key selected'", "getkey", "=", "operator", ".", "itemgetter", "(", "*", "keyindices", ")", "for", "row", "in", "it", ":", "k", "=", "getkey", "(", "row", ")", "rec", "=", "Record", "(", "row", ",", "flds", ")", "if", "k", "in", "dictionary", ":", "# work properly with shelve", "l", "=", "dictionary", "[", "k", "]", "l", ".", "append", "(", "rec", ")", "dictionary", "[", "k", "]", "=", "l", "else", ":", "dictionary", "[", "k", "]", "=", "[", "rec", "]", "return", "dictionary" ]
26.346154
15.730769
def parse_pubkey(blob): """ Parse SSH public key from given blob. Construct a verifier for ECDSA signatures. The verifier returns the signatures in the required SSH format. Currently, NIST256P1 and ED25519 elliptic curves are supported. """ fp = fingerprint(blob) s = io.BytesIO(blob) key_type = util.read_frame(s) log.debug('key type: %s', key_type) assert key_type in SUPPORTED_KEY_TYPES, key_type result = {'blob': blob, 'type': key_type, 'fingerprint': fp} if key_type == SSH_NIST256_KEY_TYPE: curve_name = util.read_frame(s) log.debug('curve name: %s', curve_name) point = util.read_frame(s) assert s.read() == b'' _type, point = point[:1], point[1:] assert _type == SSH_NIST256_DER_OCTET size = len(point) // 2 assert len(point) == 2 * size coords = (util.bytes2num(point[:size]), util.bytes2num(point[size:])) curve = ecdsa.NIST256p point = ecdsa.ellipticcurve.Point(curve.curve, *coords) def ecdsa_verifier(sig, msg): assert len(sig) == 2 * size sig_decode = ecdsa.util.sigdecode_string vk = ecdsa.VerifyingKey.from_public_point(point, curve, hashfunc) vk.verify(signature=sig, data=msg, sigdecode=sig_decode) parts = [sig[:size], sig[size:]] return b''.join([util.frame(b'\x00' + p) for p in parts]) result.update(point=coords, curve=CURVE_NIST256, verifier=ecdsa_verifier) if key_type == SSH_ED25519_KEY_TYPE: pubkey = util.read_frame(s) assert s.read() == b'' def ed25519_verify(sig, msg): assert len(sig) == 64 vk = ed25519.VerifyingKey(pubkey) vk.verify(sig, msg) return sig result.update(curve=CURVE_ED25519, verifier=ed25519_verify) return result
[ "def", "parse_pubkey", "(", "blob", ")", ":", "fp", "=", "fingerprint", "(", "blob", ")", "s", "=", "io", ".", "BytesIO", "(", "blob", ")", "key_type", "=", "util", ".", "read_frame", "(", "s", ")", "log", ".", "debug", "(", "'key type: %s'", ",", "key_type", ")", "assert", "key_type", "in", "SUPPORTED_KEY_TYPES", ",", "key_type", "result", "=", "{", "'blob'", ":", "blob", ",", "'type'", ":", "key_type", ",", "'fingerprint'", ":", "fp", "}", "if", "key_type", "==", "SSH_NIST256_KEY_TYPE", ":", "curve_name", "=", "util", ".", "read_frame", "(", "s", ")", "log", ".", "debug", "(", "'curve name: %s'", ",", "curve_name", ")", "point", "=", "util", ".", "read_frame", "(", "s", ")", "assert", "s", ".", "read", "(", ")", "==", "b''", "_type", ",", "point", "=", "point", "[", ":", "1", "]", ",", "point", "[", "1", ":", "]", "assert", "_type", "==", "SSH_NIST256_DER_OCTET", "size", "=", "len", "(", "point", ")", "//", "2", "assert", "len", "(", "point", ")", "==", "2", "*", "size", "coords", "=", "(", "util", ".", "bytes2num", "(", "point", "[", ":", "size", "]", ")", ",", "util", ".", "bytes2num", "(", "point", "[", "size", ":", "]", ")", ")", "curve", "=", "ecdsa", ".", "NIST256p", "point", "=", "ecdsa", ".", "ellipticcurve", ".", "Point", "(", "curve", ".", "curve", ",", "*", "coords", ")", "def", "ecdsa_verifier", "(", "sig", ",", "msg", ")", ":", "assert", "len", "(", "sig", ")", "==", "2", "*", "size", "sig_decode", "=", "ecdsa", ".", "util", ".", "sigdecode_string", "vk", "=", "ecdsa", ".", "VerifyingKey", ".", "from_public_point", "(", "point", ",", "curve", ",", "hashfunc", ")", "vk", ".", "verify", "(", "signature", "=", "sig", ",", "data", "=", "msg", ",", "sigdecode", "=", "sig_decode", ")", "parts", "=", "[", "sig", "[", ":", "size", "]", ",", "sig", "[", "size", ":", "]", "]", "return", "b''", ".", "join", "(", "[", "util", ".", "frame", "(", "b'\\x00'", "+", "p", ")", "for", "p", "in", "parts", "]", ")", "result", ".", "update", "(", "point", "=", "coords", 
",", "curve", "=", "CURVE_NIST256", ",", "verifier", "=", "ecdsa_verifier", ")", "if", "key_type", "==", "SSH_ED25519_KEY_TYPE", ":", "pubkey", "=", "util", ".", "read_frame", "(", "s", ")", "assert", "s", ".", "read", "(", ")", "==", "b''", "def", "ed25519_verify", "(", "sig", ",", "msg", ")", ":", "assert", "len", "(", "sig", ")", "==", "64", "vk", "=", "ed25519", ".", "VerifyingKey", "(", "pubkey", ")", "vk", ".", "verify", "(", "sig", ",", "msg", ")", "return", "sig", "result", ".", "update", "(", "curve", "=", "CURVE_ED25519", ",", "verifier", "=", "ed25519_verify", ")", "return", "result" ]
34.37037
16.851852
def kms_decrypt_value(self, lookup): """ Args: lookup: the encrypted value to be decrypted by KMS; base64 encoded Returns: The decrypted lookup value """ decrypted_lookup = ef_utils.kms_decrypt(EFAwsResolver.__CLIENTS["kms"], lookup) return decrypted_lookup
[ "def", "kms_decrypt_value", "(", "self", ",", "lookup", ")", ":", "decrypted_lookup", "=", "ef_utils", ".", "kms_decrypt", "(", "EFAwsResolver", ".", "__CLIENTS", "[", "\"kms\"", "]", ",", "lookup", ")", "return", "decrypted_lookup" ]
31.666667
17.666667
def upload_delete(self, token, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/attachments#delete-upload" api_path = "/api/v2/uploads/{token}.json" api_path = api_path.format(token=token) return self.call(api_path, method="DELETE", **kwargs)
[ "def", "upload_delete", "(", "self", ",", "token", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/uploads/{token}.json\"", "api_path", "=", "api_path", ".", "format", "(", "token", "=", "token", ")", "return", "self", ".", "call", "(", "api_path", ",", "method", "=", "\"DELETE\"", ",", "*", "*", "kwargs", ")" ]
56.4
16.4
def ensure_fixed_length_bytes(var): """Ensure that a variable with vlen bytes is converted to fixed width.""" dims, data, attrs, encoding = unpack_for_encoding(var) if check_vlen_dtype(data.dtype) == bytes: # TODO: figure out how to handle this with dask data = np.asarray(data, dtype=np.string_) return Variable(dims, data, attrs, encoding)
[ "def", "ensure_fixed_length_bytes", "(", "var", ")", ":", "dims", ",", "data", ",", "attrs", ",", "encoding", "=", "unpack_for_encoding", "(", "var", ")", "if", "check_vlen_dtype", "(", "data", ".", "dtype", ")", "==", "bytes", ":", "# TODO: figure out how to handle this with dask", "data", "=", "np", ".", "asarray", "(", "data", ",", "dtype", "=", "np", ".", "string_", ")", "return", "Variable", "(", "dims", ",", "data", ",", "attrs", ",", "encoding", ")" ]
52.428571
8.571429
def get_tunnel_context(self, context_id, **kwargs): """Retrieves the network tunnel context instance. :param int context_id: The id-value representing the context instance. :return dict: Mapping of properties for the tunnel context. :raise SoftLayerAPIError: If a context cannot be found. """ _filter = utils.NestedDict(kwargs.get('filter') or {}) _filter['networkTunnelContexts']['id'] = utils.query_filter(context_id) kwargs['filter'] = _filter.to_dict() contexts = self.account.getNetworkTunnelContexts(**kwargs) if len(contexts) == 0: raise SoftLayerAPIError('SoftLayer_Exception_ObjectNotFound', 'Unable to find object with id of \'{}\'' .format(context_id)) return contexts[0]
[ "def", "get_tunnel_context", "(", "self", ",", "context_id", ",", "*", "*", "kwargs", ")", ":", "_filter", "=", "utils", ".", "NestedDict", "(", "kwargs", ".", "get", "(", "'filter'", ")", "or", "{", "}", ")", "_filter", "[", "'networkTunnelContexts'", "]", "[", "'id'", "]", "=", "utils", ".", "query_filter", "(", "context_id", ")", "kwargs", "[", "'filter'", "]", "=", "_filter", ".", "to_dict", "(", ")", "contexts", "=", "self", ".", "account", ".", "getNetworkTunnelContexts", "(", "*", "*", "kwargs", ")", "if", "len", "(", "contexts", ")", "==", "0", ":", "raise", "SoftLayerAPIError", "(", "'SoftLayer_Exception_ObjectNotFound'", ",", "'Unable to find object with id of \\'{}\\''", ".", "format", "(", "context_id", ")", ")", "return", "contexts", "[", "0", "]" ]
49.411765
22.352941
def create_head(nf:int, nc:int, lin_ftrs:Optional[Collection[int]]=None, ps:Floats=0.5, concat_pool:bool=True, bn_final:bool=False): "Model head that takes `nf` features, runs through `lin_ftrs`, and about `nc` classes." lin_ftrs = [nf, 512, nc] if lin_ftrs is None else [nf] + lin_ftrs + [nc] ps = listify(ps) if len(ps) == 1: ps = [ps[0]/2] * (len(lin_ftrs)-2) + ps actns = [nn.ReLU(inplace=True)] * (len(lin_ftrs)-2) + [None] pool = AdaptiveConcatPool2d() if concat_pool else nn.AdaptiveAvgPool2d(1) layers = [pool, Flatten()] for ni,no,p,actn in zip(lin_ftrs[:-1], lin_ftrs[1:], ps, actns): layers += bn_drop_lin(ni, no, True, p, actn) if bn_final: layers.append(nn.BatchNorm1d(lin_ftrs[-1], momentum=0.01)) return nn.Sequential(*layers)
[ "def", "create_head", "(", "nf", ":", "int", ",", "nc", ":", "int", ",", "lin_ftrs", ":", "Optional", "[", "Collection", "[", "int", "]", "]", "=", "None", ",", "ps", ":", "Floats", "=", "0.5", ",", "concat_pool", ":", "bool", "=", "True", ",", "bn_final", ":", "bool", "=", "False", ")", ":", "lin_ftrs", "=", "[", "nf", ",", "512", ",", "nc", "]", "if", "lin_ftrs", "is", "None", "else", "[", "nf", "]", "+", "lin_ftrs", "+", "[", "nc", "]", "ps", "=", "listify", "(", "ps", ")", "if", "len", "(", "ps", ")", "==", "1", ":", "ps", "=", "[", "ps", "[", "0", "]", "/", "2", "]", "*", "(", "len", "(", "lin_ftrs", ")", "-", "2", ")", "+", "ps", "actns", "=", "[", "nn", ".", "ReLU", "(", "inplace", "=", "True", ")", "]", "*", "(", "len", "(", "lin_ftrs", ")", "-", "2", ")", "+", "[", "None", "]", "pool", "=", "AdaptiveConcatPool2d", "(", ")", "if", "concat_pool", "else", "nn", ".", "AdaptiveAvgPool2d", "(", "1", ")", "layers", "=", "[", "pool", ",", "Flatten", "(", ")", "]", "for", "ni", ",", "no", ",", "p", ",", "actn", "in", "zip", "(", "lin_ftrs", "[", ":", "-", "1", "]", ",", "lin_ftrs", "[", "1", ":", "]", ",", "ps", ",", "actns", ")", ":", "layers", "+=", "bn_drop_lin", "(", "ni", ",", "no", ",", "True", ",", "p", ",", "actn", ")", "if", "bn_final", ":", "layers", ".", "append", "(", "nn", ".", "BatchNorm1d", "(", "lin_ftrs", "[", "-", "1", "]", ",", "momentum", "=", "0.01", ")", ")", "return", "nn", ".", "Sequential", "(", "*", "layers", ")" ]
61
26.692308
def cancel_scheduled_play(self, call_params): """REST Cancel a Scheduled Play Helper """ path = '/' + self.api_version + '/CancelScheduledPlay/' method = 'POST' return self.request(path, method, call_params)
[ "def", "cancel_scheduled_play", "(", "self", ",", "call_params", ")", ":", "path", "=", "'/'", "+", "self", ".", "api_version", "+", "'/CancelScheduledPlay/'", "method", "=", "'POST'", "return", "self", ".", "request", "(", "path", ",", "method", ",", "call_params", ")" ]
40.333333
9.833333
def page(self, status=values.unset, iccid=values.unset, rate_plan=values.unset, e_id=values.unset, sim_registration_code=values.unset, page_token=values.unset, page_number=values.unset, page_size=values.unset): """ Retrieve a single page of SimInstance records from the API. Request is executed immediately :param unicode status: The status :param unicode iccid: The iccid :param unicode rate_plan: The rate_plan :param unicode e_id: The e_id :param unicode sim_registration_code: The sim_registration_code :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of SimInstance :rtype: twilio.rest.preview.wireless.sim.SimPage """ params = values.of({ 'Status': status, 'Iccid': iccid, 'RatePlan': rate_plan, 'EId': e_id, 'SimRegistrationCode': sim_registration_code, 'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, }) response = self._version.page( 'GET', self._uri, params=params, ) return SimPage(self._version, response, self._solution)
[ "def", "page", "(", "self", ",", "status", "=", "values", ".", "unset", ",", "iccid", "=", "values", ".", "unset", ",", "rate_plan", "=", "values", ".", "unset", ",", "e_id", "=", "values", ".", "unset", ",", "sim_registration_code", "=", "values", ".", "unset", ",", "page_token", "=", "values", ".", "unset", ",", "page_number", "=", "values", ".", "unset", ",", "page_size", "=", "values", ".", "unset", ")", ":", "params", "=", "values", ".", "of", "(", "{", "'Status'", ":", "status", ",", "'Iccid'", ":", "iccid", ",", "'RatePlan'", ":", "rate_plan", ",", "'EId'", ":", "e_id", ",", "'SimRegistrationCode'", ":", "sim_registration_code", ",", "'PageToken'", ":", "page_token", ",", "'Page'", ":", "page_number", ",", "'PageSize'", ":", "page_size", ",", "}", ")", "response", "=", "self", ".", "_version", ".", "page", "(", "'GET'", ",", "self", ".", "_uri", ",", "params", "=", "params", ",", ")", "return", "SimPage", "(", "self", ".", "_version", ",", "response", ",", "self", ".", "_solution", ")" ]
36.947368
17.631579
def subpacket(subpacket_type, fmt, *values): """Create GPG subpacket.""" blob = struct.pack(fmt, *values) if values else fmt return struct.pack('>B', subpacket_type) + blob
[ "def", "subpacket", "(", "subpacket_type", ",", "fmt", ",", "*", "values", ")", ":", "blob", "=", "struct", ".", "pack", "(", "fmt", ",", "*", "values", ")", "if", "values", "else", "fmt", "return", "struct", ".", "pack", "(", "'>B'", ",", "subpacket_type", ")", "+", "blob" ]
45.25
7.5
def GetHashType(self, hash_str): """Identify the type of hash in a hash string. Args: hash_str: A string value that may be a hash. Returns: A string description of the type of hash. """ # Return the type of the first matching hash. for hash_type, hash_re in self.hashes: if hash_re.match(hash_str): return hash_type # No hash matched. return "EMPTY"
[ "def", "GetHashType", "(", "self", ",", "hash_str", ")", ":", "# Return the type of the first matching hash.", "for", "hash_type", ",", "hash_re", "in", "self", ".", "hashes", ":", "if", "hash_re", ".", "match", "(", "hash_str", ")", ":", "return", "hash_type", "# No hash matched.", "return", "\"EMPTY\"" ]
26.333333
15.866667
def clean_multigame_features(df): """TODO: Docstring for clean_multigame_features. :df: TODO :returns: TODO """ df = pd.DataFrame(df) if df.index.value_counts().max() > 1: df.reset_index(drop=True, inplace=True) df = clean_features(df) # if it's many games in one DataFrame, make poss_id and play_id unique for col in ('play_id', 'poss_id'): diffs = df[col].diff().fillna(0) if (diffs < 0).any(): new_col = np.cumsum(diffs.astype(bool)) df.eval('{} = @new_col'.format(col), inplace=True) return df
[ "def", "clean_multigame_features", "(", "df", ")", ":", "df", "=", "pd", ".", "DataFrame", "(", "df", ")", "if", "df", ".", "index", ".", "value_counts", "(", ")", ".", "max", "(", ")", ">", "1", ":", "df", ".", "reset_index", "(", "drop", "=", "True", ",", "inplace", "=", "True", ")", "df", "=", "clean_features", "(", "df", ")", "# if it's many games in one DataFrame, make poss_id and play_id unique", "for", "col", "in", "(", "'play_id'", ",", "'poss_id'", ")", ":", "diffs", "=", "df", "[", "col", "]", ".", "diff", "(", ")", ".", "fillna", "(", "0", ")", "if", "(", "diffs", "<", "0", ")", ".", "any", "(", ")", ":", "new_col", "=", "np", ".", "cumsum", "(", "diffs", ".", "astype", "(", "bool", ")", ")", "df", ".", "eval", "(", "'{} = @new_col'", ".", "format", "(", "col", ")", ",", "inplace", "=", "True", ")", "return", "df" ]
28.5
17.95
def prepare_jochem(ctx, jochem, output, csoutput): """Process and filter jochem file to produce list of names for dictionary.""" click.echo('chemdataextractor.dict.prepare_jochem') for i, line in enumerate(jochem): print('JC%s' % i) if line.startswith('TM '): if line.endswith(' @match=ci\n'): for tokens in _make_tokens(line[3:-11]): output.write(' '.join(tokens)) output.write('\n') else: for tokens in _make_tokens(line[3:-1]): csoutput.write(' '.join(tokens)) csoutput.write('\n')
[ "def", "prepare_jochem", "(", "ctx", ",", "jochem", ",", "output", ",", "csoutput", ")", ":", "click", ".", "echo", "(", "'chemdataextractor.dict.prepare_jochem'", ")", "for", "i", ",", "line", "in", "enumerate", "(", "jochem", ")", ":", "print", "(", "'JC%s'", "%", "i", ")", "if", "line", ".", "startswith", "(", "'TM '", ")", ":", "if", "line", ".", "endswith", "(", "'\t@match=ci\\n'", ")", ":", "for", "tokens", "in", "_make_tokens", "(", "line", "[", "3", ":", "-", "11", "]", ")", ":", "output", ".", "write", "(", "' '", ".", "join", "(", "tokens", ")", ")", "output", ".", "write", "(", "'\\n'", ")", "else", ":", "for", "tokens", "in", "_make_tokens", "(", "line", "[", "3", ":", "-", "1", "]", ")", ":", "csoutput", ".", "write", "(", "' '", ".", "join", "(", "tokens", ")", ")", "csoutput", ".", "write", "(", "'\\n'", ")" ]
45.357143
9.428571
def to_bel_path(graph, path: str, mode: str = 'w', **kwargs) -> None: """Write the BEL graph as a canonical BEL Script to the given path. :param BELGraph graph: the BEL Graph to output as a BEL Script :param path: A file path :param mode: The file opening mode. Defaults to 'w' """ with open(path, mode=mode, **kwargs) as bel_file: to_bel(graph, bel_file)
[ "def", "to_bel_path", "(", "graph", ",", "path", ":", "str", ",", "mode", ":", "str", "=", "'w'", ",", "*", "*", "kwargs", ")", "->", "None", ":", "with", "open", "(", "path", ",", "mode", "=", "mode", ",", "*", "*", "kwargs", ")", "as", "bel_file", ":", "to_bel", "(", "graph", ",", "bel_file", ")" ]
42.222222
16
def etrago(args): """The etrago function works with following arguments: Parameters ---------- db : str ``'oedb'``, Name of Database session setting stored in *config.ini* of *.egoio* gridversion : NoneType or str ``'v0.2.11'``, Name of the data version number of oedb: state ``'None'`` for model_draft (sand-box) or an explicit version number (e.g. 'v0.2.10') for the grid schema. method : str ``'lopf'``, Choose between a non-linear power flow ('pf') or a linear optimal power flow ('lopf'). pf_post_lopf : bool False, Option to run a non-linear power flow (pf) directly after the linear optimal power flow (and thus the dispatch) has finished. start_snapshot : int 1, Start hour of the scenario year to be calculated. end_snapshot : int 2, End hour of the scenario year to be calculated. solver : str 'glpk', Choose your preferred solver. Current options: 'glpk' (open-source), 'cplex' or 'gurobi'. scn_name : str 'Status Quo', Choose your scenario. Currently, there are three different scenarios: 'Status Quo', 'NEP 2035', 'eGo100'. If you do not want to use the full German dataset, you can use the excerpt of Schleswig-Holstein by adding the acronym SH to the scenario name (e.g. 'SH Status Quo'). scn_extension : NoneType or list None, Choose extension-scenarios which will be added to the existing network container. Data of the extension scenarios are located in extension-tables (e.g. model_draft.ego_grid_pf_hv_extension_bus) with the prefix 'extension_'. Currently there are three overlay networks: 'nep2035_confirmed' includes all planed new lines confirmed by the Bundesnetzagentur 'nep2035_b2' includes all new lines planned by the Netzentwicklungsplan 2025 in scenario 2035 B2 'BE_NO_NEP 2035' includes planned lines to Belgium and Norway and adds BE and NO as electrical neighbours scn_decommissioning : str None, Choose an extra scenario which includes lines you want to decommise from the existing network. 
Data of the decommissioning scenarios are located in extension-tables (e.g. model_draft.ego_grid_pf_hv_extension_bus) with the prefix 'decommissioning_'. Currently, there are two decommissioning_scenarios which are linked to extension-scenarios: 'nep2035_confirmed' includes all lines that will be replaced in confirmed projects 'nep2035_b2' includes all lines that will be replaced in NEP-scenario 2035 B2 lpfile : obj False, State if and where you want to save pyomo's lp file. Options: False or '/path/tofolder'.import numpy as np csv_export : obj False, State if and where you want to save results as csv files.Options: False or '/path/tofolder'. db_export : bool False, State if you want to export the results of your calculation back to the database. extendable : list ['network', 'storages'], Choose components you want to optimize. Settings can be added in /tools/extendable.py. The most important possibilities: 'network': set all lines, links and transformers extendable 'german_network': set lines and transformers in German grid extendable 'foreign_network': set foreign lines and transformers extendable 'transformers': set all transformers extendable 'overlay_network': set all components of the 'scn_extension' extendable 'storages': allow to install extendable storages (unlimited in size) at each grid node in order to meet the flexibility demand. 'network_preselection': set only preselected lines extendable, method is chosen in function call generator_noise : bool or int State if you want to apply a small random noise to the marginal costs of each generator in order to prevent an optima plateau. To reproduce a noise, choose the same integer (seed number). minimize_loading : bool False, ... ramp_limits : bool False, State if you want to consider ramp limits of generators. Increases time for solving significantly. Only works when calculating at least 30 snapshots. 
extra_functionality : str or None None, Choose name of extra functionality described in etrago/utilities.py "min_renewable_share" to set a minimal share of renewable energy or "max_line_ext" to set an overall maximum of line expansion. When activating snapshot_clustering or minimize_loading these extra_funtionalities are overwritten and therefore neglected. network_clustering_kmeans : bool or int False, State if you want to apply a clustering of all network buses down to only ``'k'`` buses. The weighting takes place considering generation and load at each node. If so, state the number of k you want to apply. Otherwise put False. This function doesn't work together with ``'line_grouping = True'``. load_cluster : bool or obj state if you want to load cluster coordinates from a previous run: False or /path/tofile (filename similar to ./cluster_coord_k_n_result). network_clustering_ehv : bool False, Choose if you want to cluster the full HV/EHV dataset down to only the EHV buses. In that case, all HV buses are assigned to their closest EHV sub-station, taking into account the shortest distance on power lines. snapshot_clustering : bool or int False, State if you want to cluster the snapshots and run the optimization only on a subset of snapshot periods. The int value defines the number of periods (i.e. days) which will be clustered to. Move to PyPSA branch:features/snapshot_clustering parallelisation : bool False, Choose if you want to calculate a certain number of snapshots in parallel. If yes, define the respective amount in the if-clause execution below. Otherwise state False here. line_grouping : bool True, State if you want to group lines that connect the same two buses into one system. branch_capacity_factor : dict {'HV': 0.5, 'eHV' : 0.7}, Add a factor here if you want to globally change line capacities (e.g. to "consider" an (n-1) criterion or for debugging purposes). 
load_shedding : bool False, State here if you want to make use of the load shedding function which is helpful when debugging: a very expensive generator is set to each bus and meets the demand when regular generators cannot do so. foreign_lines : dict {'carrier':'AC', 'capacity': 'osmTGmod}' Choose transmission technology and capacity of foreign lines: 'carrier': 'AC' or 'DC' 'capacity': 'osmTGmod', 'ntc_acer' or 'thermal_acer' comments : str None Returns ------- network : `pandas.DataFrame<dataframe>` eTraGo result network based on `PyPSA network <https://www.pypsa.org/doc/components.html#network>`_ """ conn = db.connection(section=args['db']) Session = sessionmaker(bind=conn) session = Session() # additional arguments cfgpath, version, prefix if args['gridversion'] is None: args['ormcls_prefix'] = 'EgoGridPfHv' else: args['ormcls_prefix'] = 'EgoPfHv' scenario = NetworkScenario(session, version=args['gridversion'], prefix=args['ormcls_prefix'], method=args['method'], start_snapshot=args['start_snapshot'], end_snapshot=args['end_snapshot'], scn_name=args['scn_name']) network = scenario.build_network() # add coordinates network = add_coordinates(network) # Set countrytags of buses, lines, links and transformers network = geolocation_buses(network, session) # Set q_sets of foreign loads network = set_q_foreign_loads(network, cos_phi=1) # Change transmission technology and/or capacity of foreign lines if args['foreign_lines']['carrier'] == 'DC': foreign_links(network) network = geolocation_buses(network, session) if args['foreign_lines']['capacity'] != 'osmTGmod': crossborder_capacity( network, args['foreign_lines']['capacity'], args['branch_capacity_factor']) # TEMPORARY vague adjustment due to transformer bug in data processing if args['gridversion'] == 'v0.2.11': network.transformers.x = network.transformers.x * 0.0001 # set SOC at the beginning and end of the period to equal values network.storage_units.cyclic_state_of_charge = True # set extra_functionality if 
args['extra_functionality'] is not None: extra_functionality = eval(args['extra_functionality']) elif args['extra_functionality'] is None: extra_functionality = args['extra_functionality'] # set disaggregated_network to default disaggregated_network = None # set clustering to default clustering = None if args['generator_noise'] is not False: # add random noise to all generators s = np.random.RandomState(args['generator_noise']) network.generators.marginal_cost[network.generators.bus.isin( network.buses.index[network.buses.country_code == 'DE'])] += \ abs(s.normal(0, 0.1, len(network.generators.marginal_cost[ network.generators.bus.isin(network.buses.index[ network.buses.country_code == 'DE'])]))) # for SH scenario run do data preperation: if (args['scn_name'] == 'SH Status Quo' or args['scn_name'] == 'SH NEP 2035'): data_manipulation_sh(network) # grouping of parallel lines if args['line_grouping']: group_parallel_lines(network) # Branch loading minimization if args['minimize_loading']: extra_functionality = loading_minimization # scenario extensions if args['scn_extension'] is not None: for i in range(len(args['scn_extension'])): network = extension( network, session, version=args['gridversion'], scn_extension=args['scn_extension'][i], start_snapshot=args['start_snapshot'], end_snapshot=args['end_snapshot']) network = geolocation_buses(network, session) # scenario decommissioning if args['scn_decommissioning'] is not None: network = decommissioning( network, session, args) # Add missing lines in Munich and Stuttgart network = add_missing_components(network) # set Branch capacity factor for lines and transformer if args['branch_capacity_factor']: set_branch_capacity(network, args) # investive optimization strategies if args['extendable'] != []: network = extendable( network, args, line_max=4) network = convert_capital_costs( network, args['start_snapshot'], args['end_snapshot']) # skip snapshots if args['skip_snapshots']: network.snapshots = 
network.snapshots[::args['skip_snapshots']] network.snapshot_weightings = network.snapshot_weightings[ ::args['skip_snapshots']] * args['skip_snapshots'] # snapshot clustering if not args['snapshot_clustering'] is False: network = snapshot_clustering( network, how='daily', clusters=args['snapshot_clustering']) extra_functionality = daily_bounds # daily_bounds or other constraint # load shedding in order to hunt infeasibilities if args['load_shedding']: load_shedding(network) # ehv network clustering if args['network_clustering_ehv']: network.generators.control = "PV" busmap = busmap_from_psql( network, session, scn_name=( args['scn_name'] if args['scn_extension']==None else args['scn_name']+'_ext_'+'_'.join( args['scn_extension']))) network = cluster_on_extra_high_voltage( network, busmap, with_time=True) # k-mean clustering if not args['network_clustering_kmeans'] == False: clustering = kmean_clustering( network, n_clusters=args['network_clustering_kmeans'], load_cluster=args['load_cluster'], line_length_factor=1, remove_stubs=False, use_reduced_coordinates=False, bus_weight_tocsv=None, bus_weight_fromcsv=None, n_init=10, max_iter=100, tol=1e-6, n_jobs=-1) disaggregated_network = ( network.copy() if args.get('disaggregation') else None) network = clustering.network.copy() if args['ramp_limits']: ramp_limits(network) # preselection of extendable lines if 'network_preselection' in args['extendable']: extension_preselection(network, args, 'snapshot_clustering', 2) # parallisation if args['parallelisation']: parallelisation( network, start_snapshot=args['start_snapshot'], end_snapshot=args['end_snapshot'], group_size=1, solver_name=args['solver'], solver_options=args['solver_options'], extra_functionality=extra_functionality) # start linear optimal powerflow calculations elif args['method'] == 'lopf': x = time.time() network.lopf( network.snapshots, solver_name=args['solver'], solver_options=args['solver_options'], extra_functionality=extra_functionality, 
formulation="angles") y = time.time() z = (y - x) / 60 # z is time for lopf in minutes print("Time for LOPF [min]:", round(z, 2)) # start non-linear powerflow simulation elif args['method'] is 'pf': network.pf(scenario.timeindex) # calc_line_losses(network) if args['pf_post_lopf']: x = time.time() pf_solution = pf_post_lopf(network, args, extra_functionality, add_foreign_lopf=True) y = time.time() z = (y - x) / 60 print("Time for PF [min]:", round(z, 2)) calc_line_losses(network) network = distribute_q(network, allocation='p_nom') if not args['extendable'] == []: print_expansion_costs(network, args) if clustering: disagg = args.get('disaggregation') skip = () if args['pf_post_lopf'] else ('q',) t = time.time() if disagg: if disagg == 'mini': disaggregation = MiniSolverDisaggregation( disaggregated_network, network, clustering, skip=skip) elif disagg == 'uniform': disaggregation = UniformDisaggregation(disaggregated_network, network, clustering, skip=skip) else: raise Exception('Invalid disaggregation command: ' + disagg) disaggregation.execute(scenario, solver=args['solver']) # temporal bug fix for solar generator which ar during night time # nan instead of 0 disaggregated_network.generators_t.p.fillna(0, inplace=True) disaggregated_network.generators_t.q.fillna(0, inplace=True) disaggregated_network.results = network.results print("Time for overall desaggregation [min]: {:.2}" .format((time.time() - t) / 60)) # write lpfile to path if not args['lpfile'] is False: network.model.write( args['lpfile'], io_options={ 'symbolic_solver_labels': True}) # write PyPSA results back to database if args['db_export']: username = str(conn.url).split('//')[1].split(':')[0] args['user_name'] = username results_to_oedb( session, network, dict([("disaggregated_results", False)] + list(args.items())), grid='hv', safe_results=False) if disaggregated_network: results_to_oedb( session, disaggregated_network, dict([("disaggregated_results", True)] + list(args.items())), grid='hv', 
safe_results=False) # write PyPSA results to csv to path if not args['csv_export'] is False: if not args['pf_post_lopf']: results_to_csv(network, args) else: results_to_csv(network, args, pf_solution=pf_solution) if disaggregated_network: results_to_csv( disaggregated_network, {k: os.path.join(v, 'disaggregated') if k == 'csv_export' else v for k, v in args.items()}) # close session # session.close() return network, disaggregated_network
[ "def", "etrago", "(", "args", ")", ":", "conn", "=", "db", ".", "connection", "(", "section", "=", "args", "[", "'db'", "]", ")", "Session", "=", "sessionmaker", "(", "bind", "=", "conn", ")", "session", "=", "Session", "(", ")", "# additional arguments cfgpath, version, prefix", "if", "args", "[", "'gridversion'", "]", "is", "None", ":", "args", "[", "'ormcls_prefix'", "]", "=", "'EgoGridPfHv'", "else", ":", "args", "[", "'ormcls_prefix'", "]", "=", "'EgoPfHv'", "scenario", "=", "NetworkScenario", "(", "session", ",", "version", "=", "args", "[", "'gridversion'", "]", ",", "prefix", "=", "args", "[", "'ormcls_prefix'", "]", ",", "method", "=", "args", "[", "'method'", "]", ",", "start_snapshot", "=", "args", "[", "'start_snapshot'", "]", ",", "end_snapshot", "=", "args", "[", "'end_snapshot'", "]", ",", "scn_name", "=", "args", "[", "'scn_name'", "]", ")", "network", "=", "scenario", ".", "build_network", "(", ")", "# add coordinates", "network", "=", "add_coordinates", "(", "network", ")", "# Set countrytags of buses, lines, links and transformers", "network", "=", "geolocation_buses", "(", "network", ",", "session", ")", "# Set q_sets of foreign loads", "network", "=", "set_q_foreign_loads", "(", "network", ",", "cos_phi", "=", "1", ")", "# Change transmission technology and/or capacity of foreign lines", "if", "args", "[", "'foreign_lines'", "]", "[", "'carrier'", "]", "==", "'DC'", ":", "foreign_links", "(", "network", ")", "network", "=", "geolocation_buses", "(", "network", ",", "session", ")", "if", "args", "[", "'foreign_lines'", "]", "[", "'capacity'", "]", "!=", "'osmTGmod'", ":", "crossborder_capacity", "(", "network", ",", "args", "[", "'foreign_lines'", "]", "[", "'capacity'", "]", ",", "args", "[", "'branch_capacity_factor'", "]", ")", "# TEMPORARY vague adjustment due to transformer bug in data processing", "if", "args", "[", "'gridversion'", "]", "==", "'v0.2.11'", ":", "network", ".", "transformers", ".", "x", "=", "network", ".", 
"transformers", ".", "x", "*", "0.0001", "# set SOC at the beginning and end of the period to equal values", "network", ".", "storage_units", ".", "cyclic_state_of_charge", "=", "True", "# set extra_functionality", "if", "args", "[", "'extra_functionality'", "]", "is", "not", "None", ":", "extra_functionality", "=", "eval", "(", "args", "[", "'extra_functionality'", "]", ")", "elif", "args", "[", "'extra_functionality'", "]", "is", "None", ":", "extra_functionality", "=", "args", "[", "'extra_functionality'", "]", "# set disaggregated_network to default", "disaggregated_network", "=", "None", "# set clustering to default", "clustering", "=", "None", "if", "args", "[", "'generator_noise'", "]", "is", "not", "False", ":", "# add random noise to all generators", "s", "=", "np", ".", "random", ".", "RandomState", "(", "args", "[", "'generator_noise'", "]", ")", "network", ".", "generators", ".", "marginal_cost", "[", "network", ".", "generators", ".", "bus", ".", "isin", "(", "network", ".", "buses", ".", "index", "[", "network", ".", "buses", ".", "country_code", "==", "'DE'", "]", ")", "]", "+=", "abs", "(", "s", ".", "normal", "(", "0", ",", "0.1", ",", "len", "(", "network", ".", "generators", ".", "marginal_cost", "[", "network", ".", "generators", ".", "bus", ".", "isin", "(", "network", ".", "buses", ".", "index", "[", "network", ".", "buses", ".", "country_code", "==", "'DE'", "]", ")", "]", ")", ")", ")", "# for SH scenario run do data preperation:", "if", "(", "args", "[", "'scn_name'", "]", "==", "'SH Status Quo'", "or", "args", "[", "'scn_name'", "]", "==", "'SH NEP 2035'", ")", ":", "data_manipulation_sh", "(", "network", ")", "# grouping of parallel lines", "if", "args", "[", "'line_grouping'", "]", ":", "group_parallel_lines", "(", "network", ")", "# Branch loading minimization", "if", "args", "[", "'minimize_loading'", "]", ":", "extra_functionality", "=", "loading_minimization", "# scenario extensions", "if", "args", "[", "'scn_extension'", "]", "is", 
"not", "None", ":", "for", "i", "in", "range", "(", "len", "(", "args", "[", "'scn_extension'", "]", ")", ")", ":", "network", "=", "extension", "(", "network", ",", "session", ",", "version", "=", "args", "[", "'gridversion'", "]", ",", "scn_extension", "=", "args", "[", "'scn_extension'", "]", "[", "i", "]", ",", "start_snapshot", "=", "args", "[", "'start_snapshot'", "]", ",", "end_snapshot", "=", "args", "[", "'end_snapshot'", "]", ")", "network", "=", "geolocation_buses", "(", "network", ",", "session", ")", "# scenario decommissioning", "if", "args", "[", "'scn_decommissioning'", "]", "is", "not", "None", ":", "network", "=", "decommissioning", "(", "network", ",", "session", ",", "args", ")", "# Add missing lines in Munich and Stuttgart", "network", "=", "add_missing_components", "(", "network", ")", "# set Branch capacity factor for lines and transformer", "if", "args", "[", "'branch_capacity_factor'", "]", ":", "set_branch_capacity", "(", "network", ",", "args", ")", "# investive optimization strategies", "if", "args", "[", "'extendable'", "]", "!=", "[", "]", ":", "network", "=", "extendable", "(", "network", ",", "args", ",", "line_max", "=", "4", ")", "network", "=", "convert_capital_costs", "(", "network", ",", "args", "[", "'start_snapshot'", "]", ",", "args", "[", "'end_snapshot'", "]", ")", "# skip snapshots", "if", "args", "[", "'skip_snapshots'", "]", ":", "network", ".", "snapshots", "=", "network", ".", "snapshots", "[", ":", ":", "args", "[", "'skip_snapshots'", "]", "]", "network", ".", "snapshot_weightings", "=", "network", ".", "snapshot_weightings", "[", ":", ":", "args", "[", "'skip_snapshots'", "]", "]", "*", "args", "[", "'skip_snapshots'", "]", "# snapshot clustering", "if", "not", "args", "[", "'snapshot_clustering'", "]", "is", "False", ":", "network", "=", "snapshot_clustering", "(", "network", ",", "how", "=", "'daily'", ",", "clusters", "=", "args", "[", "'snapshot_clustering'", "]", ")", "extra_functionality", "=", "daily_bounds", 
"# daily_bounds or other constraint", "# load shedding in order to hunt infeasibilities", "if", "args", "[", "'load_shedding'", "]", ":", "load_shedding", "(", "network", ")", "# ehv network clustering", "if", "args", "[", "'network_clustering_ehv'", "]", ":", "network", ".", "generators", ".", "control", "=", "\"PV\"", "busmap", "=", "busmap_from_psql", "(", "network", ",", "session", ",", "scn_name", "=", "(", "args", "[", "'scn_name'", "]", "if", "args", "[", "'scn_extension'", "]", "==", "None", "else", "args", "[", "'scn_name'", "]", "+", "'_ext_'", "+", "'_'", ".", "join", "(", "args", "[", "'scn_extension'", "]", ")", ")", ")", "network", "=", "cluster_on_extra_high_voltage", "(", "network", ",", "busmap", ",", "with_time", "=", "True", ")", "# k-mean clustering", "if", "not", "args", "[", "'network_clustering_kmeans'", "]", "==", "False", ":", "clustering", "=", "kmean_clustering", "(", "network", ",", "n_clusters", "=", "args", "[", "'network_clustering_kmeans'", "]", ",", "load_cluster", "=", "args", "[", "'load_cluster'", "]", ",", "line_length_factor", "=", "1", ",", "remove_stubs", "=", "False", ",", "use_reduced_coordinates", "=", "False", ",", "bus_weight_tocsv", "=", "None", ",", "bus_weight_fromcsv", "=", "None", ",", "n_init", "=", "10", ",", "max_iter", "=", "100", ",", "tol", "=", "1e-6", ",", "n_jobs", "=", "-", "1", ")", "disaggregated_network", "=", "(", "network", ".", "copy", "(", ")", "if", "args", ".", "get", "(", "'disaggregation'", ")", "else", "None", ")", "network", "=", "clustering", ".", "network", ".", "copy", "(", ")", "if", "args", "[", "'ramp_limits'", "]", ":", "ramp_limits", "(", "network", ")", "# preselection of extendable lines", "if", "'network_preselection'", "in", "args", "[", "'extendable'", "]", ":", "extension_preselection", "(", "network", ",", "args", ",", "'snapshot_clustering'", ",", "2", ")", "# parallisation", "if", "args", "[", "'parallelisation'", "]", ":", "parallelisation", "(", "network", ",", 
"start_snapshot", "=", "args", "[", "'start_snapshot'", "]", ",", "end_snapshot", "=", "args", "[", "'end_snapshot'", "]", ",", "group_size", "=", "1", ",", "solver_name", "=", "args", "[", "'solver'", "]", ",", "solver_options", "=", "args", "[", "'solver_options'", "]", ",", "extra_functionality", "=", "extra_functionality", ")", "# start linear optimal powerflow calculations", "elif", "args", "[", "'method'", "]", "==", "'lopf'", ":", "x", "=", "time", ".", "time", "(", ")", "network", ".", "lopf", "(", "network", ".", "snapshots", ",", "solver_name", "=", "args", "[", "'solver'", "]", ",", "solver_options", "=", "args", "[", "'solver_options'", "]", ",", "extra_functionality", "=", "extra_functionality", ",", "formulation", "=", "\"angles\"", ")", "y", "=", "time", ".", "time", "(", ")", "z", "=", "(", "y", "-", "x", ")", "/", "60", "# z is time for lopf in minutes", "print", "(", "\"Time for LOPF [min]:\"", ",", "round", "(", "z", ",", "2", ")", ")", "# start non-linear powerflow simulation", "elif", "args", "[", "'method'", "]", "is", "'pf'", ":", "network", ".", "pf", "(", "scenario", ".", "timeindex", ")", "# calc_line_losses(network)", "if", "args", "[", "'pf_post_lopf'", "]", ":", "x", "=", "time", ".", "time", "(", ")", "pf_solution", "=", "pf_post_lopf", "(", "network", ",", "args", ",", "extra_functionality", ",", "add_foreign_lopf", "=", "True", ")", "y", "=", "time", ".", "time", "(", ")", "z", "=", "(", "y", "-", "x", ")", "/", "60", "print", "(", "\"Time for PF [min]:\"", ",", "round", "(", "z", ",", "2", ")", ")", "calc_line_losses", "(", "network", ")", "network", "=", "distribute_q", "(", "network", ",", "allocation", "=", "'p_nom'", ")", "if", "not", "args", "[", "'extendable'", "]", "==", "[", "]", ":", "print_expansion_costs", "(", "network", ",", "args", ")", "if", "clustering", ":", "disagg", "=", "args", ".", "get", "(", "'disaggregation'", ")", "skip", "=", "(", ")", "if", "args", "[", "'pf_post_lopf'", "]", "else", "(", "'q'", ",", ")", 
"t", "=", "time", ".", "time", "(", ")", "if", "disagg", ":", "if", "disagg", "==", "'mini'", ":", "disaggregation", "=", "MiniSolverDisaggregation", "(", "disaggregated_network", ",", "network", ",", "clustering", ",", "skip", "=", "skip", ")", "elif", "disagg", "==", "'uniform'", ":", "disaggregation", "=", "UniformDisaggregation", "(", "disaggregated_network", ",", "network", ",", "clustering", ",", "skip", "=", "skip", ")", "else", ":", "raise", "Exception", "(", "'Invalid disaggregation command: '", "+", "disagg", ")", "disaggregation", ".", "execute", "(", "scenario", ",", "solver", "=", "args", "[", "'solver'", "]", ")", "# temporal bug fix for solar generator which ar during night time", "# nan instead of 0", "disaggregated_network", ".", "generators_t", ".", "p", ".", "fillna", "(", "0", ",", "inplace", "=", "True", ")", "disaggregated_network", ".", "generators_t", ".", "q", ".", "fillna", "(", "0", ",", "inplace", "=", "True", ")", "disaggregated_network", ".", "results", "=", "network", ".", "results", "print", "(", "\"Time for overall desaggregation [min]: {:.2}\"", ".", "format", "(", "(", "time", ".", "time", "(", ")", "-", "t", ")", "/", "60", ")", ")", "# write lpfile to path", "if", "not", "args", "[", "'lpfile'", "]", "is", "False", ":", "network", ".", "model", ".", "write", "(", "args", "[", "'lpfile'", "]", ",", "io_options", "=", "{", "'symbolic_solver_labels'", ":", "True", "}", ")", "# write PyPSA results back to database", "if", "args", "[", "'db_export'", "]", ":", "username", "=", "str", "(", "conn", ".", "url", ")", ".", "split", "(", "'//'", ")", "[", "1", "]", ".", "split", "(", "':'", ")", "[", "0", "]", "args", "[", "'user_name'", "]", "=", "username", "results_to_oedb", "(", "session", ",", "network", ",", "dict", "(", "[", "(", "\"disaggregated_results\"", ",", "False", ")", "]", "+", "list", "(", "args", ".", "items", "(", ")", ")", ")", ",", "grid", "=", "'hv'", ",", "safe_results", "=", "False", ")", "if", 
"disaggregated_network", ":", "results_to_oedb", "(", "session", ",", "disaggregated_network", ",", "dict", "(", "[", "(", "\"disaggregated_results\"", ",", "True", ")", "]", "+", "list", "(", "args", ".", "items", "(", ")", ")", ")", ",", "grid", "=", "'hv'", ",", "safe_results", "=", "False", ")", "# write PyPSA results to csv to path", "if", "not", "args", "[", "'csv_export'", "]", "is", "False", ":", "if", "not", "args", "[", "'pf_post_lopf'", "]", ":", "results_to_csv", "(", "network", ",", "args", ")", "else", ":", "results_to_csv", "(", "network", ",", "args", ",", "pf_solution", "=", "pf_solution", ")", "if", "disaggregated_network", ":", "results_to_csv", "(", "disaggregated_network", ",", "{", "k", ":", "os", ".", "path", ".", "join", "(", "v", ",", "'disaggregated'", ")", "if", "k", "==", "'csv_export'", "else", "v", "for", "k", ",", "v", "in", "args", ".", "items", "(", ")", "}", ")", "# close session", "# session.close()", "return", "network", ",", "disaggregated_network" ]
36.704545
20.322314
def merge_dict(a, b, path=None): """ Merge dict b into a """ if not path: path = [] for key in b: if key in a: if isinstance(a[key], dict) and isinstance(b[key], dict): merge_dict(a[key], b[key], path + [str(key)]) else: continue else: a[key] = b[key] return a
[ "def", "merge_dict", "(", "a", ",", "b", ",", "path", "=", "None", ")", ":", "if", "not", "path", ":", "path", "=", "[", "]", "for", "key", "in", "b", ":", "if", "key", "in", "a", ":", "if", "isinstance", "(", "a", "[", "key", "]", ",", "dict", ")", "and", "isinstance", "(", "b", "[", "key", "]", ",", "dict", ")", ":", "merge_dict", "(", "a", "[", "key", "]", ",", "b", "[", "key", "]", ",", "path", "+", "[", "str", "(", "key", ")", "]", ")", "else", ":", "continue", "else", ":", "a", "[", "key", "]", "=", "b", "[", "key", "]", "return", "a" ]
24.133333
18.133333
def trim_nonpercolating_paths(im, inlet_axis=0, outlet_axis=0): r""" Removes all nonpercolating paths between specified edges This function is essential when performing transport simulations on an image, since image regions that do not span between the desired inlet and outlet do not contribute to the transport. Parameters ---------- im : ND-array The image of the porous material with ```True`` values indicating the phase of interest inlet_axis : int Inlet axis of boundary condition. For three dimensional image the number ranges from 0 to 2. For two dimensional image the range is between 0 to 1. outlet_axis : int Outlet axis of boundary condition. For three dimensional image the number ranges from 0 to 2. For two dimensional image the range is between 0 to 1. Returns ------- image : ND-array A copy of ``im`` with all the nonpercolating paths removed See Also -------- find_disconnected_voxels trim_floating_solid trim_blind_pores """ if im.ndim != im.squeeze().ndim: warnings.warn('Input image conains a singleton axis:' + str(im.shape) + ' Reduce dimensionality with np.squeeze(im) to avoid' + ' unexpected behavior.') im = trim_floating_solid(~im) labels = spim.label(~im)[0] inlet = sp.zeros_like(im, dtype=int) outlet = sp.zeros_like(im, dtype=int) if im.ndim == 3: if inlet_axis == 0: inlet[0, :, :] = 1 elif inlet_axis == 1: inlet[:, 0, :] = 1 elif inlet_axis == 2: inlet[:, :, 0] = 1 if outlet_axis == 0: outlet[-1, :, :] = 1 elif outlet_axis == 1: outlet[:, -1, :] = 1 elif outlet_axis == 2: outlet[:, :, -1] = 1 if im.ndim == 2: if inlet_axis == 0: inlet[0, :] = 1 elif inlet_axis == 1: inlet[:, 0] = 1 if outlet_axis == 0: outlet[-1, :] = 1 elif outlet_axis == 1: outlet[:, -1] = 1 IN = sp.unique(labels*inlet) OUT = sp.unique(labels*outlet) new_im = sp.isin(labels, list(set(IN) ^ set(OUT)), invert=True) im[new_im == 0] = True return ~im
[ "def", "trim_nonpercolating_paths", "(", "im", ",", "inlet_axis", "=", "0", ",", "outlet_axis", "=", "0", ")", ":", "if", "im", ".", "ndim", "!=", "im", ".", "squeeze", "(", ")", ".", "ndim", ":", "warnings", ".", "warn", "(", "'Input image conains a singleton axis:'", "+", "str", "(", "im", ".", "shape", ")", "+", "' Reduce dimensionality with np.squeeze(im) to avoid'", "+", "' unexpected behavior.'", ")", "im", "=", "trim_floating_solid", "(", "~", "im", ")", "labels", "=", "spim", ".", "label", "(", "~", "im", ")", "[", "0", "]", "inlet", "=", "sp", ".", "zeros_like", "(", "im", ",", "dtype", "=", "int", ")", "outlet", "=", "sp", ".", "zeros_like", "(", "im", ",", "dtype", "=", "int", ")", "if", "im", ".", "ndim", "==", "3", ":", "if", "inlet_axis", "==", "0", ":", "inlet", "[", "0", ",", ":", ",", ":", "]", "=", "1", "elif", "inlet_axis", "==", "1", ":", "inlet", "[", ":", ",", "0", ",", ":", "]", "=", "1", "elif", "inlet_axis", "==", "2", ":", "inlet", "[", ":", ",", ":", ",", "0", "]", "=", "1", "if", "outlet_axis", "==", "0", ":", "outlet", "[", "-", "1", ",", ":", ",", ":", "]", "=", "1", "elif", "outlet_axis", "==", "1", ":", "outlet", "[", ":", ",", "-", "1", ",", ":", "]", "=", "1", "elif", "outlet_axis", "==", "2", ":", "outlet", "[", ":", ",", ":", ",", "-", "1", "]", "=", "1", "if", "im", ".", "ndim", "==", "2", ":", "if", "inlet_axis", "==", "0", ":", "inlet", "[", "0", ",", ":", "]", "=", "1", "elif", "inlet_axis", "==", "1", ":", "inlet", "[", ":", ",", "0", "]", "=", "1", "if", "outlet_axis", "==", "0", ":", "outlet", "[", "-", "1", ",", ":", "]", "=", "1", "elif", "outlet_axis", "==", "1", ":", "outlet", "[", ":", ",", "-", "1", "]", "=", "1", "IN", "=", "sp", ".", "unique", "(", "labels", "*", "inlet", ")", "OUT", "=", "sp", ".", "unique", "(", "labels", "*", "outlet", ")", "new_im", "=", "sp", ".", "isin", "(", "labels", ",", "list", "(", "set", "(", "IN", ")", "^", "set", "(", "OUT", ")", ")", ",", "invert", "=", "True", ")", "im", "[", 
"new_im", "==", "0", "]", "=", "True", "return", "~", "im" ]
30.351351
20.283784
def sortBy(self, keyfunc, ascending=True, numPartitions=None): """ Sorts this RDD by the given keyfunc >>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)] >>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect() [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)] >>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect() [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)] """ return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
[ "def", "sortBy", "(", "self", ",", "keyfunc", ",", "ascending", "=", "True", ",", "numPartitions", "=", "None", ")", ":", "return", "self", ".", "keyBy", "(", "keyfunc", ")", ".", "sortByKey", "(", "ascending", ",", "numPartitions", ")", ".", "values", "(", ")" ]
47.090909
19.636364
def transplant_node(self, node): """ Transplant a node from another document to become a child of this node, removing it from the source document. The node to be transplanted can be a :class:`Node` based on the same underlying XML library implementation and adapter, or a "raw" node from that implementation. :param node: the node in another document to transplant. :type node: xml4h or implementation node """ if isinstance(node, xml4h.nodes.Node): child_impl_node = node.impl_node original_parent_impl_node = node.parent.impl_node else: child_impl_node = node # Assume it's a valid impl node original_parent_impl_node = self.adapter.get_node_parent(node) self.adapter.import_node(self.impl_node, child_impl_node, original_parent_impl_node, clone=False)
[ "def", "transplant_node", "(", "self", ",", "node", ")", ":", "if", "isinstance", "(", "node", ",", "xml4h", ".", "nodes", ".", "Node", ")", ":", "child_impl_node", "=", "node", ".", "impl_node", "original_parent_impl_node", "=", "node", ".", "parent", ".", "impl_node", "else", ":", "child_impl_node", "=", "node", "# Assume it's a valid impl node", "original_parent_impl_node", "=", "self", ".", "adapter", ".", "get_node_parent", "(", "node", ")", "self", ".", "adapter", ".", "import_node", "(", "self", ".", "impl_node", ",", "child_impl_node", ",", "original_parent_impl_node", ",", "clone", "=", "False", ")" ]
49.333333
20.888889
def validate_to_one(self, value): """ Check if the to_one should exist & casts properly """ if value.rid and self.typeness is int: validators.validate_int(value) if value.rid and not self.skip_exists: if not value.load(): raise ValidationError(self.messages['exists']) return value
[ "def", "validate_to_one", "(", "self", ",", "value", ")", ":", "if", "value", ".", "rid", "and", "self", ".", "typeness", "is", "int", ":", "validators", ".", "validate_int", "(", "value", ")", "if", "value", ".", "rid", "and", "not", "self", ".", "skip_exists", ":", "if", "not", "value", ".", "load", "(", ")", ":", "raise", "ValidationError", "(", "self", ".", "messages", "[", "'exists'", "]", ")", "return", "value" ]
34.6
15.1
def output_closure(self, target, file_number=None): ''' Function to output to a cairo surface target is a cairo Context or filename if file_number is set, then files will be numbered (this is usually set to the current frame number) ''' def output_context(ctx): target_ctx = target target_ctx.set_source_surface(ctx.get_target()) target_ctx.paint() return target_ctx def output_surface(ctx): target_ctx = cairo.Context(target) target_ctx.set_source_surface(ctx.get_target()) target_ctx.paint() return target_ctx def output_file(ctx): root, extension = os.path.splitext(target) if file_number: filename = '%s_%04d%s' % (root, file_number, extension) else: filename = target extension = extension.lower() if extension == '.png': surface = ctx.get_target() surface.write_to_png(target) elif extension == '.pdf': target_ctx = cairo.Context(cairo.PDFSurface(filename, *self.size_or_default())) target_ctx.set_source_surface(ctx.get_target()) target_ctx.paint() elif extension in ('.ps', '.eps'): target_ctx = cairo.Context(cairo.PSSurface(filename, *self.size_or_default())) if extension == '.eps': target_ctx.set_eps(extension='.eps') target_ctx.set_source_surface(ctx.get_target()) target_ctx.paint() elif extension == '.svg': target_ctx = cairo.Context(cairo.SVGSurface(filename, *self.size_or_default())) target_ctx.set_source_surface(ctx.get_target()) target_ctx.paint() return filename if isinstance(target, cairo.Context): return output_context elif isinstance(target, cairo.Surface): return output_surface else: return output_file
[ "def", "output_closure", "(", "self", ",", "target", ",", "file_number", "=", "None", ")", ":", "def", "output_context", "(", "ctx", ")", ":", "target_ctx", "=", "target", "target_ctx", ".", "set_source_surface", "(", "ctx", ".", "get_target", "(", ")", ")", "target_ctx", ".", "paint", "(", ")", "return", "target_ctx", "def", "output_surface", "(", "ctx", ")", ":", "target_ctx", "=", "cairo", ".", "Context", "(", "target", ")", "target_ctx", ".", "set_source_surface", "(", "ctx", ".", "get_target", "(", ")", ")", "target_ctx", ".", "paint", "(", ")", "return", "target_ctx", "def", "output_file", "(", "ctx", ")", ":", "root", ",", "extension", "=", "os", ".", "path", ".", "splitext", "(", "target", ")", "if", "file_number", ":", "filename", "=", "'%s_%04d%s'", "%", "(", "root", ",", "file_number", ",", "extension", ")", "else", ":", "filename", "=", "target", "extension", "=", "extension", ".", "lower", "(", ")", "if", "extension", "==", "'.png'", ":", "surface", "=", "ctx", ".", "get_target", "(", ")", "surface", ".", "write_to_png", "(", "target", ")", "elif", "extension", "==", "'.pdf'", ":", "target_ctx", "=", "cairo", ".", "Context", "(", "cairo", ".", "PDFSurface", "(", "filename", ",", "*", "self", ".", "size_or_default", "(", ")", ")", ")", "target_ctx", ".", "set_source_surface", "(", "ctx", ".", "get_target", "(", ")", ")", "target_ctx", ".", "paint", "(", ")", "elif", "extension", "in", "(", "'.ps'", ",", "'.eps'", ")", ":", "target_ctx", "=", "cairo", ".", "Context", "(", "cairo", ".", "PSSurface", "(", "filename", ",", "*", "self", ".", "size_or_default", "(", ")", ")", ")", "if", "extension", "==", "'.eps'", ":", "target_ctx", ".", "set_eps", "(", "extension", "=", "'.eps'", ")", "target_ctx", ".", "set_source_surface", "(", "ctx", ".", "get_target", "(", ")", ")", "target_ctx", ".", "paint", "(", ")", "elif", "extension", "==", "'.svg'", ":", "target_ctx", "=", "cairo", ".", "Context", "(", "cairo", ".", "SVGSurface", "(", "filename", ",", "*", 
"self", ".", "size_or_default", "(", ")", ")", ")", "target_ctx", ".", "set_source_surface", "(", "ctx", ".", "get_target", "(", ")", ")", "target_ctx", ".", "paint", "(", ")", "return", "filename", "if", "isinstance", "(", "target", ",", "cairo", ".", "Context", ")", ":", "return", "output_context", "elif", "isinstance", "(", "target", ",", "cairo", ".", "Surface", ")", ":", "return", "output_surface", "else", ":", "return", "output_file" ]
38.981132
16.830189
def monitor(args, watch): """ reloads the script given by argv when src files changes """ watch = watch if isinstance(watch, (list, tuple)) else [watch] watch = [Path(entry).expand().abspath() for entry in watch] event_handler = RunScriptChangeHandler(args) observer = Observer() for entry in watch: if entry.isfile(): entry = entry.parent click.secho('watch recursive: {0}'.format(entry), fg='blue') observer.schedule(event_handler, entry, recursive=True) event_handler.run() # run always once observer.start() try: while True: time.sleep(1) except KeyboardInterrupt: observer.stop() observer.join()
[ "def", "monitor", "(", "args", ",", "watch", ")", ":", "watch", "=", "watch", "if", "isinstance", "(", "watch", ",", "(", "list", ",", "tuple", ")", ")", "else", "[", "watch", "]", "watch", "=", "[", "Path", "(", "entry", ")", ".", "expand", "(", ")", ".", "abspath", "(", ")", "for", "entry", "in", "watch", "]", "event_handler", "=", "RunScriptChangeHandler", "(", "args", ")", "observer", "=", "Observer", "(", ")", "for", "entry", "in", "watch", ":", "if", "entry", ".", "isfile", "(", ")", ":", "entry", "=", "entry", ".", "parent", "click", ".", "secho", "(", "'watch recursive: {0}'", ".", "format", "(", "entry", ")", ",", "fg", "=", "'blue'", ")", "observer", ".", "schedule", "(", "event_handler", ",", "entry", ",", "recursive", "=", "True", ")", "event_handler", ".", "run", "(", ")", "# run always once", "observer", ".", "start", "(", ")", "try", ":", "while", "True", ":", "time", ".", "sleep", "(", "1", ")", "except", "KeyboardInterrupt", ":", "observer", ".", "stop", "(", ")", "observer", ".", "join", "(", ")" ]
31.681818
17.045455
def get_feedback(self, feedback_id, model=None, **kwargs): """ List a specified feedback entry. Lists a feedback entry with a specified `feedback_id`. :param str feedback_id: A string that specifies the feedback entry to be included in the output. :param str model: The analysis model to be used by the service. For the **Element classification** and **Compare two documents** methods, the default is `contracts`. For the **Extract tables** method, the default is `tables`. These defaults apply to the standalone methods as well as to the methods' use in batch-processing requests. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ if feedback_id is None: raise ValueError('feedback_id must be provided') headers = {} if 'headers' in kwargs: headers.update(kwargs.get('headers')) sdk_headers = get_sdk_headers('compare-comply', 'V1', 'get_feedback') headers.update(sdk_headers) params = {'version': self.version, 'model': model} url = '/v1/feedback/{0}'.format(*self._encode_path_vars(feedback_id)) response = self.request( method='GET', url=url, headers=headers, params=params, accept_json=True) return response
[ "def", "get_feedback", "(", "self", ",", "feedback_id", ",", "model", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "feedback_id", "is", "None", ":", "raise", "ValueError", "(", "'feedback_id must be provided'", ")", "headers", "=", "{", "}", "if", "'headers'", "in", "kwargs", ":", "headers", ".", "update", "(", "kwargs", ".", "get", "(", "'headers'", ")", ")", "sdk_headers", "=", "get_sdk_headers", "(", "'compare-comply'", ",", "'V1'", ",", "'get_feedback'", ")", "headers", ".", "update", "(", "sdk_headers", ")", "params", "=", "{", "'version'", ":", "self", ".", "version", ",", "'model'", ":", "model", "}", "url", "=", "'/v1/feedback/{0}'", ".", "format", "(", "*", "self", ".", "_encode_path_vars", "(", "feedback_id", ")", ")", "response", "=", "self", ".", "request", "(", "method", "=", "'GET'", ",", "url", "=", "url", ",", "headers", "=", "headers", ",", "params", "=", "params", ",", "accept_json", "=", "True", ")", "return", "response" ]
39.810811
23.648649
def download(self, files=None, destination=None, overwrite=False, callback=None): """Download file or files. :param files: file or files to download :param destination: destination path (defaults to users home directory) :param overwrite: replace existing files? :param callback: callback function that will receive total file size and written bytes as arguments :type files: ``list`` of ``dict`` with file data from filemail :type destination: ``str`` or ``unicode`` :type overwrite: ``bool`` :type callback: ``func`` """ if files is None: files = self.files elif not isinstance(files, list): files = [files] if destination is None: destination = os.path.expanduser('~') for f in files: if not isinstance(f, dict): raise FMBaseError('File must be a <dict> with file data') self._download(f, destination, overwrite, callback)
[ "def", "download", "(", "self", ",", "files", "=", "None", ",", "destination", "=", "None", ",", "overwrite", "=", "False", ",", "callback", "=", "None", ")", ":", "if", "files", "is", "None", ":", "files", "=", "self", ".", "files", "elif", "not", "isinstance", "(", "files", ",", "list", ")", ":", "files", "=", "[", "files", "]", "if", "destination", "is", "None", ":", "destination", "=", "os", ".", "path", ".", "expanduser", "(", "'~'", ")", "for", "f", "in", "files", ":", "if", "not", "isinstance", "(", "f", ",", "dict", ")", ":", "raise", "FMBaseError", "(", "'File must be a <dict> with file data'", ")", "self", ".", "_download", "(", "f", ",", "destination", ",", "overwrite", ",", "callback", ")" ]
32.272727
18.545455
def _build_credentials(self, nexus_switches): """Build credential table for Rest API Client. :param nexus_switches: switch config :returns credentials: switch credentials list """ credentials = {} for switch_ip, attrs in nexus_switches.items(): credentials[switch_ip] = ( attrs[const.USERNAME], attrs[const.PASSWORD], attrs[const.HTTPS_VERIFY], attrs[const.HTTPS_CERT], None) if not attrs[const.HTTPS_VERIFY]: LOG.warning("HTTPS Certificate verification is " "disabled. Your connection to Nexus " "Switch %(ip)s is insecure.", {'ip': switch_ip}) return credentials
[ "def", "_build_credentials", "(", "self", ",", "nexus_switches", ")", ":", "credentials", "=", "{", "}", "for", "switch_ip", ",", "attrs", "in", "nexus_switches", ".", "items", "(", ")", ":", "credentials", "[", "switch_ip", "]", "=", "(", "attrs", "[", "const", ".", "USERNAME", "]", ",", "attrs", "[", "const", ".", "PASSWORD", "]", ",", "attrs", "[", "const", ".", "HTTPS_VERIFY", "]", ",", "attrs", "[", "const", ".", "HTTPS_CERT", "]", ",", "None", ")", "if", "not", "attrs", "[", "const", ".", "HTTPS_VERIFY", "]", ":", "LOG", ".", "warning", "(", "\"HTTPS Certificate verification is \"", "\"disabled. Your connection to Nexus \"", "\"Switch %(ip)s is insecure.\"", ",", "{", "'ip'", ":", "switch_ip", "}", ")", "return", "credentials" ]
43.111111
14.055556
def _read_openjpeg(self, rlevel=0, verbose=False, area=None): """Read a JPEG 2000 image using libopenjpeg. Parameters ---------- rlevel : int, optional Factor by which to rlevel output resolution. Use -1 to get the lowest resolution thumbnail. verbose : bool, optional Print informational messages produced by the OpenJPEG library. area : tuple, optional Specifies decoding image area, (first_row, first_col, last_row, last_col) Returns ------- ndarray The image data. Raises ------ RuntimeError If the image has differing subsample factors. """ self._subsampling_sanity_check() self._populate_dparams(rlevel) with ExitStack() as stack: try: self._dparams.decod_format = self._codec_format dinfo = opj.create_decompress(self._dparams.decod_format) event_mgr = opj.EventMgrType() handler = ctypes.cast(_INFO_CALLBACK, ctypes.c_void_p) event_mgr.info_handler = handler if self.verbose else None event_mgr.warning_handler = ctypes.cast(_WARNING_CALLBACK, ctypes.c_void_p) event_mgr.error_handler = ctypes.cast(_ERROR_CALLBACK, ctypes.c_void_p) opj.set_event_mgr(dinfo, ctypes.byref(event_mgr)) opj.setup_decoder(dinfo, self._dparams) with open(self.filename, 'rb') as fptr: src = fptr.read() cio = opj.cio_open(dinfo, src) raw_image = opj.decode(dinfo, cio) stack.callback(opj.image_destroy, raw_image) stack.callback(opj.destroy_decompress, dinfo) stack.callback(opj.cio_close, cio) image = self._extract_image(raw_image) except ValueError: opj2.check_error(0) if area is not None: x0, y0, x1, y1 = area extent = 2 ** rlevel area = [int(round(float(x) / extent + 2 ** -20)) for x in area] rows = slice(area[0], area[2], None) cols = slice(area[1], area[3], None) image = image[rows, cols] return image
[ "def", "_read_openjpeg", "(", "self", ",", "rlevel", "=", "0", ",", "verbose", "=", "False", ",", "area", "=", "None", ")", ":", "self", ".", "_subsampling_sanity_check", "(", ")", "self", ".", "_populate_dparams", "(", "rlevel", ")", "with", "ExitStack", "(", ")", "as", "stack", ":", "try", ":", "self", ".", "_dparams", ".", "decod_format", "=", "self", ".", "_codec_format", "dinfo", "=", "opj", ".", "create_decompress", "(", "self", ".", "_dparams", ".", "decod_format", ")", "event_mgr", "=", "opj", ".", "EventMgrType", "(", ")", "handler", "=", "ctypes", ".", "cast", "(", "_INFO_CALLBACK", ",", "ctypes", ".", "c_void_p", ")", "event_mgr", ".", "info_handler", "=", "handler", "if", "self", ".", "verbose", "else", "None", "event_mgr", ".", "warning_handler", "=", "ctypes", ".", "cast", "(", "_WARNING_CALLBACK", ",", "ctypes", ".", "c_void_p", ")", "event_mgr", ".", "error_handler", "=", "ctypes", ".", "cast", "(", "_ERROR_CALLBACK", ",", "ctypes", ".", "c_void_p", ")", "opj", ".", "set_event_mgr", "(", "dinfo", ",", "ctypes", ".", "byref", "(", "event_mgr", ")", ")", "opj", ".", "setup_decoder", "(", "dinfo", ",", "self", ".", "_dparams", ")", "with", "open", "(", "self", ".", "filename", ",", "'rb'", ")", "as", "fptr", ":", "src", "=", "fptr", ".", "read", "(", ")", "cio", "=", "opj", ".", "cio_open", "(", "dinfo", ",", "src", ")", "raw_image", "=", "opj", ".", "decode", "(", "dinfo", ",", "cio", ")", "stack", ".", "callback", "(", "opj", ".", "image_destroy", ",", "raw_image", ")", "stack", ".", "callback", "(", "opj", ".", "destroy_decompress", ",", "dinfo", ")", "stack", ".", "callback", "(", "opj", ".", "cio_close", ",", "cio", ")", "image", "=", "self", ".", "_extract_image", "(", "raw_image", ")", "except", "ValueError", ":", "opj2", ".", "check_error", "(", "0", ")", "if", "area", "is", "not", "None", ":", "x0", ",", "y0", ",", "x1", ",", "y1", "=", "area", "extent", "=", "2", "**", "rlevel", "area", "=", "[", "int", "(", "round", "(", "float", "(", "x", 
")", "/", "extent", "+", "2", "**", "-", "20", ")", ")", "for", "x", "in", "area", "]", "rows", "=", "slice", "(", "area", "[", "0", "]", ",", "area", "[", "2", "]", ",", "None", ")", "cols", "=", "slice", "(", "area", "[", "1", "]", ",", "area", "[", "3", "]", ",", "None", ")", "image", "=", "image", "[", "rows", ",", "cols", "]", "return", "image" ]
33.9
21.914286
def _task_to_text(self, task): """ Return a standard formatting of a Task serialization. """ started = self._format_date(task.get('started_at', None)) completed = self._format_date(task.get('completed_at', None)) success = task.get('success', None) success_lu = {None: 'Not executed', True: 'Success', False: 'Failed'} run_log = task.get('run_log', {}) return '\n'.join(['Task: %s' % task.get('name', None), 'Command: %s' % task.get('command', None), 'Result: %s' % success_lu[success], 'Started at: %s' % started, 'Completed at: %s' % completed, 'Return Code: %s' % run_log.get('return_code', None), 'Stdout: %s' % run_log.get('stdout', None), 'Stderr: %s' % run_log.get('stderr', None)])
[ "def", "_task_to_text", "(", "self", ",", "task", ")", ":", "started", "=", "self", ".", "_format_date", "(", "task", ".", "get", "(", "'started_at'", ",", "None", ")", ")", "completed", "=", "self", ".", "_format_date", "(", "task", ".", "get", "(", "'completed_at'", ",", "None", ")", ")", "success", "=", "task", ".", "get", "(", "'success'", ",", "None", ")", "success_lu", "=", "{", "None", ":", "'Not executed'", ",", "True", ":", "'Success'", ",", "False", ":", "'Failed'", "}", "run_log", "=", "task", ".", "get", "(", "'run_log'", ",", "{", "}", ")", "return", "'\\n'", ".", "join", "(", "[", "'Task: %s'", "%", "task", ".", "get", "(", "'name'", ",", "None", ")", ",", "'Command: %s'", "%", "task", ".", "get", "(", "'command'", ",", "None", ")", ",", "'Result: %s'", "%", "success_lu", "[", "success", "]", ",", "'Started at: %s'", "%", "started", ",", "'Completed at: %s'", "%", "completed", ",", "'Return Code: %s'", "%", "run_log", ".", "get", "(", "'return_code'", ",", "None", ")", ",", "'Stdout: %s'", "%", "run_log", ".", "get", "(", "'stdout'", ",", "None", ")", ",", "'Stderr: %s'", "%", "run_log", ".", "get", "(", "'stderr'", ",", "None", ")", "]", ")" ]
49.157895
21.526316
def prove(x,beta,kw,y): """ Generate a zero-knowledge proof that DL(Q*kw) = DL(beta*kw) where <Q> = G1. """ # Verify types assertScalarType(kw) assertType(beta, G1Element) assertType(y, G1Element) # Compute the proof. Q = generatorG1() p = Q*kw v = randomZ(orderG1()) t1 = Q*v t2 = beta*v t1.normalize() t2.normalize() c = hashZ(Q,p,beta,y,t1,t2) u = (v-(c*kw)) % orderG1() return (p,c,u)
[ "def", "prove", "(", "x", ",", "beta", ",", "kw", ",", "y", ")", ":", "# Verify types", "assertScalarType", "(", "kw", ")", "assertType", "(", "beta", ",", "G1Element", ")", "assertType", "(", "y", ",", "G1Element", ")", "# Compute the proof.", "Q", "=", "generatorG1", "(", ")", "p", "=", "Q", "*", "kw", "v", "=", "randomZ", "(", "orderG1", "(", ")", ")", "t1", "=", "Q", "*", "v", "t2", "=", "beta", "*", "v", "t1", ".", "normalize", "(", ")", "t2", ".", "normalize", "(", ")", "c", "=", "hashZ", "(", "Q", ",", "p", ",", "beta", ",", "y", ",", "t1", ",", "t2", ")", "u", "=", "(", "v", "-", "(", "c", "*", "kw", ")", ")", "%", "orderG1", "(", ")", "return", "(", "p", ",", "c", ",", "u", ")" ]
19.347826
20.304348
def send_request(self, req): """ Send a standard library :class:`Request` to PyPI and return its response. :param req: The request to send. :return: The HTTP response from PyPI (a standard library HTTPResponse). """ handlers = [] if self.password_handler: handlers.append(self.password_handler) if self.ssl_verifier: handlers.append(self.ssl_verifier) opener = build_opener(*handlers) return opener.open(req)
[ "def", "send_request", "(", "self", ",", "req", ")", ":", "handlers", "=", "[", "]", "if", "self", ".", "password_handler", ":", "handlers", ".", "append", "(", "self", ".", "password_handler", ")", "if", "self", ".", "ssl_verifier", ":", "handlers", ".", "append", "(", "self", ".", "ssl_verifier", ")", "opener", "=", "build_opener", "(", "*", "handlers", ")", "return", "opener", ".", "open", "(", "req", ")" ]
33.8
13.8
def get_identities(self): """List the identity objects contained in `self`. :return: the list of identities. :returntype: `list` of `DiscoIdentity`""" ret=[] l=self.xpath_ctxt.xpathEval("d:identity") if l is not None: for i in l: ret.append(DiscoIdentity(self,i)) return ret
[ "def", "get_identities", "(", "self", ")", ":", "ret", "=", "[", "]", "l", "=", "self", ".", "xpath_ctxt", ".", "xpathEval", "(", "\"d:identity\"", ")", "if", "l", "is", "not", "None", ":", "for", "i", "in", "l", ":", "ret", ".", "append", "(", "DiscoIdentity", "(", "self", ",", "i", ")", ")", "return", "ret" ]
31.727273
13.909091
def roll50(msg): """Roll angle, BDS 5,0 message Args: msg (String): 28 bytes hexadecimal message (BDS50) string Returns: float: angle in degrees, negative->left wing down, positive->right wing down """ d = hex2bin(data(msg)) if d[0] == '0': return None sign = int(d[1]) # 1 -> left wing down value = bin2int(d[2:11]) if sign: value = value - 512 angle = value * 45.0 / 256.0 # degree return round(angle, 1)
[ "def", "roll50", "(", "msg", ")", ":", "d", "=", "hex2bin", "(", "data", "(", "msg", ")", ")", "if", "d", "[", "0", "]", "==", "'0'", ":", "return", "None", "sign", "=", "int", "(", "d", "[", "1", "]", ")", "# 1 -> left wing down", "value", "=", "bin2int", "(", "d", "[", "2", ":", "11", "]", ")", "if", "sign", ":", "value", "=", "value", "-", "512", "angle", "=", "value", "*", "45.0", "/", "256.0", "# degree", "return", "round", "(", "angle", ",", "1", ")" ]
21.173913
22.347826
def explain(self, entry): """ Prints an explanation of the corrections that are being applied for a given compatibility scheme. Inspired by the "explain" methods in many database methodologies. Args: entry: A ComputedEntry. """ d = self.get_explanation_dict(entry) print("The uncorrected value of the energy of %s is %f eV" % (entry.composition, d["uncorrected_energy"])) print("The following corrections / screening are applied for %s:\n" % d["compatibility"]) for c in d["corrections"]: print("%s correction: %s\n" % (c["name"], c["description"])) print("For the entry, this correction has the value %f eV." % c[ "value"]) print("-" * 30) print("The final energy after corrections is %f" % d[ "corrected_energy"])
[ "def", "explain", "(", "self", ",", "entry", ")", ":", "d", "=", "self", ".", "get_explanation_dict", "(", "entry", ")", "print", "(", "\"The uncorrected value of the energy of %s is %f eV\"", "%", "(", "entry", ".", "composition", ",", "d", "[", "\"uncorrected_energy\"", "]", ")", ")", "print", "(", "\"The following corrections / screening are applied for %s:\\n\"", "%", "d", "[", "\"compatibility\"", "]", ")", "for", "c", "in", "d", "[", "\"corrections\"", "]", ":", "print", "(", "\"%s correction: %s\\n\"", "%", "(", "c", "[", "\"name\"", "]", ",", "c", "[", "\"description\"", "]", ")", ")", "print", "(", "\"For the entry, this correction has the value %f eV.\"", "%", "c", "[", "\"value\"", "]", ")", "print", "(", "\"-\"", "*", "30", ")", "print", "(", "\"The final energy after corrections is %f\"", "%", "d", "[", "\"corrected_energy\"", "]", ")" ]
40.434783
19.043478
def check_build_for_current_platform_only(self, targets): """ Performs a check of whether the current target closure has native sources and if so, ensures that Pants is only targeting the current platform. :param tgts: a list of :class:`Target` objects. :return: a boolean value indicating whether the current target closure has native sources. :raises: :class:`pants.base.exceptions.IncompatiblePlatformsError` """ if not self._any_targets_have_native_sources(targets): return False targets_by_platform = pex_build_util.targets_by_platform(targets, self._python_setup) platforms_with_sources = self._get_targets_by_declared_platform_with_placeholders(targets_by_platform) platform_names = list(platforms_with_sources.keys()) if len(platform_names) < 1: raise self.PythonNativeCodeError( "Error: there should be at least one platform in the target closure, because " "we checked that there are native sources.") if platform_names == ['current']: return True raise IncompatiblePlatformsError( 'The target set contains one or more targets that depend on ' 'native code. Please ensure that the platform arguments in all relevant targets and build ' 'options are compatible with the current platform. Found targets for platforms: {}' .format(str(platforms_with_sources)))
[ "def", "check_build_for_current_platform_only", "(", "self", ",", "targets", ")", ":", "if", "not", "self", ".", "_any_targets_have_native_sources", "(", "targets", ")", ":", "return", "False", "targets_by_platform", "=", "pex_build_util", ".", "targets_by_platform", "(", "targets", ",", "self", ".", "_python_setup", ")", "platforms_with_sources", "=", "self", ".", "_get_targets_by_declared_platform_with_placeholders", "(", "targets_by_platform", ")", "platform_names", "=", "list", "(", "platforms_with_sources", ".", "keys", "(", ")", ")", "if", "len", "(", "platform_names", ")", "<", "1", ":", "raise", "self", ".", "PythonNativeCodeError", "(", "\"Error: there should be at least one platform in the target closure, because \"", "\"we checked that there are native sources.\"", ")", "if", "platform_names", "==", "[", "'current'", "]", ":", "return", "True", "raise", "IncompatiblePlatformsError", "(", "'The target set contains one or more targets that depend on '", "'native code. Please ensure that the platform arguments in all relevant targets and build '", "'options are compatible with the current platform. Found targets for platforms: {}'", ".", "format", "(", "str", "(", "platforms_with_sources", ")", ")", ")" ]
46.827586
27.103448
def predict_proba(self, X): """ Calculate posterior probabilities of test data. Parameters ---------- X : array Test data, of dimension N times d (rows are examples, columns are data dimensions) Returns: ------- y_prob : array An array of dimension N times n_inlier_classes+1, containing the probabilities of each row of X being one of the inlier classes, or the outlier class (last column). """ Phi = metrics.pairwise.rbf_kernel(X, self.kernel_pos, self.gamma) N = X.shape[0] predictions = np.zeros((N, self.n_classes+1)) for i in range(N): post = np.zeros(self.n_classes) for c in range(self.n_classes): post[c] = max(0, np.dot(self.theta[self.classes[c]].T, Phi[i, :])) post[c] = min(post[c], 1.) predictions[i, :-1] = post predictions[i, -1] = max(0, 1-sum(post)) return predictions
[ "def", "predict_proba", "(", "self", ",", "X", ")", ":", "Phi", "=", "metrics", ".", "pairwise", ".", "rbf_kernel", "(", "X", ",", "self", ".", "kernel_pos", ",", "self", ".", "gamma", ")", "N", "=", "X", ".", "shape", "[", "0", "]", "predictions", "=", "np", ".", "zeros", "(", "(", "N", ",", "self", ".", "n_classes", "+", "1", ")", ")", "for", "i", "in", "range", "(", "N", ")", ":", "post", "=", "np", ".", "zeros", "(", "self", ".", "n_classes", ")", "for", "c", "in", "range", "(", "self", ".", "n_classes", ")", ":", "post", "[", "c", "]", "=", "max", "(", "0", ",", "np", ".", "dot", "(", "self", ".", "theta", "[", "self", ".", "classes", "[", "c", "]", "]", ".", "T", ",", "Phi", "[", "i", ",", ":", "]", ")", ")", "post", "[", "c", "]", "=", "min", "(", "post", "[", "c", "]", ",", "1.", ")", "predictions", "[", "i", ",", ":", "-", "1", "]", "=", "post", "predictions", "[", "i", ",", "-", "1", "]", "=", "max", "(", "0", ",", "1", "-", "sum", "(", "post", ")", ")", "return", "predictions" ]
34.733333
18.733333
def config_absent(name): ''' Ensure a specific configuration line does not exist in the running config name config line to remove Examples: .. code-block:: yaml add snmp group: onyx.config_absent: - names: - snmp-server community randoSNMPstringHERE group network-operator - snmp-server community AnotherRandomSNMPSTring group network-admin .. note:: For certain cases extra lines could be removed based on dependencies. In this example, included after the example for config_present, the ACLs would be removed because they depend on the existence of the group. ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} matches = __salt__['onyx.cmd']('find', name) if not matches: ret['result'] = True ret['comment'] = 'Config is already absent' elif __opts__['test'] is True: ret['result'] = None ret['comment'] = 'Config will be removed' ret['changes']['new'] = name else: __salt__['onyx.cmd']('delete_config', name) matches = __salt__['onyx.cmd']('find', name) if not matches: ret['result'] = True ret['comment'] = 'Successfully deleted config' ret['changes']['new'] = name else: ret['result'] = False ret['comment'] = 'Failed to delete config' return ret
[ "def", "config_absent", "(", "name", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "False", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "''", "}", "matches", "=", "__salt__", "[", "'onyx.cmd'", "]", "(", "'find'", ",", "name", ")", "if", "not", "matches", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'Config is already absent'", "elif", "__opts__", "[", "'test'", "]", "is", "True", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'comment'", "]", "=", "'Config will be removed'", "ret", "[", "'changes'", "]", "[", "'new'", "]", "=", "name", "else", ":", "__salt__", "[", "'onyx.cmd'", "]", "(", "'delete_config'", ",", "name", ")", "matches", "=", "__salt__", "[", "'onyx.cmd'", "]", "(", "'find'", ",", "name", ")", "if", "not", "matches", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'Successfully deleted config'", "ret", "[", "'changes'", "]", "[", "'new'", "]", "=", "name", "else", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Failed to delete config'", "return", "ret" ]
27.826923
23.942308
def check_deps(self): """Dependency checking phase is performed in this method. """ shutit_global.shutit_global_object.yield_to_draw() cfg = self.cfg self.log('PHASE: dependencies', level=logging.DEBUG) self.pause_point('\nNow checking for dependencies between modules', print_input=False, level=3) # Get modules we're going to build to_build = [ self.shutit_map[module_id] for module_id in self.shutit_map if module_id in cfg and cfg[module_id]['shutit.core.module.build'] ] # Add any deps we may need by extending to_build and altering cfg for module in to_build: self.resolve_dependencies(to_build, module) # Dep checking def err_checker(errs, triples): """Collate error information. """ new_triples = [] for err, triple in zip(errs, triples): if not err: new_triples.append(triple) continue found_errs.append(err) return new_triples found_errs = [] triples = [] for depender in to_build: for dependee_id in depender.depends_on: triples.append((depender, self.shutit_map.get(dependee_id), dependee_id)) triples = err_checker([ self.check_dependee_exists(depender, dependee, dependee_id) for depender, dependee, dependee_id in triples ], triples) triples = err_checker([ self.check_dependee_build(depender, dependee, dependee_id) for depender, dependee, dependee_id in triples ], triples) triples = err_checker([ check_dependee_order(depender, dependee, dependee_id) for depender, dependee, dependee_id in triples ], triples) if found_errs: return [(err,) for err in found_errs] self.log('Modules configured to be built (in order) are: ', level=logging.DEBUG) for module_id in self.module_ids(): module = self.shutit_map[module_id] if cfg[module_id]['shutit.core.module.build']: self.log(module_id + ' ' + str(module.run_order), level=logging.DEBUG) self.log('\n', level=logging.DEBUG) return []
[ "def", "check_deps", "(", "self", ")", ":", "shutit_global", ".", "shutit_global_object", ".", "yield_to_draw", "(", ")", "cfg", "=", "self", ".", "cfg", "self", ".", "log", "(", "'PHASE: dependencies'", ",", "level", "=", "logging", ".", "DEBUG", ")", "self", ".", "pause_point", "(", "'\\nNow checking for dependencies between modules'", ",", "print_input", "=", "False", ",", "level", "=", "3", ")", "# Get modules we're going to build", "to_build", "=", "[", "self", ".", "shutit_map", "[", "module_id", "]", "for", "module_id", "in", "self", ".", "shutit_map", "if", "module_id", "in", "cfg", "and", "cfg", "[", "module_id", "]", "[", "'shutit.core.module.build'", "]", "]", "# Add any deps we may need by extending to_build and altering cfg", "for", "module", "in", "to_build", ":", "self", ".", "resolve_dependencies", "(", "to_build", ",", "module", ")", "# Dep checking", "def", "err_checker", "(", "errs", ",", "triples", ")", ":", "\"\"\"Collate error information.\n\t\t\t\"\"\"", "new_triples", "=", "[", "]", "for", "err", ",", "triple", "in", "zip", "(", "errs", ",", "triples", ")", ":", "if", "not", "err", ":", "new_triples", ".", "append", "(", "triple", ")", "continue", "found_errs", ".", "append", "(", "err", ")", "return", "new_triples", "found_errs", "=", "[", "]", "triples", "=", "[", "]", "for", "depender", "in", "to_build", ":", "for", "dependee_id", "in", "depender", ".", "depends_on", ":", "triples", ".", "append", "(", "(", "depender", ",", "self", ".", "shutit_map", ".", "get", "(", "dependee_id", ")", ",", "dependee_id", ")", ")", "triples", "=", "err_checker", "(", "[", "self", ".", "check_dependee_exists", "(", "depender", ",", "dependee", ",", "dependee_id", ")", "for", "depender", ",", "dependee", ",", "dependee_id", "in", "triples", "]", ",", "triples", ")", "triples", "=", "err_checker", "(", "[", "self", ".", "check_dependee_build", "(", "depender", ",", "dependee", ",", "dependee_id", ")", "for", "depender", ",", "dependee", ",", 
"dependee_id", "in", "triples", "]", ",", "triples", ")", "triples", "=", "err_checker", "(", "[", "check_dependee_order", "(", "depender", ",", "dependee", ",", "dependee_id", ")", "for", "depender", ",", "dependee", ",", "dependee_id", "in", "triples", "]", ",", "triples", ")", "if", "found_errs", ":", "return", "[", "(", "err", ",", ")", "for", "err", "in", "found_errs", "]", "self", ".", "log", "(", "'Modules configured to be built (in order) are: '", ",", "level", "=", "logging", ".", "DEBUG", ")", "for", "module_id", "in", "self", ".", "module_ids", "(", ")", ":", "module", "=", "self", ".", "shutit_map", "[", "module_id", "]", "if", "cfg", "[", "module_id", "]", "[", "'shutit.core.module.build'", "]", ":", "self", ".", "log", "(", "module_id", "+", "' '", "+", "str", "(", "module", ".", "run_order", ")", ",", "level", "=", "logging", ".", "DEBUG", ")", "self", ".", "log", "(", "'\\n'", ",", "level", "=", "logging", ".", "DEBUG", ")", "return", "[", "]" ]
38.183673
25.122449
def search_dates(self, text, languages=None, settings=None): """ Find all substrings of the given string which represent date and/or time and parse them. :param text: A string in a natural language which may contain date and/or time expressions. :type text: str|unicode :param languages: A list of two letters language codes.e.g. ['en', 'es']. If languages are given, it will not attempt to detect the language. :type languages: list :param settings: Configure customized behavior using settings defined in :mod:`dateparser.conf.Settings`. :type settings: dict :return: a dict mapping keys to two letter language code and a list of tuples of pairs: substring representing date expressions and corresponding :mod:`datetime.datetime` object. For example: {'Language': 'en', 'Dates': [('on 4 October 1957', datetime.datetime(1957, 10, 4, 0, 0))]} If language of the string isn't recognised returns: {'Language': None, 'Dates': None} :raises: ValueError - Unknown Language """ language_shortname = self.detect_language(text=text, languages=languages) if not language_shortname: return {'Language': None, 'Dates': None} return {'Language': language_shortname, 'Dates': self.search.search_parse(language_shortname, text, settings=settings)}
[ "def", "search_dates", "(", "self", ",", "text", ",", "languages", "=", "None", ",", "settings", "=", "None", ")", ":", "language_shortname", "=", "self", ".", "detect_language", "(", "text", "=", "text", ",", "languages", "=", "languages", ")", "if", "not", "language_shortname", ":", "return", "{", "'Language'", ":", "None", ",", "'Dates'", ":", "None", "}", "return", "{", "'Language'", ":", "language_shortname", ",", "'Dates'", ":", "self", ".", "search", ".", "search_parse", "(", "language_shortname", ",", "text", ",", "settings", "=", "settings", ")", "}" ]
52.758621
30.62069
def trim_decimals(s, precision=-3): """ Convert from scientific notation using precision """ encoded = s.encode('ascii', 'ignore') str_val = "" if six.PY3: str_val = str(encoded, encoding='ascii', errors='ignore')[:precision] else: # If precision is 0, this must be handled seperately if precision == 0: str_val = str(encoded) else: str_val = str(encoded)[:precision] if len(str_val) > 0: return float(str_val) else: return 0
[ "def", "trim_decimals", "(", "s", ",", "precision", "=", "-", "3", ")", ":", "encoded", "=", "s", ".", "encode", "(", "'ascii'", ",", "'ignore'", ")", "str_val", "=", "\"\"", "if", "six", ".", "PY3", ":", "str_val", "=", "str", "(", "encoded", ",", "encoding", "=", "'ascii'", ",", "errors", "=", "'ignore'", ")", "[", ":", "precision", "]", "else", ":", "# If precision is 0, this must be handled seperately", "if", "precision", "==", "0", ":", "str_val", "=", "str", "(", "encoded", ")", "else", ":", "str_val", "=", "str", "(", "encoded", ")", "[", ":", "precision", "]", "if", "len", "(", "str_val", ")", ">", "0", ":", "return", "float", "(", "str_val", ")", "else", ":", "return", "0" ]
32.444444
15
def expect_token(token, tok_type, tok_str=None): """ Verifies that the given token is of the expected type. If tok_str is given, the token string is verified too. If the token doesn't match, raises an informative ValueError. """ if not match_token(token, tok_type, tok_str): raise ValueError("Expected token %s, got %s on line %s col %s" % ( token_repr(tok_type, tok_str), str(token), token.start[0], token.start[1] + 1))
[ "def", "expect_token", "(", "token", ",", "tok_type", ",", "tok_str", "=", "None", ")", ":", "if", "not", "match_token", "(", "token", ",", "tok_type", ",", "tok_str", ")", ":", "raise", "ValueError", "(", "\"Expected token %s, got %s on line %s col %s\"", "%", "(", "token_repr", "(", "tok_type", ",", "tok_str", ")", ",", "str", "(", "token", ")", ",", "token", ".", "start", "[", "0", "]", ",", "token", ".", "start", "[", "1", "]", "+", "1", ")", ")" ]
48.777778
16.555556
def file_key_regenerate( blockchain_id, hostname, config_path=CONFIG_PATH, wallet_keys=None ): """ Generate a new encryption key. Retire the existing key, if it exists. Return {'status': True} on success Return {'error': ...} on error """ config_dir = os.path.dirname(config_path) current_key = file_key_lookup( blockchain_id, 0, hostname, config_path=config_path ) if 'status' in current_key and current_key['status']: # retire # NOTE: implicitly depends on this method failing only because the key doesn't exist res = file_key_retire( blockchain_id, current_key, config_path=config_path, wallet_keys=wallet_keys ) if 'error' in res: log.error("Failed to retire key %s: %s" % (current_key['key_id'], res['error'])) return {'error': 'Failed to retire key'} # make a new key res = blockstack_gpg.gpg_app_create_key( blockchain_id, "files", hostname, wallet_keys=wallet_keys, config_dir=config_dir ) if 'error' in res: log.error("Failed to generate new key: %s" % res['error']) return {'error': 'Failed to generate new key'} return {'status': True}
[ "def", "file_key_regenerate", "(", "blockchain_id", ",", "hostname", ",", "config_path", "=", "CONFIG_PATH", ",", "wallet_keys", "=", "None", ")", ":", "config_dir", "=", "os", ".", "path", ".", "dirname", "(", "config_path", ")", "current_key", "=", "file_key_lookup", "(", "blockchain_id", ",", "0", ",", "hostname", ",", "config_path", "=", "config_path", ")", "if", "'status'", "in", "current_key", "and", "current_key", "[", "'status'", "]", ":", "# retire", "# NOTE: implicitly depends on this method failing only because the key doesn't exist", "res", "=", "file_key_retire", "(", "blockchain_id", ",", "current_key", ",", "config_path", "=", "config_path", ",", "wallet_keys", "=", "wallet_keys", ")", "if", "'error'", "in", "res", ":", "log", ".", "error", "(", "\"Failed to retire key %s: %s\"", "%", "(", "current_key", "[", "'key_id'", "]", ",", "res", "[", "'error'", "]", ")", ")", "return", "{", "'error'", ":", "'Failed to retire key'", "}", "# make a new key ", "res", "=", "blockstack_gpg", ".", "gpg_app_create_key", "(", "blockchain_id", ",", "\"files\"", ",", "hostname", ",", "wallet_keys", "=", "wallet_keys", ",", "config_dir", "=", "config_dir", ")", "if", "'error'", "in", "res", ":", "log", ".", "error", "(", "\"Failed to generate new key: %s\"", "%", "res", "[", "'error'", "]", ")", "return", "{", "'error'", ":", "'Failed to generate new key'", "}", "return", "{", "'status'", ":", "True", "}" ]
46.16
26.24
def likelihood_table_to_probs(self, lktable): """ Calculates this formula (1), given the log of the numerator as input p_k * f(x_i, a_k) t_k(x_i) = ----------------------- ---K \ p_k * f(x_i, a_k) /__k=1 x_i is data point i P_k is cluster k of K t_k is the posterior probability of x_i belonging to P_k p_k is the prior probability of belong to P_k (the proportional size of P_k) f(x, a) is the likelihood of x with parameters a """ m = lktable.max(1) # row max of lktable shifted = lktable-m[:,np.newaxis] # shift lktable of log-likelihoods to a non-underflowing range expsum = np.exp(shifted).sum(1) # convert logs to (scaled) normal space, and sum the rows logexpsum = np.log(expsum)+m # convert back to log space, and undo the scaling return np.exp(lktable - logexpsum[:, np.newaxis])
[ "def", "likelihood_table_to_probs", "(", "self", ",", "lktable", ")", ":", "m", "=", "lktable", ".", "max", "(", "1", ")", "# row max of lktable", "shifted", "=", "lktable", "-", "m", "[", ":", ",", "np", ".", "newaxis", "]", "# shift lktable of log-likelihoods to a non-underflowing range", "expsum", "=", "np", ".", "exp", "(", "shifted", ")", ".", "sum", "(", "1", ")", "# convert logs to (scaled) normal space, and sum the rows", "logexpsum", "=", "np", ".", "log", "(", "expsum", ")", "+", "m", "# convert back to log space, and undo the scaling", "return", "np", ".", "exp", "(", "lktable", "-", "logexpsum", "[", ":", ",", "np", ".", "newaxis", "]", ")" ]
47.52381
20.47619
def paintEvent(self, event): """ Overloads the paint event to paint additional \ hint information if no text is set on the \ editor. :param event | <QPaintEvent> """ super(XLineEdit, self).paintEvent(event) # paint the hint text if not text is set if self.text() and not (self.icon() and not self.icon().isNull()): return # paint the hint text with XPainter(self) as painter: painter.setPen(self.hintColor()) icon = self.icon() left, top, right, bottom = self.getTextMargins() w = self.width() h = self.height() - 2 w -= (right + left) h -= (bottom + top) if icon and not icon.isNull(): size = icon.actualSize(self.iconSize()) x = self.cornerRadius() + 2 y = (self.height() - size.height()) / 2.0 painter.drawPixmap(x, y, icon.pixmap(size.width(), size.height())) w -= size.width() - 2 else: x = 6 + left w -= self._buttonWidth y = 2 + top # create the elided hint if not self.text() and self.hint(): rect = self.cursorRect() metrics = QFontMetrics(self.font()) hint = metrics.elidedText(self.hint(), Qt.ElideRight, w) align = self.alignment() if align & Qt.AlignHCenter: x = 0 else: x = rect.center().x() painter.drawText(x, y, w, h, align, hint)
[ "def", "paintEvent", "(", "self", ",", "event", ")", ":", "super", "(", "XLineEdit", ",", "self", ")", ".", "paintEvent", "(", "event", ")", "# paint the hint text if not text is set", "if", "self", ".", "text", "(", ")", "and", "not", "(", "self", ".", "icon", "(", ")", "and", "not", "self", ".", "icon", "(", ")", ".", "isNull", "(", ")", ")", ":", "return", "# paint the hint text", "with", "XPainter", "(", "self", ")", "as", "painter", ":", "painter", ".", "setPen", "(", "self", ".", "hintColor", "(", ")", ")", "icon", "=", "self", ".", "icon", "(", ")", "left", ",", "top", ",", "right", ",", "bottom", "=", "self", ".", "getTextMargins", "(", ")", "w", "=", "self", ".", "width", "(", ")", "h", "=", "self", ".", "height", "(", ")", "-", "2", "w", "-=", "(", "right", "+", "left", ")", "h", "-=", "(", "bottom", "+", "top", ")", "if", "icon", "and", "not", "icon", ".", "isNull", "(", ")", ":", "size", "=", "icon", ".", "actualSize", "(", "self", ".", "iconSize", "(", ")", ")", "x", "=", "self", ".", "cornerRadius", "(", ")", "+", "2", "y", "=", "(", "self", ".", "height", "(", ")", "-", "size", ".", "height", "(", ")", ")", "/", "2.0", "painter", ".", "drawPixmap", "(", "x", ",", "y", ",", "icon", ".", "pixmap", "(", "size", ".", "width", "(", ")", ",", "size", ".", "height", "(", ")", ")", ")", "w", "-=", "size", ".", "width", "(", ")", "-", "2", "else", ":", "x", "=", "6", "+", "left", "w", "-=", "self", ".", "_buttonWidth", "y", "=", "2", "+", "top", "# create the elided hint\r", "if", "not", "self", ".", "text", "(", ")", "and", "self", ".", "hint", "(", ")", ":", "rect", "=", "self", ".", "cursorRect", "(", ")", "metrics", "=", "QFontMetrics", "(", "self", ".", "font", "(", ")", ")", "hint", "=", "metrics", ".", "elidedText", "(", "self", ".", "hint", "(", ")", ",", "Qt", ".", "ElideRight", ",", "w", ")", "align", "=", "self", ".", "alignment", "(", ")", "if", "align", "&", "Qt", ".", "AlignHCenter", ":", "x", "=", "0", "else", ":", "x", "=", "rect", ".", 
"center", "(", ")", ".", "x", "(", ")", "painter", ".", "drawText", "(", "x", ",", "y", ",", "w", ",", "h", ",", "align", ",", "hint", ")" ]
33.796296
15.537037
def to_set_field(cls): """ Returns a callable instance that will convert a value to a Sequence. :param cls: Valid class type of the items in the Sequence. :return: instance of the SequenceConverter. """ class SetConverter(object): def __init__(self, cls): self._cls = cls @property def cls(self): return resolve_class(self._cls) def __call__(self, values): values = values or set() args = {to_model(self.cls, value) for value in values} return TypedSet(cls=self.cls, args=args) return SetConverter(cls)
[ "def", "to_set_field", "(", "cls", ")", ":", "class", "SetConverter", "(", "object", ")", ":", "def", "__init__", "(", "self", ",", "cls", ")", ":", "self", ".", "_cls", "=", "cls", "@", "property", "def", "cls", "(", "self", ")", ":", "return", "resolve_class", "(", "self", ".", "_cls", ")", "def", "__call__", "(", "self", ",", "values", ")", ":", "values", "=", "values", "or", "set", "(", ")", "args", "=", "{", "to_model", "(", "self", ".", "cls", ",", "value", ")", "for", "value", "in", "values", "}", "return", "TypedSet", "(", "cls", "=", "self", ".", "cls", ",", "args", "=", "args", ")", "return", "SetConverter", "(", "cls", ")" ]
27.545455
18.727273
def encrypt(self, msg, alg='aes_128_cbc', padding='PKCS#7', b64enc=True, block_size=AES_BLOCK_SIZE): """ :param key: The encryption key :param msg: Message to be encrypted :param padding: Which padding that should be used :param b64enc: Whether the result should be base64encoded :param block_size: If PKCS#7 padding which block size to use :return: The encrypted message """ self.__class__._deprecation_notice() if padding == 'PKCS#7': _block_size = block_size elif padding == 'PKCS#5': _block_size = 8 else: _block_size = 0 if _block_size: plen = _block_size - (len(msg) % _block_size) c = chr(plen).encode() msg += c * plen cipher, iv = self.build_cipher(alg) encryptor = cipher.encryptor() cmsg = iv + encryptor.update(msg) + encryptor.finalize() if b64enc: enc_msg = _base64.b64encode(cmsg) else: enc_msg = cmsg return enc_msg
[ "def", "encrypt", "(", "self", ",", "msg", ",", "alg", "=", "'aes_128_cbc'", ",", "padding", "=", "'PKCS#7'", ",", "b64enc", "=", "True", ",", "block_size", "=", "AES_BLOCK_SIZE", ")", ":", "self", ".", "__class__", ".", "_deprecation_notice", "(", ")", "if", "padding", "==", "'PKCS#7'", ":", "_block_size", "=", "block_size", "elif", "padding", "==", "'PKCS#5'", ":", "_block_size", "=", "8", "else", ":", "_block_size", "=", "0", "if", "_block_size", ":", "plen", "=", "_block_size", "-", "(", "len", "(", "msg", ")", "%", "_block_size", ")", "c", "=", "chr", "(", "plen", ")", ".", "encode", "(", ")", "msg", "+=", "c", "*", "plen", "cipher", ",", "iv", "=", "self", ".", "build_cipher", "(", "alg", ")", "encryptor", "=", "cipher", ".", "encryptor", "(", ")", "cmsg", "=", "iv", "+", "encryptor", ".", "update", "(", "msg", ")", "+", "encryptor", ".", "finalize", "(", ")", "if", "b64enc", ":", "enc_msg", "=", "_base64", ".", "b64encode", "(", "cmsg", ")", "else", ":", "enc_msg", "=", "cmsg", "return", "enc_msg" ]
32.333333
15.666667
def sync_from_remote(org, syncer, remote): """ Sync local instance against a single remote object :param * org: the org :param * syncer: the local model syncer :param * remote: the remote object :return: the outcome (created, updated or deleted) """ identity = syncer.identify_remote(remote) with syncer.lock(org, identity): existing = syncer.fetch_local(org, identity) # derive kwargs for the local model (none return here means don't keep) remote_as_kwargs = syncer.local_kwargs(org, remote) # exists locally if existing: existing.org = org # saves pre-fetching since we already have the org if remote_as_kwargs: if syncer.update_required(existing, remote, remote_as_kwargs) or not existing.is_active: for field, value in remote_as_kwargs.items(): setattr(existing, field, value) existing.is_active = True existing.save() return SyncOutcome.updated elif existing.is_active: # exists locally, but shouldn't now to due to model changes syncer.delete_local(existing) return SyncOutcome.deleted elif remote_as_kwargs: syncer.model.objects.create(**remote_as_kwargs) return SyncOutcome.created return SyncOutcome.ignored
[ "def", "sync_from_remote", "(", "org", ",", "syncer", ",", "remote", ")", ":", "identity", "=", "syncer", ".", "identify_remote", "(", "remote", ")", "with", "syncer", ".", "lock", "(", "org", ",", "identity", ")", ":", "existing", "=", "syncer", ".", "fetch_local", "(", "org", ",", "identity", ")", "# derive kwargs for the local model (none return here means don't keep)", "remote_as_kwargs", "=", "syncer", ".", "local_kwargs", "(", "org", ",", "remote", ")", "# exists locally", "if", "existing", ":", "existing", ".", "org", "=", "org", "# saves pre-fetching since we already have the org", "if", "remote_as_kwargs", ":", "if", "syncer", ".", "update_required", "(", "existing", ",", "remote", ",", "remote_as_kwargs", ")", "or", "not", "existing", ".", "is_active", ":", "for", "field", ",", "value", "in", "remote_as_kwargs", ".", "items", "(", ")", ":", "setattr", "(", "existing", ",", "field", ",", "value", ")", "existing", ".", "is_active", "=", "True", "existing", ".", "save", "(", ")", "return", "SyncOutcome", ".", "updated", "elif", "existing", ".", "is_active", ":", "# exists locally, but shouldn't now to due to model changes", "syncer", ".", "delete_local", "(", "existing", ")", "return", "SyncOutcome", ".", "deleted", "elif", "remote_as_kwargs", ":", "syncer", ".", "model", ".", "objects", ".", "create", "(", "*", "*", "remote_as_kwargs", ")", "return", "SyncOutcome", ".", "created", "return", "SyncOutcome", ".", "ignored" ]
35.641026
20.512821
def parseConfig(opt): """Parse configuration :params opt: dict-like object with config and messages keys :returns: restarter, path """ places = ctllib.Places(config=opt['config'], messages=opt['messages']) restarter = functools.partial(ctllib.restart, places) path = filepath.FilePath(opt['config']) return restarter, path
[ "def", "parseConfig", "(", "opt", ")", ":", "places", "=", "ctllib", ".", "Places", "(", "config", "=", "opt", "[", "'config'", "]", ",", "messages", "=", "opt", "[", "'messages'", "]", ")", "restarter", "=", "functools", ".", "partial", "(", "ctllib", ".", "restart", ",", "places", ")", "path", "=", "filepath", ".", "FilePath", "(", "opt", "[", "'config'", "]", ")", "return", "restarter", ",", "path" ]
34.6
16.1
def _create_hidden_port(self, context, network_id, device_id, fixed_ips, port_type=DEVICE_OWNER_ROUTER_INTF): """Creates port used specially for HA purposes.""" port = {'port': { 'tenant_id': '', # intentionally not set 'network_id': network_id, 'mac_address': ATTR_NOT_SPECIFIED, 'fixed_ips': fixed_ips, 'device_id': device_id, 'device_owner': port_type, 'admin_state_up': True, 'name': ''}} if extensions.is_extension_supported(self._core_plugin, "dns-integration"): port['port'].update(dns_name='') core_plugin = bc.get_plugin() return core_plugin.create_port(context, port)
[ "def", "_create_hidden_port", "(", "self", ",", "context", ",", "network_id", ",", "device_id", ",", "fixed_ips", ",", "port_type", "=", "DEVICE_OWNER_ROUTER_INTF", ")", ":", "port", "=", "{", "'port'", ":", "{", "'tenant_id'", ":", "''", ",", "# intentionally not set", "'network_id'", ":", "network_id", ",", "'mac_address'", ":", "ATTR_NOT_SPECIFIED", ",", "'fixed_ips'", ":", "fixed_ips", ",", "'device_id'", ":", "device_id", ",", "'device_owner'", ":", "port_type", ",", "'admin_state_up'", ":", "True", ",", "'name'", ":", "''", "}", "}", "if", "extensions", ".", "is_extension_supported", "(", "self", ".", "_core_plugin", ",", "\"dns-integration\"", ")", ":", "port", "[", "'port'", "]", ".", "update", "(", "dns_name", "=", "''", ")", "core_plugin", "=", "bc", ".", "get_plugin", "(", ")", "return", "core_plugin", ".", "create_port", "(", "context", ",", "port", ")" ]
46.294118
11.588235
def _rescale(ar): """Shift and rescale array ar to the interval [-1, 1]""" max = np.nanmax(ar) min = np.nanmin(ar) midpoint = (max + min) / 2.0 return 2.0 * (ar - midpoint) / (max - min)
[ "def", "_rescale", "(", "ar", ")", ":", "max", "=", "np", ".", "nanmax", "(", "ar", ")", "min", "=", "np", ".", "nanmin", "(", "ar", ")", "midpoint", "=", "(", "max", "+", "min", ")", "/", "2.0", "return", "2.0", "*", "(", "ar", "-", "midpoint", ")", "/", "(", "max", "-", "min", ")" ]
33.5
11.833333
def read_cyclic_can_msg(self, channel, count): """ Reads back the list of CAN messages for automatically sending. :param int channel: CAN channel, to be used (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`). :param int count: The number of cyclic CAN messages to be received. :return: List of received CAN messages (up to 16, see structure :class:`CanMsg`). :rtype: list(CanMsg) """ c_channel = BYTE(channel) c_can_msg = (CanMsg * count)() c_count = DWORD(count) UcanReadCyclicCanMsg(self._handle, byref(c_channel), c_can_msg, c_count) return c_can_msg[:c_count.value]
[ "def", "read_cyclic_can_msg", "(", "self", ",", "channel", ",", "count", ")", ":", "c_channel", "=", "BYTE", "(", "channel", ")", "c_can_msg", "=", "(", "CanMsg", "*", "count", ")", "(", ")", "c_count", "=", "DWORD", "(", "count", ")", "UcanReadCyclicCanMsg", "(", "self", ".", "_handle", ",", "byref", "(", "c_channel", ")", ",", "c_can_msg", ",", "c_count", ")", "return", "c_can_msg", "[", ":", "c_count", ".", "value", "]" ]
47.428571
21.714286
def rescan(self): """Checks files and directories on watchlist for updates, rescans them for new data products. If any are found, returns them. Skips those in directories whose watchingState is set to Purr.UNWATCHED. """ if not self.attached: return dprint(5, "starting rescan") newstuff = {}; # this accumulates names of new or changed files. Keys are paths, values are 'quiet' flag. # store timestamp of scan self.last_scan_timestamp = time.time() # go through watched files/directories, check for mtime changes for path, watcher in list(self.watchers.items()): # get list of new files from watcher newfiles = watcher.newFiles() # None indicates access error, so drop it from watcher set if newfiles is None: if watcher.survive_deletion: dprintf(5, "access error on %s, but will still be watched\n", watcher.path) else: dprintf(2, "access error on %s, will no longer be watched\n", watcher.path) del self.watchers[path] if not watcher.disappeared: self.emit(SIGNAL("disappearedFile"), path) watcher.disappeared = True continue dprintf(5, "%s: %d new file(s)\n", watcher.path, len(newfiles)) # if a file has its own watcher, and is independently reported by a directory watcher, skip the directory's # version and let the file's watcher report it. Reason for this is that the file watcher may have a more # up-to-date timestamp, so we trust it over the dir watcher. 
newfiles = [p for p in newfiles if p is path or p not in self.watchers] # skip files in self._unwatched_paths newfiles = [filename for filename in newfiles if self._watching_state.get(os.path.dirname(filename)) > Purr.UNWATCHED] # Now go through files and add them to the newstuff dict for newfile in newfiles: # if quiet flag is explicitly set on watcher, enforce it # if not pouncing on directory, also add quietly if watcher.quiet or self._watching_state.get(os.path.dirname(newfile)) < Purr.POUNCE: quiet = True # else add quietly if file is not in the quiet patterns else: quiet = matches_patterns(os.path.basename(newfile), self._quiet_patterns) # add file to list of new products. Since a file may be reported by multiple # watchers, make the quiet flag a logical AND of all the quiet flags (i.e. DP will be # marked as quiet only if all watchers report it as quiet). newstuff[newfile] = quiet and newstuff.get(newfile, True) dprintf(4, "%s: new data product, quiet=%d (watcher quiet: %s)\n", newfile, quiet, watcher.quiet) # add a watcher for this file to the temp_watchers list. this is used below # to detect renamed and deleted files self.temp_watchers[newfile] = Purrer.WatchedFile(newfile) # now, go through temp_watchers to see if any newly pounced-on files have disappeared for path, watcher in list(self.temp_watchers.items()): # get list of new files from watcher if watcher.newFiles() is None: dprintf(2, "access error on %s, marking as disappeared", watcher.path) del self.temp_watchers[path] self.emit(SIGNAL("disappearedFile"), path) # if we have new data products, send them to the main window return self.makeDataProducts(iter(newstuff.items()))
[ "def", "rescan", "(", "self", ")", ":", "if", "not", "self", ".", "attached", ":", "return", "dprint", "(", "5", ",", "\"starting rescan\"", ")", "newstuff", "=", "{", "}", "# this accumulates names of new or changed files. Keys are paths, values are 'quiet' flag.", "# store timestamp of scan", "self", ".", "last_scan_timestamp", "=", "time", ".", "time", "(", ")", "# go through watched files/directories, check for mtime changes", "for", "path", ",", "watcher", "in", "list", "(", "self", ".", "watchers", ".", "items", "(", ")", ")", ":", "# get list of new files from watcher", "newfiles", "=", "watcher", ".", "newFiles", "(", ")", "# None indicates access error, so drop it from watcher set", "if", "newfiles", "is", "None", ":", "if", "watcher", ".", "survive_deletion", ":", "dprintf", "(", "5", ",", "\"access error on %s, but will still be watched\\n\"", ",", "watcher", ".", "path", ")", "else", ":", "dprintf", "(", "2", ",", "\"access error on %s, will no longer be watched\\n\"", ",", "watcher", ".", "path", ")", "del", "self", ".", "watchers", "[", "path", "]", "if", "not", "watcher", ".", "disappeared", ":", "self", ".", "emit", "(", "SIGNAL", "(", "\"disappearedFile\"", ")", ",", "path", ")", "watcher", ".", "disappeared", "=", "True", "continue", "dprintf", "(", "5", ",", "\"%s: %d new file(s)\\n\"", ",", "watcher", ".", "path", ",", "len", "(", "newfiles", ")", ")", "# if a file has its own watcher, and is independently reported by a directory watcher, skip the directory's", "# version and let the file's watcher report it. 
Reason for this is that the file watcher may have a more", "# up-to-date timestamp, so we trust it over the dir watcher.", "newfiles", "=", "[", "p", "for", "p", "in", "newfiles", "if", "p", "is", "path", "or", "p", "not", "in", "self", ".", "watchers", "]", "# skip files in self._unwatched_paths", "newfiles", "=", "[", "filename", "for", "filename", "in", "newfiles", "if", "self", ".", "_watching_state", ".", "get", "(", "os", ".", "path", ".", "dirname", "(", "filename", ")", ")", ">", "Purr", ".", "UNWATCHED", "]", "# Now go through files and add them to the newstuff dict", "for", "newfile", "in", "newfiles", ":", "# if quiet flag is explicitly set on watcher, enforce it", "# if not pouncing on directory, also add quietly", "if", "watcher", ".", "quiet", "or", "self", ".", "_watching_state", ".", "get", "(", "os", ".", "path", ".", "dirname", "(", "newfile", ")", ")", "<", "Purr", ".", "POUNCE", ":", "quiet", "=", "True", "# else add quietly if file is not in the quiet patterns", "else", ":", "quiet", "=", "matches_patterns", "(", "os", ".", "path", ".", "basename", "(", "newfile", ")", ",", "self", ".", "_quiet_patterns", ")", "# add file to list of new products. Since a file may be reported by multiple", "# watchers, make the quiet flag a logical AND of all the quiet flags (i.e. DP will be", "# marked as quiet only if all watchers report it as quiet).", "newstuff", "[", "newfile", "]", "=", "quiet", "and", "newstuff", ".", "get", "(", "newfile", ",", "True", ")", "dprintf", "(", "4", ",", "\"%s: new data product, quiet=%d (watcher quiet: %s)\\n\"", ",", "newfile", ",", "quiet", ",", "watcher", ".", "quiet", ")", "# add a watcher for this file to the temp_watchers list. 
this is used below", "# to detect renamed and deleted files", "self", ".", "temp_watchers", "[", "newfile", "]", "=", "Purrer", ".", "WatchedFile", "(", "newfile", ")", "# now, go through temp_watchers to see if any newly pounced-on files have disappeared", "for", "path", ",", "watcher", "in", "list", "(", "self", ".", "temp_watchers", ".", "items", "(", ")", ")", ":", "# get list of new files from watcher", "if", "watcher", ".", "newFiles", "(", ")", "is", "None", ":", "dprintf", "(", "2", ",", "\"access error on %s, marking as disappeared\"", ",", "watcher", ".", "path", ")", "del", "self", ".", "temp_watchers", "[", "path", "]", "self", ".", "emit", "(", "SIGNAL", "(", "\"disappearedFile\"", ")", ",", "path", ")", "# if we have new data products, send them to the main window", "return", "self", ".", "makeDataProducts", "(", "iter", "(", "newstuff", ".", "items", "(", ")", ")", ")" ]
63.830508
28.067797
def calculate_between_class_scatter_matrix(X, y): """Calculates the Between-Class Scatter matrix Parameters: ----------- X : array-like, shape (m, n) - the samples y : array-like, shape (m, ) - the class labels Returns: -------- between_class_scatter_matrix : array-like, shape (n, n) """ mean_vectors = calculate_mean_vectors(X, y) n_features = X.shape[1] Sb = np.zeros((n_features, n_features)) m = np.mean(X, axis=0).reshape(n_features, 1) for cl, m_i in zip(np.unique(y), mean_vectors): v = m_i.reshape(n_features, 1) - m Sb += X[y == cl, :].shape[0] * v @ v.T return Sb
[ "def", "calculate_between_class_scatter_matrix", "(", "X", ",", "y", ")", ":", "mean_vectors", "=", "calculate_mean_vectors", "(", "X", ",", "y", ")", "n_features", "=", "X", ".", "shape", "[", "1", "]", "Sb", "=", "np", ".", "zeros", "(", "(", "n_features", ",", "n_features", ")", ")", "m", "=", "np", ".", "mean", "(", "X", ",", "axis", "=", "0", ")", ".", "reshape", "(", "n_features", ",", "1", ")", "for", "cl", ",", "m_i", "in", "zip", "(", "np", ".", "unique", "(", "y", ")", ",", "mean_vectors", ")", ":", "v", "=", "m_i", ".", "reshape", "(", "n_features", ",", "1", ")", "-", "m", "Sb", "+=", "X", "[", "y", "==", "cl", ",", ":", "]", ".", "shape", "[", "0", "]", "*", "v", "@", "v", ".", "T", "return", "Sb" ]
26.708333
19.166667
def findAll(self, name=None, attrs={}, recursive=True, text=None, limit=None, **kwargs): """Extracts a list of Tag objects that match the given criteria. You can specify the name of the Tag and any attributes you want the Tag to have. The value of a key-value pair in the 'attrs' map can be a string, a list of strings, a regular expression object, or a callable that takes a string and returns whether or not the string matches for some custom definition of 'matches'. The same is true of the tag name.""" generator = self.recursiveChildGenerator if not recursive: generator = self.childGenerator return self._findAll(name, attrs, text, limit, generator, **kwargs)
[ "def", "findAll", "(", "self", ",", "name", "=", "None", ",", "attrs", "=", "{", "}", ",", "recursive", "=", "True", ",", "text", "=", "None", ",", "limit", "=", "None", ",", "*", "*", "kwargs", ")", ":", "generator", "=", "self", ".", "recursiveChildGenerator", "if", "not", "recursive", ":", "generator", "=", "self", ".", "childGenerator", "return", "self", ".", "_findAll", "(", "name", ",", "attrs", ",", "text", ",", "limit", ",", "generator", ",", "*", "*", "kwargs", ")" ]
51.266667
17.4
def remember(self, key, minutes, callback): """ Get an item from the cache, or store the default value. :param key: The cache key :type key: str :param minutes: The lifetime in minutes of the cached value :type minutes: int or datetime :param callback: The default function :type callback: mixed :rtype: mixed """ # If the item exists in the cache we will just return this immediately # otherwise we will execute the given callback and cache the result # of that execution for the given number of minutes in storage. val = self.get(key) if val is not None: return val val = value(callback) self.put(key, val, minutes) return val
[ "def", "remember", "(", "self", ",", "key", ",", "minutes", ",", "callback", ")", ":", "# If the item exists in the cache we will just return this immediately", "# otherwise we will execute the given callback and cache the result", "# of that execution for the given number of minutes in storage.", "val", "=", "self", ".", "get", "(", "key", ")", "if", "val", "is", "not", "None", ":", "return", "val", "val", "=", "value", "(", "callback", ")", "self", ".", "put", "(", "key", ",", "val", ",", "minutes", ")", "return", "val" ]
28.333333
21.518519
def get_args(job): """ This function gets the arguments from a job :param job: job dictionary :return: input tuple, optional tuple """ return tuple(_get_args_loop(job, INPUT_FIELD)), \ tuple(_get_args_loop(job, OPTIONAL_FIELD))
[ "def", "get_args", "(", "job", ")", ":", "return", "tuple", "(", "_get_args_loop", "(", "job", ",", "INPUT_FIELD", ")", ")", ",", "tuple", "(", "_get_args_loop", "(", "job", ",", "OPTIONAL_FIELD", ")", ")" ]
31.5
7.75
def max_event_offset(event_list): """Find the offset (end-time) of last event Parameters ---------- event_list : list or dcase_util.containers.MetaDataContainer A list containing event dicts Returns ------- float > 0 maximum offset """ if isinstance(event_list, dcase_util.containers.MetaDataContainer): return event_list.max_offset else: max_offset = 0 for event in event_list: if 'event_offset' in event: if event['event_offset'] > max_offset: max_offset = event['event_offset'] elif 'offset' in event: if event['offset'] > max_offset: max_offset = event['offset'] return max_offset
[ "def", "max_event_offset", "(", "event_list", ")", ":", "if", "isinstance", "(", "event_list", ",", "dcase_util", ".", "containers", ".", "MetaDataContainer", ")", ":", "return", "event_list", ".", "max_offset", "else", ":", "max_offset", "=", "0", "for", "event", "in", "event_list", ":", "if", "'event_offset'", "in", "event", ":", "if", "event", "[", "'event_offset'", "]", ">", "max_offset", ":", "max_offset", "=", "event", "[", "'event_offset'", "]", "elif", "'offset'", "in", "event", ":", "if", "event", "[", "'offset'", "]", ">", "max_offset", ":", "max_offset", "=", "event", "[", "'offset'", "]", "return", "max_offset" ]
24.866667
20.866667
def write_tsv(output_stream, *tup, **kwargs): """ Write argument list in `tup` out as a tab-separeated row to the stream. """ encoding = kwargs.get('encoding') or 'utf-8' value = '\t'.join([s for s in tup]) + '\n' output_stream.write(value.encode(encoding))
[ "def", "write_tsv", "(", "output_stream", ",", "*", "tup", ",", "*", "*", "kwargs", ")", ":", "encoding", "=", "kwargs", ".", "get", "(", "'encoding'", ")", "or", "'utf-8'", "value", "=", "'\\t'", ".", "join", "(", "[", "s", "for", "s", "in", "tup", "]", ")", "+", "'\\n'", "output_stream", ".", "write", "(", "value", ".", "encode", "(", "encoding", ")", ")" ]
39.285714
8.714286
def walk(self, relpath, topdown=True): """Walk the file tree rooted at `path`. Works like os.walk but returned root value is relative path. Ignored paths will not be returned. """ for root, dirs, files in self._walk_raw(relpath, topdown): matched_dirs = self.ignore.match_files([os.path.join(root, "{}/".format(d)) for d in dirs]) matched_files = self.ignore.match_files([os.path.join(root, f) for f in files]) for matched_dir in matched_dirs: dirs.remove(fast_relpath(matched_dir, root).rstrip('/')) for matched_file in matched_files: files.remove(fast_relpath(matched_file, root)) yield root, dirs, files
[ "def", "walk", "(", "self", ",", "relpath", ",", "topdown", "=", "True", ")", ":", "for", "root", ",", "dirs", ",", "files", "in", "self", ".", "_walk_raw", "(", "relpath", ",", "topdown", ")", ":", "matched_dirs", "=", "self", ".", "ignore", ".", "match_files", "(", "[", "os", ".", "path", ".", "join", "(", "root", ",", "\"{}/\"", ".", "format", "(", "d", ")", ")", "for", "d", "in", "dirs", "]", ")", "matched_files", "=", "self", ".", "ignore", ".", "match_files", "(", "[", "os", ".", "path", ".", "join", "(", "root", ",", "f", ")", "for", "f", "in", "files", "]", ")", "for", "matched_dir", "in", "matched_dirs", ":", "dirs", ".", "remove", "(", "fast_relpath", "(", "matched_dir", ",", "root", ")", ".", "rstrip", "(", "'/'", ")", ")", "for", "matched_file", "in", "matched_files", ":", "files", ".", "remove", "(", "fast_relpath", "(", "matched_file", ",", "root", ")", ")", "yield", "root", ",", "dirs", ",", "files" ]
41.25
20.125