Dataset columns (name: type, observed range):

hexsha: string (length 40 to 40)
size: int64 (2 to 1.02M)
ext: string (10 distinct values)
lang: string (1 distinct value)
max_stars_repo_path: string (length 4 to 245)
max_stars_repo_name: string (length 6 to 130)
max_stars_repo_head_hexsha: string (length 40 to 40)
max_stars_repo_licenses: list (length 1 to 10)
max_stars_count: int64 (1 to 191k)
max_stars_repo_stars_event_min_datetime: string (length 24 to 24)
max_stars_repo_stars_event_max_datetime: string (length 24 to 24)
max_issues_repo_path: string (length 4 to 245)
max_issues_repo_name: string (length 6 to 130)
max_issues_repo_head_hexsha: string (length 40 to 40)
max_issues_repo_licenses: list (length 1 to 10)
max_issues_count: int64 (1 to 67k)
max_issues_repo_issues_event_min_datetime: string (length 24 to 24)
max_issues_repo_issues_event_max_datetime: string (length 24 to 24)
max_forks_repo_path: string (length 4 to 245)
max_forks_repo_name: string (length 6 to 130)
max_forks_repo_head_hexsha: string (length 40 to 40)
max_forks_repo_licenses: list (length 1 to 10)
max_forks_count: int64 (1 to 105k)
max_forks_repo_forks_event_min_datetime: string (length 24 to 24)
max_forks_repo_forks_event_max_datetime: string (length 24 to 24)
content: string (length 2 to 1.02M)
avg_line_length: float64 (1 to 417k)
max_line_length: int64 (1 to 987k)
alphanum_fraction: float64 (0 to 1)
content_no_comment: string (length 0 to 1.01M)
is_comment_constant_removed: bool (1 distinct value)
is_sharp_comment_removed: bool (1 distinct value)
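Each record below fills these columns for one source file: provenance metadata for the most-starred, most-issues and most-forks copies of the file, the raw text in content, a comment-stripped variant in content_no_comment, and a few simple text statistics. As a minimal sketch of how such a dump might be inspected (the parquet storage, the file name data.parquet and the use of pandas are assumptions made for illustration, not something this dump states):

# Minimal, hypothetical sketch: load one shard of the dump and look at a row.
# "data.parquet" is an assumed file name, not part of the dataset description.
import pandas as pd

df = pd.read_parquet("data.parquet")

# Keep only Python files under 100 kB.
small = df[(df["lang"] == "Python") & (df["size"] < 100_000)]

row = small.iloc[0]
print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
print(row["content"][:200])  # first 200 characters of the source file
print(row["avg_line_length"], row["max_line_length"], row["alphanum_fraction"])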

hexsha: f702639b0a5a2b1ee290b9721b4444d46a26ff08
size: 17,303
ext: py
lang: Python
max_stars_repo_path: python/generate_python_ctypes.py
max_stars_repo_name: arves100/allegro5
max_stars_repo_head_hexsha: 6187893e4ece9258136d864bf4b99cb9371d56c5
max_stars_repo_licenses: [ "BSD-3-Clause" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2020-02-19T10:56:38.000Z
max_stars_repo_stars_event_max_datetime: 2020-02-19T10:56:38.000Z
max_issues_repo_path: python/generate_python_ctypes.py
max_issues_repo_name: arves100/allegro5
max_issues_repo_head_hexsha: 6187893e4ece9258136d864bf4b99cb9371d56c5
max_issues_repo_licenses: [ "BSD-3-Clause" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: python/generate_python_ctypes.py
max_forks_repo_name: arves100/allegro5
max_forks_repo_head_hexsha: 6187893e4ece9258136d864bf4b99cb9371d56c5
max_forks_repo_licenses: [ "BSD-3-Clause" ]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2019-04-15T18:02:27.000Z
max_forks_repo_forks_event_max_datetime: 2019-04-15T18:02:27.000Z
#!/usr/bin/env python import sys import re import optparse from ctypes import * """ This script will use the prototypes from "checkdocs.py -s" to concoct a 1:1 Python wrapper for Allegro. """ class _AL_UTF8String: pass class Allegro: def __init__(self): self.types = {} self.functions = {} self.constants = {} def add_struct(self, name): x = type(name, (Structure, ), {}) self.types[name] = x def add_union(self, name): x = type(name, (Union, ), {}) self.types[name] = x def get_type(self, ptype): conversion = { "bool": c_bool, "_Bool": c_bool, "char": c_byte, "unsignedchar": c_ubyte, "int": c_int, "unsigned": c_uint, "unsignedint": c_uint, "int16_t": c_int16, "uint16_t": c_uint16, "int32_t": c_int32, "uint32_t": c_uint32, "int64_t": c_int64, "uint64_t": c_uint64, "uintptr_t": c_void_p, "intptr_t": c_void_p, "GLuint": c_uint, "unsignedlong": c_ulong, "long": c_long, "size_t": c_size_t, "off_t": c_int64, "time_t": c_int64, "va_list": c_void_p, "float": c_float, "double": c_double, "al_fixed": c_int, "HWND": c_void_p, "char*": _AL_UTF8String, # hack: this probably shouldn't be in the public docs "postprocess_callback_t": c_void_p, } ptype = re.sub(r"\bstruct|union\b", "", ptype) ptype = re.sub(r"\bconst\b", "", ptype) ptype = re.sub(r"\bextern\b", "", ptype) ptype = re.sub(r"\b__inline__\b", "", ptype) ptype = re.sub(r"\s+", "", ptype) if ptype.endswith("*"): if ptype in conversion: return conversion[ptype] t = ptype[:-1] if t in self.types: return POINTER(self.types[t]) return c_void_p elif ptype in self.types: return self.types[ptype] else: try: return conversion[ptype] except KeyError: print("Type Error:" + str(ptype)) return None def parse_funcs(self, funcs): """ Go through all documented functions and add their prototypes as Python functions. The file should have been generated by Allegro's documentation generation scripts. 
""" for func in funcs: name, proto = func.split(":", 1) if not name.startswith("al_"): continue proto = proto.strip() name = name[:-2] if proto.startswith("enum"): continue if proto.startswith("typedef"): continue if "=" in proto: continue if proto.startswith("#"): continue funcstart = proto.find(name) funcend = funcstart + len(name) ret = proto[:funcstart].rstrip() params = proto[funcend:].strip(" ;") if params[0] != "(" or params[-1] != ")": print("Error:") print(params) continue params2 = params[1:-1] # remove callback argument lists balance = 0 params = "" for c in params2: if c == ")": balance -= 1 if balance == 0: params += c if c == "(": balance += 1 params = params.split(",") plist = [] for param in params: param = re.sub(r"\bconst\b", "", param) param = param.strip() if param == "void": continue if param == "": continue if param == "...": continue # treat arrays as a void pointer, for now if param.endswith("]") or param.endswith("*"): plist.append(c_void_p) continue # treat callbacks as a void pointer, for now if param.endswith(")"): plist.append(c_void_p) continue mob = re.match("^.*?(\w+)$", param) if mob: pnamepos = mob.start(1) if pnamepos == 0: # Seems the parameter is not named pnamepos = len(param) else: print(params) print(proto) print("") continue ptype = param[:pnamepos] ptype = self.get_type(ptype) plist.append(ptype) f = type("", (object, ), {"restype": c_int}) if not ret.endswith("void"): f.restype = self.get_type(ret) try: f.argtypes = plist except TypeError as e: print(e) print(name) print(plist) self.functions[name] = f def parse_protos(self, filename): protos = [] unions = [] funcs = [] # first pass: create all structs, but without fields for line in open(filename): name, proto = line.split(":", 1) proto = proto.lstrip() if name.endswith("()"): funcs.append(line) continue # anonymous structs have no name at all if name and not name.startswith("ALLEGRO_"): continue if name == "ALLEGRO_OGL_EXT_API": continue if proto.startswith("union") or\ proto.startswith("typedef union"): self.add_union(name) unions.append((name, proto)) elif proto.startswith("struct") or\ proto.startswith("typedef struct"): self.add_struct(name) protos.append((name, proto)) elif proto.startswith("enum") or\ proto.startswith("typedef enum"): if name: self.types[name] = c_int protos.append(("", proto)) elif proto.startswith("#define"): if not name.startswith("_") and not name.startswith("GL_"): i = eval(proto.split(None, 2)[2]) self.constants[name] = i else: # actual typedef mob = re.match("typedef (.*) " + name, proto) if mob: t = mob.group(1) self.types[name] = self.get_type(t.strip()) else: # Probably a function pointer self.types[name] = c_void_p protos += unions # second pass: fill in fields for name, proto in protos: bo = proto.find("{") if bo == -1: continue bc = proto.rfind("}") braces = proto[bo + 1:bc] if proto.startswith("enum") or \ proto.startswith("typedef enum"): fields = braces.split(",") i = 0 for field in fields: if "=" in field: fname, val = field.split("=", 1) fname = fname.strip() try: i = int(eval(val, globals(), self.constants)) except NameError: i = val else: fname = field.strip() if not fname: continue self.constants[fname] = i try: i += 1 except TypeError: pass continue balance = 0 fields = [""] for c in braces: if c == "{": balance += 1 if c == "}": balance -= 1 if c == ";" and balance == 0: fields.append("") else: fields[-1] += c flist = [] for field in fields: if not field: continue # add function pointer as void pointer mob = re.match(".*?\(\*(\w+)\)", field) if mob: 
flist.append((mob.group(1), "c_void_p")) continue # add any pointer as void pointer mob = re.match(".*?\*(\w+)$", field) if mob: flist.append((mob.group(1), "c_void_p")) continue # add an array mob = re.match("(.*)( \w+)\[(.*?)\]$", field) if mob: # this is all a hack n = 0 ftype = mob.group(1) if ftype.startswith("struct"): if ftype == "struct {float axis[3];}": t = "c_float * 3" else: print("Error: Can't parse " + ftype + " yet.") t = None else: n = mob.group(3) # something in A5 uses a 2d array if "][" in n: n = n.replace("][", " * ") # something uses a division expression if "/" in n: n = "(" + n.replace("/", "//") + ")" t = self.get_type(ftype).__name__ + " * " + n fname = mob.group(2) flist.append((fname, t)) continue vars = field.split(",") mob = re.match("\s*(.*?)\s+(\w+)\s*$", vars[0]) t = self.get_type(mob.group(1)) vname = mob.group(2) if t is not None and vname is not None: flist.append((vname, t.__name__)) for v in vars[1:]: flist.append((v.strip(), t.__name__)) else: print("Error: " + str(vars)) try: self.types[name].my_fields = flist except AttributeError: print(name, flist) self.parse_funcs(funcs) def main(): p = optparse.OptionParser() p.add_option("-o", "--output", help="location of generated file") p.add_option("-p", "--protos", help="A file with all " + "prototypes to generate Python wrappers for, one per line. " "Generate it with docs/scripts/checkdocs.py -p") p.add_option("-t", "--type", help="the library type to " + "use, e.g. debug") p.add_option("-v", "--version", help="the library version to " + "use, e.g. 5.1") options, args = p.parse_args() if not options.protos: p.print_help() return al = Allegro() al.parse_protos(options.protos) f = open(options.output, "w") if options.output else sys.stdout release = options.type version = options.version f.write(r"""# Generated by generate_python_ctypes.py. import os, platform, sys from ctypes import * from ctypes.util import * # You must adjust this function to point ctypes to the A5 DLLs you are # distributing. _dlls = [] def _add_dll(name): release = "%(release)s" if os.name == "nt": release = "%(release)s-%(version)s" # Under Windows, DLLs are found in the current directory, so this # would be an easy way to keep all your DLLs in a sub-folder. # os.chdir("dlls") path = find_library(name + release) if not path: if os.name == "mac": path = name + release + ".dylib" elif os.name == "nt": path = name + release + ".dll" elif os.name == "posix": if platform.mac_ver()[0]: path = name + release + ".dylib" else: path = "lib" + name + release + ".so" else: sys.stderr.write("Cannot find library " + name + "\n") # In most cases, you actually don't want the above and instead # use the exact filename within your game distribution, possibly # even within a .zip file. # if not os.path.exists(path): # path = "dlls/" + path try: # RTLD_GLOBAL is required under OSX for some reason (?) _dlls.append(CDLL(path, RTLD_GLOBAL)) except OSError: # No need to fail here, might just be one of the addons. pass # os.chdir("..") _add_dll("allegro") _add_dll("allegro_acodec") _add_dll("allegro_audio") _add_dll("allegro_primitives") _add_dll("allegro_color") _add_dll("allegro_font") _add_dll("allegro_ttf") _add_dll("allegro_image") _add_dll("allegro_dialog") _add_dll("allegro_memfile") _add_dll("allegro_physfs") _add_dll("allegro_shader") _add_dll("allegro_main") _add_dll("allegro_monolith") # We don't have information ready which A5 function is in which DLL, # so we just try them all. 
def _dll(func, ret, params): for dll in _dlls: try: f = dll[func] f.restype = ret f.argtypes = params return f except AttributeError: pass sys.stderr.write("Cannot find function " + func + "\n") return lambda *args: None # In Python3, all Python strings are unicode so we have to convert to # UTF8 byte strings before passing to Allegro. if sys.version_info[0] > 2: class _AL_UTF8String: def from_param(x): return x.encode("utf8") else: _AL_UTF8String = c_char_p """ % locals()) postpone = [] for name, val in sorted(al.constants.items()): try: if isinstance(val, str): val = int(eval(val, globals(), al.constants)) f.write(name + " = " + str(val) + "\n") except: postpone.append((name, val)) for name, val in postpone: f.write(name + " = " + val + "\n") structs = set() # output everything except structs and unions for name, x in sorted(al.types.items()): if not name: continue base = x.__bases__[0] if base != Structure and base != Union: f.write(name + " = " + x.__name__ + "\n") else: structs.add(name) # order structs and unions by their dependencies structs_list = [] remaining = set(structs) while remaining: for name in sorted(remaining): ok = True x = al.types[name] if hasattr(x, "my_fields"): for fname, ftype in x.my_fields: if " " in ftype: ftype = ftype.split()[0] if ftype in structs and ftype in remaining: ok = False break if ok: structs_list.append(name) remaining.remove(name) for name in structs_list: x = al.types[name] base = x.__bases__[0] f.write("class " + name + "(" + base.__name__ + "):\n") if hasattr(x, "my_fields"): f.write(" _fields_ = [\n") for fname, ftype in x.my_fields: f.write(" (\"" + fname + "\", " + ftype + "),\n") f.write(" ]\n") else: f.write(" pass\n") pt = POINTER(x) f.write("%s = POINTER(%s)\n" % (pt.__name__, name)) for name, x in sorted(al.functions.items()): try: line = name + " = _dll(\"" + name + "\", " line += x.restype.__name__ + ", " line += "[" + (", ".join([a.__name__ for a in x.argtypes])) +\ "])\n" f.write(line) except AttributeError as e: print("Ignoring " + name + " because of errors (" + str(e) + ").") # some stuff the automated parser doesn't pick up f.write(r""" ALLEGRO_VERSION_INT = \ ((ALLEGRO_VERSION << 24) | (ALLEGRO_SUB_VERSION << 16) | \ (ALLEGRO_WIP_VERSION << 8) | ALLEGRO_RELEASE_NUMBER) """) f.write(r""" # work around bug http://gcc.gnu.org/bugzilla/show_bug.cgi?id=36834 if os.name == "nt": def al_map_rgba_f(r, g, b, a): return ALLEGRO_COLOR(r, g, b, a) def al_map_rgb_f(r, g, b): return ALLEGRO_COLOR(r, g, b, 1) def al_map_rgba(r, g, b, a): return ALLEGRO_COLOR(r / 255.0, g / 255.0, b / 255.0, a / 255.0) def al_map_rgb(r, g, b): return ALLEGRO_COLOR(r / 255.0, g / 255.0, b / 255.0, 1) """) f.write(""" def al_main(real_main, *args): def python_callback(argc, argv): real_main(*args) return 0 cb = CFUNCTYPE(c_int, c_int, c_void_p)(python_callback) al_run_main(0, 0, cb); """) f.close() main()
avg_line_length: 32.042593
max_line_length: 78
alphanum_fraction: 0.46385
import sys import re import optparse from ctypes import * class _AL_UTF8String: pass class Allegro: def __init__(self): self.types = {} self.functions = {} self.constants = {} def add_struct(self, name): x = type(name, (Structure, ), {}) self.types[name] = x def add_union(self, name): x = type(name, (Union, ), {}) self.types[name] = x def get_type(self, ptype): conversion = { "bool": c_bool, "_Bool": c_bool, "char": c_byte, "unsignedchar": c_ubyte, "int": c_int, "unsigned": c_uint, "unsignedint": c_uint, "int16_t": c_int16, "uint16_t": c_uint16, "int32_t": c_int32, "uint32_t": c_uint32, "int64_t": c_int64, "uint64_t": c_uint64, "uintptr_t": c_void_p, "intptr_t": c_void_p, "GLuint": c_uint, "unsignedlong": c_ulong, "long": c_long, "size_t": c_size_t, "off_t": c_int64, "time_t": c_int64, "va_list": c_void_p, "float": c_float, "double": c_double, "al_fixed": c_int, "HWND": c_void_p, "char*": _AL_UTF8String, "postprocess_callback_t": c_void_p, } ptype = re.sub(r"\bstruct|union\b", "", ptype) ptype = re.sub(r"\bconst\b", "", ptype) ptype = re.sub(r"\bextern\b", "", ptype) ptype = re.sub(r"\b__inline__\b", "", ptype) ptype = re.sub(r"\s+", "", ptype) if ptype.endswith("*"): if ptype in conversion: return conversion[ptype] t = ptype[:-1] if t in self.types: return POINTER(self.types[t]) return c_void_p elif ptype in self.types: return self.types[ptype] else: try: return conversion[ptype] except KeyError: print("Type Error:" + str(ptype)) return None def parse_funcs(self, funcs): for func in funcs: name, proto = func.split(":", 1) if not name.startswith("al_"): continue proto = proto.strip() name = name[:-2] if proto.startswith("enum"): continue if proto.startswith("typedef"): continue if "=" in proto: continue if proto.startswith("#"): continue funcstart = proto.find(name) funcend = funcstart + len(name) ret = proto[:funcstart].rstrip() params = proto[funcend:].strip(" ;") if params[0] != "(" or params[-1] != ")": print("Error:") print(params) continue params2 = params[1:-1] # remove callback argument lists balance = 0 params = "" for c in params2: if c == ")": balance -= 1 if balance == 0: params += c if c == "(": balance += 1 params = params.split(",") plist = [] for param in params: param = re.sub(r"\bconst\b", "", param) param = param.strip() if param == "void": continue if param == "": continue if param == "...": continue # treat arrays as a void pointer, for now if param.endswith("]") or param.endswith("*"): plist.append(c_void_p) continue # treat callbacks as a void pointer, for now if param.endswith(")"): plist.append(c_void_p) continue mob = re.match("^.*?(\w+)$", param) if mob: pnamepos = mob.start(1) if pnamepos == 0: # Seems the parameter is not named pnamepos = len(param) else: print(params) print(proto) print("") continue ptype = param[:pnamepos] ptype = self.get_type(ptype) plist.append(ptype) f = type("", (object, ), {"restype": c_int}) if not ret.endswith("void"): f.restype = self.get_type(ret) try: f.argtypes = plist except TypeError as e: print(e) print(name) print(plist) self.functions[name] = f def parse_protos(self, filename): protos = [] unions = [] funcs = [] # first pass: create all structs, but without fields for line in open(filename): name, proto = line.split(":", 1) proto = proto.lstrip() if name.endswith("()"): funcs.append(line) continue # anonymous structs have no name at all if name and not name.startswith("ALLEGRO_"): continue if name == "ALLEGRO_OGL_EXT_API": continue if proto.startswith("union") or\ proto.startswith("typedef union"): self.add_union(name) 
unions.append((name, proto)) elif proto.startswith("struct") or\ proto.startswith("typedef struct"): self.add_struct(name) protos.append((name, proto)) elif proto.startswith("enum") or\ proto.startswith("typedef enum"): if name: self.types[name] = c_int protos.append(("", proto)) elif proto.startswith("#define"): if not name.startswith("_") and not name.startswith("GL_"): i = eval(proto.split(None, 2)[2]) self.constants[name] = i else: # actual typedef mob = re.match("typedef (.*) " + name, proto) if mob: t = mob.group(1) self.types[name] = self.get_type(t.strip()) else: # Probably a function pointer self.types[name] = c_void_p protos += unions # second pass: fill in fields for name, proto in protos: bo = proto.find("{") if bo == -1: continue bc = proto.rfind("}") braces = proto[bo + 1:bc] if proto.startswith("enum") or \ proto.startswith("typedef enum"): fields = braces.split(",") i = 0 for field in fields: if "=" in field: fname, val = field.split("=", 1) fname = fname.strip() try: i = int(eval(val, globals(), self.constants)) except NameError: i = val else: fname = field.strip() if not fname: continue self.constants[fname] = i try: i += 1 except TypeError: pass continue balance = 0 fields = [""] for c in braces: if c == "{": balance += 1 if c == "}": balance -= 1 if c == ";" and balance == 0: fields.append("") else: fields[-1] += c flist = [] for field in fields: if not field: continue # add function pointer as void pointer mob = re.match(".*?\(\*(\w+)\)", field) if mob: flist.append((mob.group(1), "c_void_p")) continue # add any pointer as void pointer mob = re.match(".*?\*(\w+)$", field) if mob: flist.append((mob.group(1), "c_void_p")) continue # add an array mob = re.match("(.*)( \w+)\[(.*?)\]$", field) if mob: # this is all a hack n = 0 ftype = mob.group(1) if ftype.startswith("struct"): if ftype == "struct {float axis[3];}": t = "c_float * 3" else: print("Error: Can't parse " + ftype + " yet.") t = None else: n = mob.group(3) if "][" in n: n = n.replace("][", " * ") if "/" in n: n = "(" + n.replace("/", "//") + ")" t = self.get_type(ftype).__name__ + " * " + n fname = mob.group(2) flist.append((fname, t)) continue vars = field.split(",") mob = re.match("\s*(.*?)\s+(\w+)\s*$", vars[0]) t = self.get_type(mob.group(1)) vname = mob.group(2) if t is not None and vname is not None: flist.append((vname, t.__name__)) for v in vars[1:]: flist.append((v.strip(), t.__name__)) else: print("Error: " + str(vars)) try: self.types[name].my_fields = flist except AttributeError: print(name, flist) self.parse_funcs(funcs) def main(): p = optparse.OptionParser() p.add_option("-o", "--output", help="location of generated file") p.add_option("-p", "--protos", help="A file with all " + "prototypes to generate Python wrappers for, one per line. " "Generate it with docs/scripts/checkdocs.py -p") p.add_option("-t", "--type", help="the library type to " + "use, e.g. debug") p.add_option("-v", "--version", help="the library version to " + "use, e.g. 5.1") options, args = p.parse_args() if not options.protos: p.print_help() return al = Allegro() al.parse_protos(options.protos) f = open(options.output, "w") if options.output else sys.stdout release = options.type version = options.version f.write(r"""# Generated by generate_python_ctypes.py. import os, platform, sys from ctypes import * from ctypes.util import * # You must adjust this function to point ctypes to the A5 DLLs you are # distributing. 
_dlls = [] def _add_dll(name): release = "%(release)s" if os.name == "nt": release = "%(release)s-%(version)s" # Under Windows, DLLs are found in the current directory, so this # would be an easy way to keep all your DLLs in a sub-folder. # os.chdir("dlls") path = find_library(name + release) if not path: if os.name == "mac": path = name + release + ".dylib" elif os.name == "nt": path = name + release + ".dll" elif os.name == "posix": if platform.mac_ver()[0]: path = name + release + ".dylib" else: path = "lib" + name + release + ".so" else: sys.stderr.write("Cannot find library " + name + "\n") # In most cases, you actually don't want the above and instead # use the exact filename within your game distribution, possibly # even within a .zip file. # if not os.path.exists(path): # path = "dlls/" + path try: # RTLD_GLOBAL is required under OSX for some reason (?) _dlls.append(CDLL(path, RTLD_GLOBAL)) except OSError: # No need to fail here, might just be one of the addons. pass # os.chdir("..") _add_dll("allegro") _add_dll("allegro_acodec") _add_dll("allegro_audio") _add_dll("allegro_primitives") _add_dll("allegro_color") _add_dll("allegro_font") _add_dll("allegro_ttf") _add_dll("allegro_image") _add_dll("allegro_dialog") _add_dll("allegro_memfile") _add_dll("allegro_physfs") _add_dll("allegro_shader") _add_dll("allegro_main") _add_dll("allegro_monolith") # We don't have information ready which A5 function is in which DLL, # so we just try them all. def _dll(func, ret, params): for dll in _dlls: try: f = dll[func] f.restype = ret f.argtypes = params return f except AttributeError: pass sys.stderr.write("Cannot find function " + func + "\n") return lambda *args: None # In Python3, all Python strings are unicode so we have to convert to # UTF8 byte strings before passing to Allegro. 
if sys.version_info[0] > 2: class _AL_UTF8String: def from_param(x): return x.encode("utf8") else: _AL_UTF8String = c_char_p """ % locals()) postpone = [] for name, val in sorted(al.constants.items()): try: if isinstance(val, str): val = int(eval(val, globals(), al.constants)) f.write(name + " = " + str(val) + "\n") except: postpone.append((name, val)) for name, val in postpone: f.write(name + " = " + val + "\n") structs = set() for name, x in sorted(al.types.items()): if not name: continue base = x.__bases__[0] if base != Structure and base != Union: f.write(name + " = " + x.__name__ + "\n") else: structs.add(name) structs_list = [] remaining = set(structs) while remaining: for name in sorted(remaining): ok = True x = al.types[name] if hasattr(x, "my_fields"): for fname, ftype in x.my_fields: if " " in ftype: ftype = ftype.split()[0] if ftype in structs and ftype in remaining: ok = False break if ok: structs_list.append(name) remaining.remove(name) for name in structs_list: x = al.types[name] base = x.__bases__[0] f.write("class " + name + "(" + base.__name__ + "):\n") if hasattr(x, "my_fields"): f.write(" _fields_ = [\n") for fname, ftype in x.my_fields: f.write(" (\"" + fname + "\", " + ftype + "),\n") f.write(" ]\n") else: f.write(" pass\n") pt = POINTER(x) f.write("%s = POINTER(%s)\n" % (pt.__name__, name)) for name, x in sorted(al.functions.items()): try: line = name + " = _dll(\"" + name + "\", " line += x.restype.__name__ + ", " line += "[" + (", ".join([a.__name__ for a in x.argtypes])) +\ "])\n" f.write(line) except AttributeError as e: print("Ignoring " + name + " because of errors (" + str(e) + ").") f.write(r""" ALLEGRO_VERSION_INT = \ ((ALLEGRO_VERSION << 24) | (ALLEGRO_SUB_VERSION << 16) | \ (ALLEGRO_WIP_VERSION << 8) | ALLEGRO_RELEASE_NUMBER) """) f.write(r""" # work around bug http://gcc.gnu.org/bugzilla/show_bug.cgi?id=36834 if os.name == "nt": def al_map_rgba_f(r, g, b, a): return ALLEGRO_COLOR(r, g, b, a) def al_map_rgb_f(r, g, b): return ALLEGRO_COLOR(r, g, b, 1) def al_map_rgba(r, g, b, a): return ALLEGRO_COLOR(r / 255.0, g / 255.0, b / 255.0, a / 255.0) def al_map_rgb(r, g, b): return ALLEGRO_COLOR(r / 255.0, g / 255.0, b / 255.0, 1) """) f.write(""" def al_main(real_main, *args): def python_callback(argc, argv): real_main(*args) return 0 cb = CFUNCTYPE(c_int, c_int, c_void_p)(python_callback) al_run_main(0, 0, cb); """) f.close() main()
is_comment_constant_removed: true
is_sharp_comment_removed: true
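Comparing the two text fields in the record above shows what the boolean flags mean: content_no_comment is content with the docstrings and the "#" comments removed, which is what is_comment_constant_removed and is_sharp_comment_removed advertise. A rough, illustrative sketch of one way such stripping can be done in Python follows; it is an assumption about the idea, not the tooling that actually produced the dataset:

# Illustrative only: strip "#" comments and bare constant expressions
# (docstrings) from Python source. Not the dataset's actual pipeline.
import ast


def strip_comments_and_constants(source: str) -> str:
    tree = ast.parse(source)
    for node in ast.walk(tree):
        body = getattr(node, "body", None)
        if isinstance(body, list):
            kept = [
                stmt
                for stmt in body
                if not (isinstance(stmt, ast.Expr) and isinstance(stmt.value, ast.Constant))
            ]
            node.body = kept or [ast.Pass()]  # keep emptied bodies valid
    # ast.unparse (Python 3.9+) regenerates source without "#" comments.
    return ast.unparse(tree)


print(strip_comments_and_constants("def f():\n    'doc'  # note\n    return 1\n"))

Note that ast.unparse also normalizes formatting, so a pipeline that had to preserve the original layout would more likely work at the token level instead.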

hexsha: f70265c426e2f1f8f9b431c4f8ec20ae69355199
size: 3,292
ext: py
lang: Python
max_stars_repo_path: one_barangay/scripts/google_cloud_storage.py
max_stars_repo_name: PrynsTag/oneBarangay
max_stars_repo_head_hexsha: 6a8d56003d85b8385e91f5c5d81208619023c1ee
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: one_barangay/scripts/google_cloud_storage.py
max_issues_repo_name: PrynsTag/oneBarangay
max_issues_repo_head_hexsha: 6a8d56003d85b8385e91f5c5d81208619023c1ee
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: 96
max_issues_repo_issues_event_min_datetime: 2021-08-28T12:37:02.000Z
max_issues_repo_issues_event_max_datetime: 2022-03-23T04:25:12.000Z
max_forks_repo_path: one_barangay/scripts/google_cloud_storage.py
max_forks_repo_name: PrynsTag/oneBarangay
max_forks_repo_head_hexsha: 6a8d56003d85b8385e91f5c5d81208619023c1ee
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
"""File for Google Cloud Storage.""" import logging import os import urllib.parse from pathlib import Path import aiohttp from aiofile import AIOFile from gcloud.aio.storage import Storage from google.cloud import storage from one_barangay.local_settings import logger async def async_upload_to_bucket( filepath: str, file_obj, gcs_path: str, ): """Upload files to bucket. Args: filepath: str: The path to the file to be uploaded. file_obj: The file object from reading a file gcs_path: str: The target bucket name and sub-folder in GCS to upload to. (e.g. documents/photo) Returns: The path to the uploaded file. """ async with aiohttp.ClientSession() as session: gcs_storage = Storage(session=session) # skipcq gcs_filename = filepath.split("/")[-1] await gcs_storage.upload(gcs_path, gcs_filename, file_obj) return f"https://storage.googleapis.com/{gcs_path}/{urllib.parse.quote(gcs_filename)}" async def upload_to_gcs_runner( filepath: str, gcs_path: str, ): """Call the 'async_upload_to_bucket'. Args: filepath: str: The path to the file to be uploaded. gcs_path: str: The target bucket name and sub-folder in GCS. Returns: The path to the uploaded file. """ # target_bucket_name = target_bucket_name # bucket_folder = bucket_folder try: async with AIOFile(filepath, mode="rb") as afp: f = await afp.read() path = await async_upload_to_bucket(filepath, f, gcs_path) return path except FileNotFoundError as e: logger.exception("File not found. Make sure the file exists. %s", e) except OSError as e: logger.exception("File not uploaded. %s", e) def download_from_gcs( filename: str, target_bucket_name: str, bucket_folder: str, ): """Download file from Google Cloud Storage bucket. Args: filename: str: The name of file being downloaded. target_bucket_name: str: The bucket name from which to download to. bucket_folder: str: The folder from the bucket name from which to download to. Returns: None. """ try: storage_client = storage.Client(os.getenv("GOOGLE_PROJECT_ID")) bucket_name = storage_client.get_bucket(target_bucket_name) bucket = storage_client.get_bucket(bucket_name) path = os.path.join(bucket_folder, filename) base_dir = Path(__file__).resolve().parent.parent # TODO: Change to user location destination = os.path.join(base_dir, filename) blob = bucket.blob(path) blob.download_to_filename(destination) logging.info("%s downloaded to %s.", filename, destination) except FileNotFoundError as e: logger.exception("File not found. Make sure the file exists. %s", e) except OSError as e: logger.exception("%s not downloaded. %s", filename, e) # if __name__ == "__main__": # Sample Calls to Uploading to GCS # asyncio.run( # upload_to_gcs_runner( # "<your_absolute_filepath>" # ) # ) # Sample Calls to Downloading from GCS # download_from_gcs( # "kath.png", # str(os.getenv("GS_MEDIA_BUCKET_NAME")), # str(os.getenv("FILE_BUCKET_FOLDER")), # )
avg_line_length: 29.392857
max_line_length: 94
alphanum_fraction: 0.670109
content_no_comment:
import logging
import os
import urllib.parse
from pathlib import Path

import aiohttp
from aiofile import AIOFile
from gcloud.aio.storage import Storage
from google.cloud import storage

from one_barangay.local_settings import logger


async def async_upload_to_bucket(
    filepath: str,
    file_obj,
    gcs_path: str,
):
    async with aiohttp.ClientSession() as session:
        gcs_storage = Storage(session=session)
        gcs_filename = filepath.split("/")[-1]

        await gcs_storage.upload(gcs_path, gcs_filename, file_obj)

        return f"https://storage.googleapis.com/{gcs_path}/{urllib.parse.quote(gcs_filename)}"


async def upload_to_gcs_runner(
    filepath: str,
    gcs_path: str,
):
    try:
        async with AIOFile(filepath, mode="rb") as afp:
            f = await afp.read()
            path = await async_upload_to_bucket(filepath, f, gcs_path)

            return path
    except FileNotFoundError as e:
        logger.exception("File not found. Make sure the file exists. %s", e)
    except OSError as e:
        logger.exception("File not uploaded. %s", e)


def download_from_gcs(
    filename: str,
    target_bucket_name: str,
    bucket_folder: str,
):
    try:
        storage_client = storage.Client(os.getenv("GOOGLE_PROJECT_ID"))
        bucket_name = storage_client.get_bucket(target_bucket_name)
        bucket = storage_client.get_bucket(bucket_name)
        path = os.path.join(bucket_folder, filename)
        base_dir = Path(__file__).resolve().parent.parent
        destination = os.path.join(base_dir, filename)

        blob = bucket.blob(path)
        blob.download_to_filename(destination)

        logging.info("%s downloaded to %s.", filename, destination)
    except FileNotFoundError as e:
        logger.exception("File not found. Make sure the file exists. %s", e)
    except OSError as e:
        logger.exception("%s not downloaded. %s", filename, e)
is_comment_constant_removed: true
is_sharp_comment_removed: true

hexsha: f70265d6f013c90b4be07e53a84444ab11712fa3
size: 6,805
ext: py
lang: Python
max_stars_repo_path: exps/LFNA/basic-same.py
max_stars_repo_name: xgmiao/AutoDL-Projects
max_stars_repo_head_hexsha: 0dbbc286c9f56136291590136fffd513af881c36
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2021-05-11T00:41:15.000Z
max_stars_repo_stars_event_max_datetime: 2021-05-11T00:41:15.000Z
max_issues_repo_path: exps/LFNA/basic-same.py
max_issues_repo_name: xgmiao/AutoDL-Projects
max_issues_repo_head_hexsha: 0dbbc286c9f56136291590136fffd513af881c36
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: exps/LFNA/basic-same.py
max_forks_repo_name: xgmiao/AutoDL-Projects
max_forks_repo_head_hexsha: 0dbbc286c9f56136291590136fffd513af881c36
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
##################################################### # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.04 # ##################################################### # python exps/LFNA/basic-same.py --srange 1-999 --env_version v1 --hidden_dim 16 # python exps/LFNA/basic-same.py --srange 1-999 --env_version v2 --hidden_dim ##################################################### import sys, time, copy, torch, random, argparse from tqdm import tqdm from copy import deepcopy from pathlib import Path lib_dir = (Path(__file__).parent / ".." / ".." / "lib").resolve() if str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir)) from procedures import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint from log_utils import time_string from log_utils import AverageMeter, convert_secs2time from utils import split_str2indexes from procedures.advanced_main import basic_train_fn, basic_eval_fn from procedures.metric_utils import SaveMetric, MSEMetric, ComposeMetric from datasets.synthetic_core import get_synthetic_env from models.xcore import get_model from lfna_utils import lfna_setup def subsample(historical_x, historical_y, maxn=10000): total = historical_x.size(0) if total <= maxn: return historical_x, historical_y else: indexes = torch.randint(low=0, high=total, size=[maxn]) return historical_x[indexes], historical_y[indexes] def main(args): logger, env_info, model_kwargs = lfna_setup(args) # check indexes to be evaluated to_evaluate_indexes = split_str2indexes(args.srange, env_info["total"], None) logger.log( "Evaluate {:}, which has {:} timestamps in total.".format( args.srange, len(to_evaluate_indexes) ) ) w_container_per_epoch = dict() per_timestamp_time, start_time = AverageMeter(), time.time() for i, idx in enumerate(to_evaluate_indexes): need_time = "Time Left: {:}".format( convert_secs2time( per_timestamp_time.avg * (len(to_evaluate_indexes) - i), True ) ) logger.log( "[{:}]".format(time_string()) + " [{:04d}/{:04d}][{:04d}]".format(i, len(to_evaluate_indexes), idx) + " " + need_time ) # train the same data historical_x = env_info["{:}-x".format(idx)] historical_y = env_info["{:}-y".format(idx)] # build model model = get_model(dict(model_type="simple_mlp"), **model_kwargs) # build optimizer optimizer = torch.optim.Adam(model.parameters(), lr=args.init_lr, amsgrad=True) criterion = torch.nn.MSELoss() lr_scheduler = torch.optim.lr_scheduler.MultiStepLR( optimizer, milestones=[ int(args.epochs * 0.25), int(args.epochs * 0.5), int(args.epochs * 0.75), ], gamma=0.3, ) train_metric = MSEMetric() best_loss, best_param = None, None for _iepoch in range(args.epochs): preds = model(historical_x) optimizer.zero_grad() loss = criterion(preds, historical_y) loss.backward() optimizer.step() lr_scheduler.step() # save best if best_loss is None or best_loss > loss.item(): best_loss = loss.item() best_param = copy.deepcopy(model.state_dict()) model.load_state_dict(best_param) with torch.no_grad(): train_metric(preds, historical_y) train_results = train_metric.get_info() metric = ComposeMetric(MSEMetric(), SaveMetric()) eval_dataset = torch.utils.data.TensorDataset( env_info["{:}-x".format(idx)], env_info["{:}-y".format(idx)] ) eval_loader = torch.utils.data.DataLoader( eval_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0 ) results = basic_eval_fn(eval_loader, model, metric, logger) log_str = ( "[{:}]".format(time_string()) + " [{:04d}/{:04d}]".format(idx, env_info["total"]) + " train-mse: {:.5f}, eval-mse: {:.5f}".format( train_results["mse"], results["mse"] ) ) 
logger.log(log_str) save_path = logger.path(None) / "{:04d}-{:04d}.pth".format( idx, env_info["total"] ) w_container_per_epoch[idx] = model.get_w_container().no_grad_clone() save_checkpoint( { "model_state_dict": model.state_dict(), "model": model, "index": idx, "timestamp": env_info["{:}-timestamp".format(idx)], }, save_path, logger, ) logger.log("") per_timestamp_time.update(time.time() - start_time) start_time = time.time() save_checkpoint( {"w_container_per_epoch": w_container_per_epoch}, logger.path(None) / "final-ckp.pth", logger, ) logger.log("-" * 200 + "\n") logger.close() if __name__ == "__main__": parser = argparse.ArgumentParser("Use the data in the past.") parser.add_argument( "--save_dir", type=str, default="./outputs/lfna-synthetic/use-same-timestamp", help="The checkpoint directory.", ) parser.add_argument( "--env_version", type=str, required=True, help="The synthetic enviornment version.", ) parser.add_argument( "--hidden_dim", type=int, required=True, help="The hidden dimension.", ) parser.add_argument( "--init_lr", type=float, default=0.1, help="The initial learning rate for the optimizer (default is Adam)", ) parser.add_argument( "--batch_size", type=int, default=512, help="The batch size", ) parser.add_argument( "--epochs", type=int, default=1000, help="The total number of epochs.", ) parser.add_argument( "--srange", type=str, required=True, help="The range of models to be evaluated" ) parser.add_argument( "--workers", type=int, default=4, help="The number of data loading workers (default: 4)", ) # Random Seed parser.add_argument("--rand_seed", type=int, default=-1, help="manual seed") args = parser.parse_args() if args.rand_seed is None or args.rand_seed < 0: args.rand_seed = random.randint(1, 100000) assert args.save_dir is not None, "The save dir argument can not be None" args.save_dir = "{:}-{:}-d{:}".format( args.save_dir, args.env_version, args.hidden_dim ) main(args)
avg_line_length: 33.688119
max_line_length: 87
alphanum_fraction: 0.580309
t(), "model": model, "index": idx, "timestamp": env_info["{:}-timestamp".format(idx)], }, save_path, logger, ) logger.log("") per_timestamp_time.update(time.time() - start_time) start_time = time.time() save_checkpoint( {"w_container_per_epoch": w_container_per_epoch}, logger.path(None) / "final-ckp.pth", logger, ) logger.log("-" * 200 + "\n") logger.close() if __name__ == "__main__": parser = argparse.ArgumentParser("Use the data in the past.") parser.add_argument( "--save_dir", type=str, default="./outputs/lfna-synthetic/use-same-timestamp", help="The checkpoint directory.", ) parser.add_argument( "--env_version", type=str, required=True, help="The synthetic enviornment version.", ) parser.add_argument( "--hidden_dim", type=int, required=True, help="The hidden dimension.", ) parser.add_argument( "--init_lr", type=float, default=0.1, help="The initial learning rate for the optimizer (default is Adam)", ) parser.add_argument( "--batch_size", type=int, default=512, help="The batch size", ) parser.add_argument( "--epochs", type=int, default=1000, help="The total number of epochs.", ) parser.add_argument( "--srange", type=str, required=True, help="The range of models to be evaluated" ) parser.add_argument( "--workers", type=int, default=4, help="The number of data loading workers (default: 4)", ) parser.add_argument("--rand_seed", type=int, default=-1, help="manual seed") args = parser.parse_args() if args.rand_seed is None or args.rand_seed < 0: args.rand_seed = random.randint(1, 100000) assert args.save_dir is not None, "The save dir argument can not be None" args.save_dir = "{:}-{:}-d{:}".format( args.save_dir, args.env_version, args.hidden_dim ) main(args)
is_comment_constant_removed: true
is_sharp_comment_removed: true

hexsha: f70266abe169cc4c264c3fbf69d349d04e550aaa
size: 1,206
ext: py
lang: Python
max_stars_repo_path: 056_Merge_Intervals.py
max_stars_repo_name: adwardlee/leetcode_solutions
max_stars_repo_head_hexsha: f386869161181e153e29165d8fff06492bb192f3
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: 056_Merge_Intervals.py
max_issues_repo_name: adwardlee/leetcode_solutions
max_issues_repo_head_hexsha: f386869161181e153e29165d8fff06492bb192f3
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: 056_Merge_Intervals.py
max_forks_repo_name: adwardlee/leetcode_solutions
max_forks_repo_head_hexsha: f386869161181e153e29165d8fff06492bb192f3
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
'''
Given a collection of intervals, merge all overlapping intervals.

Example 1:

Input: intervals = [[1,3],[2,6],[8,10],[15,18]]
Output: [[1,6],[8,10],[15,18]]
Explanation: Since intervals [1,3] and [2,6] overlaps, merge them into [1,6].

Example 2:

Input: intervals = [[1,4],[4,5]]
Output: [[1,5]]
Explanation: Intervals [1,4] and [4,5] are considered overlapping.

NOTE: input types have been changed on April 15, 2019. Please reset to default code definition to get new method signature.

Constraints:

intervals[i][0] <= intervals[i][1]
'''
class Solution:
    def merge(self, intervals: List[List[int]]) -> List[List[int]]:
        intervals = sorted(intervals, key = lambda x: x[0])
        output = []
        i = 0
        if len(intervals) <= 1:
            return intervals
        while i < len(intervals) - 1:
            tmp = intervals[i]
            while tmp[1] >= intervals[i + 1][0]:
                tmp[1] = max(tmp[1], intervals[i + 1][1])
                i += 1
                if i >= len(intervals) - 1:
                    break
            i += 1
            output.append(tmp)
        if i <= len(intervals) - 1:
            output.append(intervals[-1])
        return output
avg_line_length: 29.414634
max_line_length: 123
alphanum_fraction: 0.556385
content_no_comment:
class Solution:
    def merge(self, intervals: List[List[int]]) -> List[List[int]]:
        intervals = sorted(intervals, key = lambda x: x[0])
        output = []
        i = 0
        if len(intervals) <= 1:
            return intervals
        while i < len(intervals) - 1:
            tmp = intervals[i]
            while tmp[1] >= intervals[i + 1][0]:
                tmp[1] = max(tmp[1], intervals[i + 1][1])
                i += 1
                if i >= len(intervals) - 1:
                    break
            i += 1
            output.append(tmp)
        if i <= len(intervals) - 1:
            output.append(intervals[-1])
        return output
is_comment_constant_removed: true
is_sharp_comment_removed: true

hexsha: f70266d5dff1d3d7c3f7c40adb716334cf21a6f6
size: 426
ext: py
lang: Python
max_stars_repo_path: src/periodic_tasks_api/views.py
max_stars_repo_name: VladimirDominion/periodic_tasks_utils
max_stars_repo_head_hexsha: c2f58fd5e6c154c17d95bd9e616f53fcf10a078d
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: src/periodic_tasks_api/views.py
max_issues_repo_name: VladimirDominion/periodic_tasks_utils
max_issues_repo_head_hexsha: c2f58fd5e6c154c17d95bd9e616f53fcf10a078d
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: src/periodic_tasks_api/views.py
max_forks_repo_name: VladimirDominion/periodic_tasks_utils
max_forks_repo_head_hexsha: c2f58fd5e6c154c17d95bd9e616f53fcf10a078d
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
from rest_framework import viewsets

from periodic_tasks_api.models import CustomExtendedPeriodicTask
from periodic_tasks_api.serializers import PeriodicTaskSerializer
from periodic_tasks_api.filters import PeriodicTaskFilterSet


class PeriodicTaskView(viewsets.ModelViewSet):
    queryset = CustomExtendedPeriodicTask.objects.all()
    serializer_class = PeriodicTaskSerializer
    filter_backends = [PeriodicTaskFilterSet]
avg_line_length: 35.5
max_line_length: 65
alphanum_fraction: 0.86385
content_no_comment:
from rest_framework import viewsets

from periodic_tasks_api.models import CustomExtendedPeriodicTask
from periodic_tasks_api.serializers import PeriodicTaskSerializer
from periodic_tasks_api.filters import PeriodicTaskFilterSet


class PeriodicTaskView(viewsets.ModelViewSet):
    queryset = CustomExtendedPeriodicTask.objects.all()
    serializer_class = PeriodicTaskSerializer
    filter_backends = [PeriodicTaskFilterSet]
is_comment_constant_removed: true
is_sharp_comment_removed: true

hexsha: f70268226852644181840ef9f194176c95f6c74e
size: 11,075
ext: py
lang: Python
max_stars_repo_path: network/switch.py
max_stars_repo_name: energieip/sol200-simulator
max_stars_repo_head_hexsha: 8e8aca9e25c972fd0d8d6cdfe06cbd7895ce90ac
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: network/switch.py
max_issues_repo_name: energieip/sol200-simulator
max_issues_repo_head_hexsha: 8e8aca9e25c972fd0d8d6cdfe06cbd7895ce90ac
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: network/switch.py
max_forks_repo_name: energieip/sol200-simulator
max_forks_repo_head_hexsha: 8e8aca9e25c972fd0d8d6cdfe06cbd7895ce90ac
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
#!/usr/bin/python3 # coding: utf-8 from network.group import Group import paho.mqtt.client as mqtt from threading import Thread import time from log import logger import paho.mqtt.subscribe as subscribe import json import random import string class Switch(Thread): def __init__(self, broker_ip): Thread.__init__(self) self.broker_ip = broker_ip self.groups = {} self.drivers = { "leds" : {}, "sensors": {}, "blinds": {} } self.diagnostic = { "config": {}, "events": {} } self.name = "Switch" + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(12)) def on_disconnect(self, client, userdata, rc): if rc != 0: logger.warning("Unexpected client disconnect for %r, will reconnect", self.name) def run(self): self.client = mqtt.Client(self.name) self.client.on_message = self.event_received self.client.on_disconnect = self.on_disconnect self.client.connect(self.broker_ip) self.client.loop_start() subscribe.callback(self.event_received, "#", hostname=self.broker_ip) while self.is_alive: time.sleep(1) self.client.loop_stop() def event_received(self, client, userdata, message): try: data = message.payload.decode("utf-8") logger.debug("received url %r %r", message.topic, str(data)) if message.topic.endswith("/setup/hello"): data = json.loads(data) topic_url = data["topic"] + "/setup/config" config = {} if data["type"] == "led": config["iMax"] = 700 self.client.publish("/write/" + topic_url, json.dumps(config)) except: logger.exception("Invalid value received") def create_group(self, leds, sensors, blinds, group_id): if group_id in self.groups: return False group = Group(self.broker_ip, group_id) self.groups[group_id] = group for led in leds: group.add_led(led) for sensor in sensors: group.add_sensor(sensor) for blind in blinds: group.add_blind(blind) group.start() self.diagnostic['events'][time.time()] = "Group " + str(group_id) + "has been created and contains " + json.dumps(group.serialize()) return True def add_driver_to_group(self, group_id, driver_type, mac): if group_id not in self.groups: return False group = self.groups[group_id] if driver_type == "led": led = self.get_led(mac) if not led: return False return group.add_led(led) elif driver_type == "sensor": sensor = self.get_sensor(mac) if not sensor: return False return group.add_sensor(sensor) elif driver_type == "blind": blind = self.get_blind(mac) if not blind: return False return group.add_blind(blind) self.diagnostic['events'][time.time()] = "Driver " + driver_type + " : " + mac + "has been been added to " + group_id return False def get_group_id(self, group_id): if group_id in self.groups: return self.groups[group_id] return {} def list_groups(self): return self.groups.values() def update_group_rules(self, group_id, rule_id, value): if group_id not in self.groups: return False if rule_id == "brightness": self.groups[group_id].set_brightness(value) elif rule_id == "temperature": self.groups[group_id].set_temperature(value) elif rule_id == "presence": self.groups[group_id].set_presence(value) self.diagnostic['events'][time.time()] = "Rule " + rule_id + " is set to " + str(value) + " for " + str(group_id) return True def list_leds(self): return self.drivers["leds"].values() def get_led(self, led_id): if led_id in self.drivers["leds"]: return self.drivers["leds"][led_id] return None def plug_led(self, led): self.drivers["leds"][led.mac] = led self.diagnostic['events'][time.time()] = "New led " + led.mac + " has been plugged into the switch" def unplug_led(self, led): if led.mac in self.drivers["leds"]: del 
self.drivers["leds"][led.mac] self.diagnostic['events'][time.time()] = "Led " + led.mac + " has been unplugged from the switch" def list_sensors(self): return self.drivers["sensors"].values() def get_sensor(self, sensor_id): if sensor_id in self.drivers["sensors"]: return self.drivers["sensors"][sensor_id] return None def plug_sensor(self, sensor): self.drivers["sensors"][sensor.mac] = sensor self.diagnostic['events'][time.time()] = "New sensor " + sensor.mac + " has been plugged into the switch" def unplug_sensor(self, sensor): if sensor.mac in self.drivers["sensors"]: del self.drivers["sensors"][sensor.mac] self.diagnostic['events'][time.time()] = "Sensor " + sensor.mac + " has been unplugged from the switch" def switch_led_mode(self, led_id, auto=True): if led_id not in self.drivers["leds"]: return False led = self.drivers["leds"][led_id] url = "/write/" + led.base_topic + "/status/auto" logger.info("Send switch mode to %r for %r", auto, url) status = "auto" if not auto: status = "manual" self.diagnostic['events'][time.time()] = "Switch led " + led.mac + " into mode " + status self.client.publish(url, "%s" % auto) return True def list_blinds(self): return self.drivers["blinds"].values() def get_blind(self, blind_id): if blind_id in self.drivers["blinds"]: return self.drivers["blinds"][blind_id] return None def plug_blind(self, blind): self.drivers["blinds"][blind.mac] = blind self.diagnostic['events'][time.time()] = "New blind " + blind.mac + " has been plugged into the switch" def unplug_blind(self, blind): if blind.mac in self.drivers["blinds"]: del self.drivers["blinds"][blind.mac] self.diagnostic['events'][time.time()] = "Blind " + blind.mac + " has been unplugged from the switch" def get_diagnostic(self): self.diagnostic["config"]["groups"] = [group.serialize() for group in self.groups.values()] return self.diagnostic def set_manual_led_brightness(self, led_id, brightness=0): if led_id not in self.drivers["leds"]: return False led = self.drivers["leds"][led_id] url = "/write/" + led.base_topic + "/base/setpointManual" logger.info("Send setpoint to %r for %r", brightness, url) self.diagnostic['events'][time.time()] = "Force led " + led.mac + " brightness " + str(brightness) logger.info(" back %r", self.client.publish(url, str(brightness))) return True def switch_blind_mode(self, blind_id, auto=True): if blind_id not in self.drivers["blinds"]: return False blind = self.drivers["blinds"][blind_id] url = "/write/" + blind.base_topic + "/status/auto" logger.info("Send switch mode to %r for %r", auto, url) status = "auto" if not auto: status = "manual" self.diagnostic['events'][time.time()] = "Switch blind " + blind.mac + " into mode " + status self.client.publish(url, "%s" % auto) return True def set_manual_blind_position(self, blind_id, position, blind_number=0): if blind_id not in self.drivers["blinds"]: return False blind = self.drivers["blinds"][blind_id] if not blind_number or blind_number == 1: url = "/write/" + blind.base_topic + "/base/blind1Manual" logger.info("Send position to %r for %r", position, url) self.diagnostic['events'][time.time()] = "Force blind " + blind.mac + " position " + str(position) self.client.publish(url, str(position)) if not blind_number or blind_number == 2: url = "/write/" + blind.base_topic + "/base/blind2Manual" logger.info("Send position to %r for %r", position, url) self.diagnostic['events'][time.time()] = "Force blind " + blind.mac + " position " + str(position) self.client.publish(url, str(position)) def set_manual_blind_fin(self, blind_id, fin, 
blind_number=0): if blind_id not in self.drivers["blinds"]: return False blind = self.drivers["blinds"][blind_id] if not blind_number or blind_number == 1: url = "/write/" + blind.base_topic + "/base/fin1Manual" logger.info("Send position to %r for %r", fin, url) self.diagnostic['events'][time.time()] = "Force blind " + blind.mac + " fin " + str(fin) self.client.publish(url, str(fin)) if not blind_number or blind_number == 2: url = "/write/" + blind.base_topic + "/base/fin2Manual" logger.info("Send position to %r for %r", fin, url) self.diagnostic['events'][time.time()] = "Force blind " + blind.mac + " fin " + str(fin) self.client.publish(url, str(fin)) def switch_group_mode(self, group_id, auto=True): if group_id not in self.groups: return False group = self.groups[group_id] url = "/write/" + group.base_topic + "/status/auto" logger.info("Send switch mode to %r for %r", auto, url) status = "auto" if not auto: status = "manual" self.diagnostic['events'][time.time()] = "Switch group " + str(group.group_id) + " into mode " + str(status) self.client.publish(url, "%s" % auto) return True def set_group_setpoint(self, group_id, setpoint): if group_id not in self.groups: return False group = self.groups[group_id] url = "/write/" + group.base_topic + "/config/setpoint" logger.info("Send setpoint value to %r for %r", setpoint, url) self.diagnostic['events'][time.time()] = "Send setpoint " + str(setpoint) + " to group " + str(group.group_id) self.client.publish(url, str(setpoint)) return True def set_group_blind_position(self, group_id, position): if group_id not in self.groups: return False group = self.groups[group_id] url = "/write/" + group.base_topic + "/config/blindPosition" logger.info("Send setpoint value to %r for %r", position, url) self.diagnostic['events'][time.time()] = "Send blind position " + str(position) + " to group " + str(group.group_id) self.client.publish(url, str(position)) return True
avg_line_length: 39.695341
max_line_length: 140
alphanum_fraction: 0.591332
from network.group import Group import paho.mqtt.client as mqtt from threading import Thread import time from log import logger import paho.mqtt.subscribe as subscribe import json import random import string class Switch(Thread): def __init__(self, broker_ip): Thread.__init__(self) self.broker_ip = broker_ip self.groups = {} self.drivers = { "leds" : {}, "sensors": {}, "blinds": {} } self.diagnostic = { "config": {}, "events": {} } self.name = "Switch" + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(12)) def on_disconnect(self, client, userdata, rc): if rc != 0: logger.warning("Unexpected client disconnect for %r, will reconnect", self.name) def run(self): self.client = mqtt.Client(self.name) self.client.on_message = self.event_received self.client.on_disconnect = self.on_disconnect self.client.connect(self.broker_ip) self.client.loop_start() subscribe.callback(self.event_received, "#", hostname=self.broker_ip) while self.is_alive: time.sleep(1) self.client.loop_stop() def event_received(self, client, userdata, message): try: data = message.payload.decode("utf-8") logger.debug("received url %r %r", message.topic, str(data)) if message.topic.endswith("/setup/hello"): data = json.loads(data) topic_url = data["topic"] + "/setup/config" config = {} if data["type"] == "led": config["iMax"] = 700 self.client.publish("/write/" + topic_url, json.dumps(config)) except: logger.exception("Invalid value received") def create_group(self, leds, sensors, blinds, group_id): if group_id in self.groups: return False group = Group(self.broker_ip, group_id) self.groups[group_id] = group for led in leds: group.add_led(led) for sensor in sensors: group.add_sensor(sensor) for blind in blinds: group.add_blind(blind) group.start() self.diagnostic['events'][time.time()] = "Group " + str(group_id) + "has been created and contains " + json.dumps(group.serialize()) return True def add_driver_to_group(self, group_id, driver_type, mac): if group_id not in self.groups: return False group = self.groups[group_id] if driver_type == "led": led = self.get_led(mac) if not led: return False return group.add_led(led) elif driver_type == "sensor": sensor = self.get_sensor(mac) if not sensor: return False return group.add_sensor(sensor) elif driver_type == "blind": blind = self.get_blind(mac) if not blind: return False return group.add_blind(blind) self.diagnostic['events'][time.time()] = "Driver " + driver_type + " : " + mac + "has been been added to " + group_id return False def get_group_id(self, group_id): if group_id in self.groups: return self.groups[group_id] return {} def list_groups(self): return self.groups.values() def update_group_rules(self, group_id, rule_id, value): if group_id not in self.groups: return False if rule_id == "brightness": self.groups[group_id].set_brightness(value) elif rule_id == "temperature": self.groups[group_id].set_temperature(value) elif rule_id == "presence": self.groups[group_id].set_presence(value) self.diagnostic['events'][time.time()] = "Rule " + rule_id + " is set to " + str(value) + " for " + str(group_id) return True def list_leds(self): return self.drivers["leds"].values() def get_led(self, led_id): if led_id in self.drivers["leds"]: return self.drivers["leds"][led_id] return None def plug_led(self, led): self.drivers["leds"][led.mac] = led self.diagnostic['events'][time.time()] = "New led " + led.mac + " has been plugged into the switch" def unplug_led(self, led): if led.mac in self.drivers["leds"]: del self.drivers["leds"][led.mac] 
self.diagnostic['events'][time.time()] = "Led " + led.mac + " has been unplugged from the switch" def list_sensors(self): return self.drivers["sensors"].values() def get_sensor(self, sensor_id): if sensor_id in self.drivers["sensors"]: return self.drivers["sensors"][sensor_id] return None def plug_sensor(self, sensor): self.drivers["sensors"][sensor.mac] = sensor self.diagnostic['events'][time.time()] = "New sensor " + sensor.mac + " has been plugged into the switch" def unplug_sensor(self, sensor): if sensor.mac in self.drivers["sensors"]: del self.drivers["sensors"][sensor.mac] self.diagnostic['events'][time.time()] = "Sensor " + sensor.mac + " has been unplugged from the switch" def switch_led_mode(self, led_id, auto=True): if led_id not in self.drivers["leds"]: return False led = self.drivers["leds"][led_id] url = "/write/" + led.base_topic + "/status/auto" logger.info("Send switch mode to %r for %r", auto, url) status = "auto" if not auto: status = "manual" self.diagnostic['events'][time.time()] = "Switch led " + led.mac + " into mode " + status self.client.publish(url, "%s" % auto) return True def list_blinds(self): return self.drivers["blinds"].values() def get_blind(self, blind_id): if blind_id in self.drivers["blinds"]: return self.drivers["blinds"][blind_id] return None def plug_blind(self, blind): self.drivers["blinds"][blind.mac] = blind self.diagnostic['events'][time.time()] = "New blind " + blind.mac + " has been plugged into the switch" def unplug_blind(self, blind): if blind.mac in self.drivers["blinds"]: del self.drivers["blinds"][blind.mac] self.diagnostic['events'][time.time()] = "Blind " + blind.mac + " has been unplugged from the switch" def get_diagnostic(self): self.diagnostic["config"]["groups"] = [group.serialize() for group in self.groups.values()] return self.diagnostic def set_manual_led_brightness(self, led_id, brightness=0): if led_id not in self.drivers["leds"]: return False led = self.drivers["leds"][led_id] url = "/write/" + led.base_topic + "/base/setpointManual" logger.info("Send setpoint to %r for %r", brightness, url) self.diagnostic['events'][time.time()] = "Force led " + led.mac + " brightness " + str(brightness) logger.info(" back %r", self.client.publish(url, str(brightness))) return True def switch_blind_mode(self, blind_id, auto=True): if blind_id not in self.drivers["blinds"]: return False blind = self.drivers["blinds"][blind_id] url = "/write/" + blind.base_topic + "/status/auto" logger.info("Send switch mode to %r for %r", auto, url) status = "auto" if not auto: status = "manual" self.diagnostic['events'][time.time()] = "Switch blind " + blind.mac + " into mode " + status self.client.publish(url, "%s" % auto) return True def set_manual_blind_position(self, blind_id, position, blind_number=0): if blind_id not in self.drivers["blinds"]: return False blind = self.drivers["blinds"][blind_id] if not blind_number or blind_number == 1: url = "/write/" + blind.base_topic + "/base/blind1Manual" logger.info("Send position to %r for %r", position, url) self.diagnostic['events'][time.time()] = "Force blind " + blind.mac + " position " + str(position) self.client.publish(url, str(position)) if not blind_number or blind_number == 2: url = "/write/" + blind.base_topic + "/base/blind2Manual" logger.info("Send position to %r for %r", position, url) self.diagnostic['events'][time.time()] = "Force blind " + blind.mac + " position " + str(position) self.client.publish(url, str(position)) def set_manual_blind_fin(self, blind_id, fin, blind_number=0): if blind_id 
not in self.drivers["blinds"]: return False blind = self.drivers["blinds"][blind_id] if not blind_number or blind_number == 1: url = "/write/" + blind.base_topic + "/base/fin1Manual" logger.info("Send position to %r for %r", fin, url) self.diagnostic['events'][time.time()] = "Force blind " + blind.mac + " fin " + str(fin) self.client.publish(url, str(fin)) if not blind_number or blind_number == 2: url = "/write/" + blind.base_topic + "/base/fin2Manual" logger.info("Send position to %r for %r", fin, url) self.diagnostic['events'][time.time()] = "Force blind " + blind.mac + " fin " + str(fin) self.client.publish(url, str(fin)) def switch_group_mode(self, group_id, auto=True): if group_id not in self.groups: return False group = self.groups[group_id] url = "/write/" + group.base_topic + "/status/auto" logger.info("Send switch mode to %r for %r", auto, url) status = "auto" if not auto: status = "manual" self.diagnostic['events'][time.time()] = "Switch group " + str(group.group_id) + " into mode " + str(status) self.client.publish(url, "%s" % auto) return True def set_group_setpoint(self, group_id, setpoint): if group_id not in self.groups: return False group = self.groups[group_id] url = "/write/" + group.base_topic + "/config/setpoint" logger.info("Send setpoint value to %r for %r", setpoint, url) self.diagnostic['events'][time.time()] = "Send setpoint " + str(setpoint) + " to group " + str(group.group_id) self.client.publish(url, str(setpoint)) return True def set_group_blind_position(self, group_id, position): if group_id not in self.groups: return False group = self.groups[group_id] url = "/write/" + group.base_topic + "/config/blindPosition" logger.info("Send setpoint value to %r for %r", position, url) self.diagnostic['events'][time.time()] = "Send blind position " + str(position) + " to group " + str(group.group_id) self.client.publish(url, str(position)) return True
true
true
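For reference, a minimal sketch of how a driver could announce itself to the Switch class above over MQTT; the broker host, the led-style topic, and the MAC segment are assumptions for illustration, not values taken from the code above:

import json
import paho.mqtt.publish as publish

# Hypothetical driver topic; the Switch reacts to any topic ending in /setup/hello.
driver_topic = "led/AA:BB:CC:DD:EE:FF"
hello = {"type": "led", "topic": driver_topic}

# Switch.event_received() parses this payload and answers on
# /write/<topic>/setup/config with {"iMax": 700} for type "led".
publish.single(driver_topic + "/setup/hello", json.dumps(hello), hostname="localhost")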
f7026834a7a7d7b060fd2eab47981312a89bbc7e
4,063
py
Python
setup.py
mpbelhorn/OctoPrint-BedLevelVisualizer
77622c65a06a8eb3a12e9fad0281386334dc8314
[ "MIT" ]
null
null
null
setup.py
mpbelhorn/OctoPrint-BedLevelVisualizer
77622c65a06a8eb3a12e9fad0281386334dc8314
[ "MIT" ]
null
null
null
setup.py
mpbelhorn/OctoPrint-BedLevelVisualizer
77622c65a06a8eb3a12e9fad0281386334dc8314
[ "MIT" ]
null
null
null
# coding=utf-8

########################################################################################################################
### Do not forget to adjust the following variables to your own plugin.

# The plugin's identifier, has to be unique
plugin_identifier = "bedlevelvisualizer"

# The plugin's python package, should be "octoprint_<plugin identifier>", has to be unique
plugin_package = "octoprint_bedlevelvisualizer"

# The plugin's human readable name. Can be overwritten within OctoPrint's internal data via __plugin_name__ in the
# plugin module
plugin_name = "Bed Visualizer"

# The plugin's version. Can be overwritten within OctoPrint's internal data via __plugin_version__ in the plugin module
plugin_version = "0.1.15"

# The plugin's description. Can be overwritten within OctoPrint's internal data via __plugin_description__ in the plugin
# module
plugin_description = """Displays 3D mesh of bed topography report."""

# The plugin's author. Can be overwritten within OctoPrint's internal data via __plugin_author__ in the plugin module
plugin_author = "jneilliii"

# The plugin's author's mail address.
plugin_author_email = "jneilliii+octoprint@gmail.com"

# The plugin's homepage URL. Can be overwritten within OctoPrint's internal data via __plugin_url__ in the plugin module
plugin_url = "https://github.com/jneilliii/OctoPrint-BedLevelVisualizer"

# The plugin's license. Can be overwritten within OctoPrint's internal data via __plugin_license__ in the plugin module
plugin_license = "AGPLv3"

# Any additional requirements besides OctoPrint should be listed here
plugin_requires = ["numpy>=1.16.0,<=1.19.2"]

### --------------------------------------------------------------------------------------------------------------------
### More advanced options that you usually shouldn't have to touch follow after this point
### --------------------------------------------------------------------------------------------------------------------

# Additional package data to install for this plugin. The subfolders "templates", "static" and "translations" will
# already be installed automatically if they exist.
plugin_additional_data = []

# Any additional python packages you need to install with your plugin that are not contained in <plugin_package>.*
plugin_addtional_packages = []

# Any python packages within <plugin_package>.* you do NOT want to install with your plugin
plugin_ignored_packages = []

# Additional parameters for the call to setuptools.setup. If your plugin wants to register additional entry points,
# define dependency links or other things like that, this is the place to go. Will be merged recursively with the
# default setup parameters as provided by octoprint_setuptools.create_plugin_setup_parameters using
# octoprint.util.dict_merge.
#
# Example:
# plugin_requires = ["someDependency==dev"]
# additional_setup_parameters = {"dependency_links": ["https://github.com/someUser/someRepo/archive/master.zip#egg=someDependency-dev"]}
additional_setup_parameters = {}

########################################################################################################################

from setuptools import setup

try:
    import octoprint_setuptools
except:
    print("Could not import OctoPrint's setuptools, are you sure you are running that under "
          "the same python installation that OctoPrint is installed under?")
    import sys
    sys.exit(-1)

setup_parameters = octoprint_setuptools.create_plugin_setup_parameters(
    identifier=plugin_identifier,
    package=plugin_package,
    name=plugin_name,
    version=plugin_version,
    description=plugin_description,
    author=plugin_author,
    mail=plugin_author_email,
    url=plugin_url,
    license=plugin_license,
    requires=plugin_requires,
    additional_packages=plugin_addtional_packages,
    ignored_packages=plugin_ignored_packages,
    additional_data=plugin_additional_data
)

if len(additional_setup_parameters):
    from octoprint.util import dict_merge
    setup_parameters = dict_merge(setup_parameters, additional_setup_parameters)

setup(**setup_parameters)
42.768421
140
0.707113
true
true
f702692290f1f965b54b54b17c0a5c2fc4387772
1,398
py
Python
tests/cases/py_client/parameter_name_conflict/client.py
koji8y/swagger-to
8c9201a71220b183aa55b10d61ec322008633f58
[ "MIT" ]
null
null
null
tests/cases/py_client/parameter_name_conflict/client.py
koji8y/swagger-to
8c9201a71220b183aa55b10d61ec322008633f58
[ "MIT" ]
null
null
null
tests/cases/py_client/parameter_name_conflict/client.py
koji8y/swagger-to
8c9201a71220b183aa55b10d61ec322008633f58
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
# Automatically generated file by swagger_to. DO NOT EDIT OR APPEND ANYTHING!
"""Implements the client for test."""

# pylint: skip-file
# pydocstyle: add-ignore=D105,D107,D401

import contextlib
import json
from typing import Any, BinaryIO, Dict, List, MutableMapping, Optional

import requests
import requests.auth


class RemoteCaller:
    """Executes the remote calls to the server."""

    def __init__(self, url_prefix: str, auth: Optional[requests.auth.AuthBase] = None) -> None:
        self.url_prefix = url_prefix
        self.auth = auth

    def test_me(
            self,
            query_some_parameter: str,
            path_some_parameter: str) -> bytes:
        """
        Is a test endpoint.

        :param query_some_parameter:
        :param path_some_parameter:

        :return: a confirmation
        """
        url = "".join([
            self.url_prefix,
            '/products/',
            str(path_some_parameter)])

        params = {}  # type: Dict[str, str]

        params['some_parameter'] = query_some_parameter

        resp = requests.request(
            method='get',
            url=url,
            params=params,
            auth=self.auth)

        with contextlib.closing(resp):
            resp.raise_for_status()
            return resp.content


# Automatically generated file by swagger_to. DO NOT EDIT OR APPEND ANYTHING!
24.964286
95
0.615165
import contextlib
import json
from typing import Any, BinaryIO, Dict, List, MutableMapping, Optional

import requests
import requests.auth


class RemoteCaller:

    def __init__(self, url_prefix: str, auth: Optional[requests.auth.AuthBase] = None) -> None:
        self.url_prefix = url_prefix
        self.auth = auth

    def test_me(
            self,
            query_some_parameter: str,
            path_some_parameter: str) -> bytes:
        url = "".join([
            self.url_prefix,
            '/products/',
            str(path_some_parameter)])

        params = {}

        params['some_parameter'] = query_some_parameter

        resp = requests.request(
            method='get',
            url=url,
            params=params,
            auth=self.auth)

        with contextlib.closing(resp):
            resp.raise_for_status()
            return resp.content
true
true
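A short usage sketch for the generated client above; the module name and the server URL are assumptions for illustration:

from client import RemoteCaller  # assumed module name for the generated file

caller = RemoteCaller(url_prefix="http://localhost:8080")

# Calls GET /products/{path_some_parameter}?some_parameter=... and returns the raw response body.
body = caller.test_me(query_some_parameter="abc", path_some_parameter="42")
print(body)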
f702695464c85f76c6dbad36b5d94f9cc2ae757a
52,412
py
Python
caret_analyze/infra/lttng/records_provider_lttng.py
tier4/CARET_analyze
96f29eaaf1ff961f6410df38d938d421272f99f3
[ "Apache-2.0" ]
null
null
null
caret_analyze/infra/lttng/records_provider_lttng.py
tier4/CARET_analyze
96f29eaaf1ff961f6410df38d938d421272f99f3
[ "Apache-2.0" ]
12
2021-10-08T04:43:17.000Z
2022-03-22T10:42:53.000Z
caret_analyze/infra/lttng/records_provider_lttng.py
tier4/CARET_analyze
96f29eaaf1ff961f6410df38d938d421272f99f3
[ "Apache-2.0" ]
4
2021-12-14T05:47:14.000Z
2022-03-16T11:26:59.000Z
# Copyright 2021 Research Institute of Systems Planning, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import cached_property from logging import getLogger from typing import Dict, List, Optional, Sequence, Tuple, Union from caret_analyze.value_objects.message_context import MessageContext, MessageContextType from .lttng import Lttng from .value_objects import PublisherValueLttng, SubscriptionCallbackValueLttng from ...common import Columns, Util from ...exceptions import (InvalidArgumentError, UnsupportedNodeRecordsError, UnsupportedTypeError) from ...infra.interface import RuntimeDataProvider from ...infra.lttng.column_names import COLUMN_NAME from ...record import (merge, merge_sequencial, RecordsFactory, RecordsInterface) from ...value_objects import (CallbackChain, CallbackStructValue, CommunicationStructValue, InheritUniqueStamp, NodePathStructValue, PublisherStructValue, Qos, SubscriptionCallbackStructValue, SubscriptionStructValue, Tilde, TimerCallbackStructValue, UseLatestMessage, VariablePassingStructValue) logger = getLogger(__name__) class RecordsProviderLttng(RuntimeDataProvider): """ Records are processed and measurement results are calculated. In addition to merging, filtering and other operations are performed here. """ def __init__( self, lttng: Lttng ) -> None: self._lttng = lttng self._source = FilteredRecordsSource(lttng) self._helper = RecordsProviderLttngHelper(lttng) def communication_records( self, comm_val: CommunicationStructValue ) -> RecordsInterface: """ Provide communication records. Parameters ---------- comm_info : CommunicationStructInfo communicadtion info. Returns ------- RecordsInterface Columns [inter process communication case]: - [topic_name]/rclcpp_publish_timestamp - [topic_name]/rcl_publish_timestamp - [topic_name]/dds_publish_timestamp - [callback_name]/callback_start_timestamp Columns [intra process communication case]: - [topic_name]/rclcpp_intra_publish_timestamp - [topic_name]/message_timestamp - [callback_name]/callback_start_timestamp """ assert comm_val.subscribe_callback_name is not None if self.is_intra_process_communication(comm_val): return self._compose_intra_proc_comm_records(comm_val) return self._compose_inter_proc_comm_records(comm_val) def node_records( self, node_path_val: NodePathStructValue, ) -> RecordsInterface: if node_path_val.message_context is None: # dummy record msg = 'message context is None. return dummy record. 
' msg += f'node_name: {node_path_val.node_name}' logger.info(msg) return RecordsFactory.create_instance() if node_path_val.message_context_type == MessageContextType.CALLBACK_CHAIN: return NodeRecordsCallbackChain(self, node_path_val).to_records() if node_path_val.message_context_type == MessageContextType.INHERIT_UNIQUE_STAMP: return NodeRecordsInheritUniqueTimestamp(self, node_path_val).to_records() if node_path_val.message_context_type == MessageContextType.USE_LATEST_MESSAGE: return NodeRecordsUseLatestMessage(self, node_path_val).to_records() if node_path_val.message_context_type == MessageContextType.TILDE: return NodeRecordsTilde(self, node_path_val).to_records() raise UnsupportedNodeRecordsError( 'Unknown message context. ' f'message_context = {node_path_val.message_context.context_type.type_name}' ) def callback_records( self, callback: CallbackStructValue ) -> RecordsInterface: """ Return callback duration records. Parameters ---------- callback_val : CallbackStructValue target callback value. Returns ------- RecordsInterface Columns - [callback_name]/callback_start_timestamp - [callback_name]/callback_end_timestamp """ callback_objects = self._helper.get_callback_objects(callback) callback_records = self._source.callback_records(*callback_objects) columns = [ COLUMN_NAME.CALLBACK_START_TIMESTAMP, COLUMN_NAME.CALLBACK_END_TIMESTAMP ] self._format(callback_records, columns) self._rename_column(callback_records, callback.callback_name, None) return callback_records def subscribe_records( self, subscription: SubscriptionStructValue ) -> RecordsInterface: """ Provide subscription records. Parameters ---------- subscription_value : SubscriptionStructValue Target subscription value. Returns ------- RecordsInterface Columns - [callback_name]/callback_start_timestamp - [topic_name]/message_timestamp - [topic_name]/source_timestamp Raises ------ InvalidArgumentError """ callback = subscription.callback assert callback is not None tilde_subscription = self._helper.get_tilde_subscription(callback) if tilde_subscription is None: return self._subscribe_records(subscription) return self._subscribe_records_with_tilde(subscription) def _subscribe_records( self, subscription: SubscriptionStructValue ) -> RecordsInterface: """ Provide subscription records. Parameters ---------- subscription_value : SubscriptionStructValue Target subscription value. Returns ------- RecordsInterface Columns - [callback_name]/callback_start_timestamp - [topic_name]/message_timestamp - [topic_name]/source_timestamp Raises ------ InvalidArgumentError """ callback = subscription.callback if callback is None: raise InvalidArgumentError( 'callback_value is None. ' f'node_name: {subscription.node_name}' f'callback_name: {subscription.callback_name}' f'topic_name: {subscription.topic_name}' ) callback_objects = self._helper.get_subscription_callback_objects(callback) sub_records = self._source.sub_records(*callback_objects) columns = [ COLUMN_NAME.CALLBACK_START_TIMESTAMP, COLUMN_NAME.MESSAGE_TIMESTAMP, COLUMN_NAME.SOURCE_TIMESTAMP, ] self._format(sub_records, columns) self._rename_column( sub_records, callback.callback_name, subscription.topic_name ) return sub_records def _subscribe_records_with_tilde( self, subscription: SubscriptionStructValue ) -> RecordsInterface: """ Provide subscription records. Parameters ---------- subscription_value : SubscriptionStructValue Target subscription value. 
Returns ------- RecordsInterface Columns - [callback_name]/callback_start_timestamp - [topic_name]/message_timestamp - [topic_name]/source_timestamp - [topic_name]/tilde_subscribe_timestamp - [topic_name]/tilde_message_id Raises ------ InvalidArgumentError """ callback = subscription.callback if callback is None: raise InvalidArgumentError( 'callback_value is None. ' f'node_name: {subscription.node_name}' f'callback_name: {subscription.callback_name}' f'topic_name: {subscription.topic_name}' ) callback_objects = self._helper.get_subscription_callback_objects(callback) sub_records = self._source.sub_records(*callback_objects) tilde_subscription = self._helper.get_tilde_subscription(callback) if tilde_subscription is not None: tilde_records = self._source.tilde_subscribe_records(tilde_subscription) sub_records = merge_sequencial( left_records=sub_records, right_records=tilde_records, left_stamp_key=COLUMN_NAME.CALLBACK_START_TIMESTAMP, right_stamp_key=COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP, join_left_key=None, join_right_key=None, how='left', columns=Columns(sub_records.columns + tilde_records.columns).as_list(), progress_label='binding: tilde_records', ) columns = [ COLUMN_NAME.CALLBACK_START_TIMESTAMP, COLUMN_NAME.MESSAGE_TIMESTAMP, COLUMN_NAME.SOURCE_TIMESTAMP, COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP, COLUMN_NAME.TILDE_MESSAGE_ID, ] self._format(sub_records, columns) self._rename_column( sub_records, callback.callback_name, subscription.topic_name ) return sub_records def _publish_records( self, publisher: PublisherStructValue ) -> RecordsInterface: """ Return publish records. Parameters ---------- publish : PublisherStructValue target publisher Returns ------- RecordsInterface Columns - [topic_name]/rclcpp_publish_timestamp - [topic_name]/rclcpp_intra_publish_timestamp - [topic_name]/rclcpp_inter_publish_timestamp - [topic_name]/rcl_publish_timestamp - [topic_name]/dds_write_timestamp - [topic_name]/message_timestamp - [topic_name]/source_timestamp """ publisher_handles = self._helper.get_publisher_handles(publisher) pub_records = self._source.publish_records(publisher_handles) columns = [ COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP, COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP, COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP, COLUMN_NAME.RCL_PUBLISH_TIMESTAMP, COLUMN_NAME.DDS_WRITE_TIMESTAMP, COLUMN_NAME.MESSAGE_TIMESTAMP, COLUMN_NAME.SOURCE_TIMESTAMP, ] self._format(pub_records, columns) self._rename_column(pub_records, None, publisher.topic_name) return pub_records def publish_records( self, publisher: PublisherStructValue ) -> RecordsInterface: """ Return publish records. Parameters ---------- publish : PublisherStructValue target publisher Returns ------- RecordsInterface Columns - [topic_name]/rclcpp_publish_timestamp - [topic_name]/rclcpp_intra_publish_timestamp - [topic_name]/rclcpp_inter_publish_timestamp - [topic_name]/rcl_publish_timestamp - [topic_name]/dds_write_timestamp - [topic_name]/message_timestamp - [topic_name]/source_timestamp --- - [topic_name]/tilde_publish_timestamp - [topic_name]/tilde_message_id """ tilde_publishers = self._helper.get_tilde_publishers(publisher) if len(tilde_publishers) == 0: return self._publish_records(publisher) return self._publish_records_with_tilde(publisher) def _publish_records_with_tilde( self, publisher: PublisherStructValue ) -> RecordsInterface: """ Return publish records. 
Parameters ---------- publish : PublisherStructValue target publisher Returns ------- RecordsInterface Columns - [topic_name]/rclcpp_publish_timestamp - [topic_name]/rclcpp_intra_publish_timestamp - [topic_name]/rclcpp_inter_publish_timestamp - [topic_name]/rcl_publish_timestamp - [topic_name]/dds_write_timestamp - [topic_name]/message_timestamp - [topic_name]/source_timestamp - [topic_name]/tilde_publish_timestamp - [topic_name]/tilde_message_id """ publisher_handles = self._helper.get_publisher_handles(publisher) pub_records = self._source.publish_records(publisher_handles) tilde_publishers = self._helper.get_tilde_publishers(publisher) tilde_records = self._source.tilde_publish_records(tilde_publishers) pub_records = merge_sequencial( left_records=tilde_records, right_records=pub_records, left_stamp_key='tilde_publish_timestamp', right_stamp_key='rclcpp_publish_timestamp', join_left_key=None, join_right_key=None, columns=Columns(tilde_records.columns + pub_records.columns).as_list(), how='right', progress_label='binding: tilde_records', ) columns = [ COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP, COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP, COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP, COLUMN_NAME.RCL_PUBLISH_TIMESTAMP, COLUMN_NAME.DDS_WRITE_TIMESTAMP, COLUMN_NAME.MESSAGE_TIMESTAMP, COLUMN_NAME.SOURCE_TIMESTAMP, COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP, COLUMN_NAME.TILDE_MESSAGE_ID, ] self._format(pub_records, columns) self._rename_column(pub_records, None, publisher.topic_name) return pub_records def tilde_records( self, subscription: SubscriptionStructValue, publisher: PublisherStructValue ) -> RecordsInterface: assert subscription.callback is not None publisher_addrs = self._helper.get_tilde_publishers(publisher) subscription_addr = self._helper.get_tilde_subscription(subscription.callback) assert len(publisher_addrs) > 0 assert subscription_addr is not None pub_records = self._source.tilde_publish_records(publisher_addrs) sub_records = self._source.tilde_subscribe_records(subscription_addr) records = merge( left_records=sub_records, right_records=pub_records, join_left_key=COLUMN_NAME.TILDE_MESSAGE_ID, join_right_key=COLUMN_NAME.TILDE_MESSAGE_ID, columns=Columns(sub_records.columns + pub_records.columns).as_list(), how='left', progress_label='binding: tilde pub and sub records' ) columns = [ COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP, COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP ] self._format(records, columns) self._rename_column(records, subscription.callback_name, subscription.topic_name) return records def get_rmw_implementation(self) -> str: return self._lttng.get_rmw_impl() def get_qos( self, pub_sub: Union[PublisherStructValue, SubscriptionStructValue] ) -> Qos: if isinstance(pub_sub, SubscriptionStructValue): sub_cb = pub_sub.callback if sub_cb is None: raise InvalidArgumentError('Failed to get callback information.' 'pub.callback is None') sub_cb_lttng = self._helper.get_lttng_subscription(sub_cb) return self._lttng.get_subscription_qos(sub_cb_lttng) pubs_lttng = self._helper.get_lttng_publishers(pub_sub) if len(pubs_lttng) == 0: raise InvalidArgumentError('No publisher matching the criteria was found.') if len(pubs_lttng) > 1: logger.warning( 'Multiple publishers matching your criteria were found.' 'The value of the first publisher qos will be returned.') return self._lttng.get_publisher_qos(pubs_lttng[0]) def variable_passing_records( self, variable_passing_info: VariablePassingStructValue ) -> RecordsInterface: """ Return variable passing records. 
Parameters ---------- variable_passing_info : VariablePassingStructInfo target variable passing info. Returns ------- RecordsInterface Columns - [callback_name]/callback_end_timestamp - [callback_name]/callback_start_timestamp """ read_records: RecordsInterface = self.callback_records( variable_passing_info.callback_read) write_records: RecordsInterface = self.callback_records( variable_passing_info.callback_write) read_records.drop_columns([read_records.columns[-1]]) # callback end write_records.drop_columns([write_records.columns[0]]) # callback_start columns = [ write_records.columns[0], read_records.columns[0], ] merged_records = merge_sequencial( left_records=write_records, right_records=read_records, left_stamp_key=columns[0], right_stamp_key=columns[1], join_left_key=None, join_right_key=None, columns=columns, how='left_use_latest', progress_label='binding: callback_end and callback_start' ) merged_records.sort(columns[0]) self._format(merged_records, columns) return merged_records def is_intra_process_communication( self, communication_value: CommunicationStructValue ) -> Optional[bool]: intra_record = self._compose_intra_proc_comm_records(communication_value) return len(intra_record) > 0 def _compose_intra_proc_comm_records( self, comm_info: CommunicationStructValue, ) -> RecordsInterface: """ Compose intra process communication records. Parameters ---------- comm_info : CommunicationStructInfo Target communication info. Returns ------- RecordsInterface Columns - [topic_name]/rclcpp_publish_timestamp - [callback_name]/callback_start_timestamp """ publisher = comm_info.publisher subscription_cb = comm_info.subscribe_callback assert subscription_cb is not None assert isinstance(subscription_cb, SubscriptionCallbackStructValue) publisher_handles = self._helper.get_publisher_handles(publisher) callback_object_intra = self._helper.get_subscription_callback_object_intra( subscription_cb) records = self._source.intra_comm_records(publisher_handles, callback_object_intra) columns = [ COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP, COLUMN_NAME.CALLBACK_START_TIMESTAMP, ] self._format(records, columns) records.rename_columns({ COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP: COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP }) self._rename_column(records, comm_info.subscribe_callback_name, comm_info.topic_name) return records def _compose_inter_proc_comm_records( self, comm_value: CommunicationStructValue ) -> RecordsInterface: """ Composer intar process communication records. Parameters ---------- comm_value : CommunicationStructValue target communication value. 
Returns ------- RecordsInterface Columns - [topic_name]/rclcpp_publish_timestamp - [topic_name]/rcl_publish_timestamp - [topic_name]/dds_write_timestamp - [callback_name_name]/callback_start_timestamp """ publisher = comm_value.publisher subscription_cb = comm_value.subscribe_callback assert subscription_cb is not None assert isinstance(subscription_cb, SubscriptionCallbackStructValue) publisher_handles = self._helper.get_publisher_handles(publisher) callback_object = self._helper.get_subscription_callback_object_inter(subscription_cb) records = self._source.inter_comm_records(publisher_handles, callback_object) columns = [ COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP, COLUMN_NAME.RCL_PUBLISH_TIMESTAMP, COLUMN_NAME.DDS_WRITE_TIMESTAMP, COLUMN_NAME.CALLBACK_START_TIMESTAMP ] self._format(records, columns) records.rename_columns({ COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP: COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP }) self._rename_column(records, comm_value.subscribe_callback_name, comm_value.topic_name) return records @staticmethod def _format(records: RecordsInterface, columns: List[str]): drop = list(set(records.columns) - set(columns)) records.drop_columns(drop) records.reindex(columns) @staticmethod def _rename_column( records: RecordsInterface, callback_name: Optional[str], topic_name: Optional[str] ) -> None: rename_dict = {} if COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP] = \ f'{topic_name}/{COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP}' if COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP] = \ f'{topic_name}/{COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP}' if COLUMN_NAME.CALLBACK_START_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.CALLBACK_START_TIMESTAMP] = \ f'{callback_name}/{COLUMN_NAME.CALLBACK_START_TIMESTAMP}' if COLUMN_NAME.CALLBACK_END_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.CALLBACK_END_TIMESTAMP] = \ f'{callback_name}/{COLUMN_NAME.CALLBACK_END_TIMESTAMP}' if COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP] = \ f'{topic_name}/{COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP}' if COLUMN_NAME.RCL_PUBLISH_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.RCL_PUBLISH_TIMESTAMP] = \ f'{topic_name}/{COLUMN_NAME.RCL_PUBLISH_TIMESTAMP}' if COLUMN_NAME.DDS_WRITE_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.DDS_WRITE_TIMESTAMP] = \ f'{topic_name}/{COLUMN_NAME.DDS_WRITE_TIMESTAMP}' if COLUMN_NAME.MESSAGE_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.MESSAGE_TIMESTAMP] = \ f'{topic_name}/{COLUMN_NAME.MESSAGE_TIMESTAMP}' if COLUMN_NAME.SOURCE_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.SOURCE_TIMESTAMP] = \ f'{topic_name}/{COLUMN_NAME.SOURCE_TIMESTAMP}' if COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP] = \ f'{topic_name}/{COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP}' if COLUMN_NAME.TILDE_MESSAGE_ID in records.columns: rename_dict[COLUMN_NAME.TILDE_MESSAGE_ID] = \ f'{topic_name}/{COLUMN_NAME.TILDE_MESSAGE_ID}' if COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP] = \ f'{topic_name}/{COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP}' records.rename_columns(rename_dict) class RecordsProviderLttngHelper: def __init__( self, lttng: Lttng ) -> None: from .bridge import LttngBridge self._bridge = LttngBridge(lttng) def get_callback_objects( self, callback: 
CallbackStructValue ) -> Tuple[int, Optional[int]]: if isinstance(callback, TimerCallbackStructValue): return self.get_timer_callback_object(callback), None if isinstance(callback, SubscriptionCallbackStructValue): obj = self.get_subscription_callback_object_inter(callback) obj_intra = self.get_subscription_callback_object_intra(callback) if obj_intra is not None: return obj, obj_intra return obj, None msg = 'Failed to get callback object. ' msg += f'{callback.callback_type.type_name} is not supported.' raise UnsupportedTypeError(msg) def get_timer_callback_object( self, callback: TimerCallbackStructValue ) -> int: callback_lttng = self._bridge.get_timer_callback(callback) return callback_lttng.callback_object def get_subscription_callback_objects( self, callback: SubscriptionCallbackStructValue ) -> Tuple[int, Optional[int]]: return self.get_callback_objects(callback) def get_subscription_callback_object_inter( self, callback: SubscriptionCallbackStructValue ) -> int: callback_lttng = self._bridge.get_subscription_callback(callback) return callback_lttng.callback_object def get_subscription_callback_object_intra( self, callback: SubscriptionCallbackStructValue ) -> Optional[int]: callback_lttng = self._bridge.get_subscription_callback(callback) return callback_lttng.callback_object_intra def get_tilde_subscription( self, callback: SubscriptionCallbackStructValue ) -> Optional[int]: callback_lttng = self._bridge.get_subscription_callback(callback) return callback_lttng.tilde_subscription def get_publisher_handles( self, publisher: PublisherStructValue ) -> List[int]: publisher_lttng = self._bridge.get_publishers(publisher) return [pub_info.publisher_handle for pub_info in publisher_lttng] def get_tilde_publishers( self, publisher_info: PublisherStructValue ) -> List[int]: publisher_lttng = self._bridge.get_publishers(publisher_info) publisher = [pub_info.tilde_publisher for pub_info in publisher_lttng if pub_info.tilde_publisher is not None] return publisher def get_lttng_publishers( self, publisher: PublisherStructValue ) -> List[PublisherValueLttng]: return self._bridge.get_publishers(publisher) def get_lttng_subscription( self, callback: SubscriptionCallbackStructValue ) -> SubscriptionCallbackValueLttng: return self._bridge.get_subscription_callback(callback) class NodeRecordsCallbackChain: def __init__( self, provider: RecordsProviderLttng, node_path: NodePathStructValue, ) -> None: self._provider = provider self._validate(node_path) self._val = node_path def to_records(self): chain_info = self._val.child if isinstance(chain_info[0], CallbackStructValue): cb_info = chain_info[0] records = self._provider.callback_records(cb_info) else: var_pass_info = chain_info[0] records = self._provider.variable_passing_records(var_pass_info) for chain_element in chain_info[1:]: if isinstance(chain_element, CallbackStructValue): records_ = self._provider.callback_records(chain_element) join_key = records_.columns[0] records = merge( left_records=records, right_records=records_, join_left_key=join_key, join_right_key=join_key, columns=Columns(records.columns + records_.columns), how='left', progress_label='binding: callback_start and callback end' ) continue if isinstance(chain_element, VariablePassingStructValue): records_ = self._provider.variable_passing_records(chain_element) # self._rename_var_pass_records(records_, chain_element) join_key = records_.columns[0] records = merge( left_records=records, right_records=records_, join_left_key=join_key, join_right_key=join_key, 
columns=Columns(records.columns + records_.columns).as_list(), how='left', progress_label='binding: callback_end and callback start' ) continue last_element = chain_info[-1] if isinstance(last_element, CallbackStructValue) \ and self._val.publisher is not None: last_callback_end_name = Util.filter_items( lambda x: COLUMN_NAME.CALLBACK_END_TIMESTAMP in x, records.columns)[-1] records.drop_columns([last_callback_end_name]) last_callback_start_name = Util.filter_items( lambda x: COLUMN_NAME.CALLBACK_START_TIMESTAMP in x, records.columns)[-1] publish_records = self._provider.publish_records(self._val.publisher) publish_column = publish_records.columns[0] columns = records.columns + [publish_column] records = merge_sequencial( left_records=records, right_records=publish_records, join_left_key=None, join_right_key=None, left_stamp_key=last_callback_start_name, right_stamp_key=publish_column, columns=Columns(records.columns + publish_records.columns).as_list(), how='left', progress_label='binding: callback_start and publish', ) records.drop_columns(list(set(records.columns) - set(columns))) records.reindex(columns) return records @staticmethod def _validate( node_path: NodePathStructValue, ) -> None: if node_path.callbacks is None: raise UnsupportedNodeRecordsError('') if node_path.callbacks is None: raise UnsupportedNodeRecordsError('callback values is None.') if not isinstance(node_path.message_context, CallbackChain): msg = 'node_path.message context is not CallbackChain' raise UnsupportedNodeRecordsError(msg) head_callback = node_path.callbacks[0] tail_callback = node_path.callbacks[-1] if node_path.publish_topic_name is not None and \ tail_callback.publish_topic_names is not None and \ len(tail_callback.publish_topic_names) != 0 and \ node_path.publish_topic_name not in tail_callback.publish_topic_names: raise UnsupportedNodeRecordsError('') if node_path.subscribe_topic_name is not None and \ node_path.subscribe_topic_name != head_callback.subscribe_topic_name: raise UnsupportedNodeRecordsError('') class NodeRecordsInheritUniqueTimestamp: def __init__( self, provider: RecordsProviderLttng, node_path: NodePathStructValue, ) -> None: if node_path.message_context is None: raise UnsupportedNodeRecordsError('node_path.message context is None') if not isinstance(node_path.message_context, InheritUniqueStamp): msg = 'node_path.message context is not InheritUniqueStamp' raise UnsupportedNodeRecordsError(msg) self._provider = provider self._context: InheritUniqueStamp = node_path.message_context self._validate(node_path, self._context) self._node_path = node_path def to_records(self): sub_records = self._provider.subscribe_records(self._node_path.subscription) pub_records = self._provider.publish_records(self._node_path.publisher) columns = [ sub_records.columns[0], pub_records.columns[0], ] join_left_key = f'{self._node_path.subscribe_topic_name}/{COLUMN_NAME.MESSAGE_TIMESTAMP}' join_right_key = f'{self._node_path.publish_topic_name}/{COLUMN_NAME.MESSAGE_TIMESTAMP}' pub_sub_records = merge_sequencial( left_records=sub_records, right_records=pub_records, left_stamp_key=sub_records.columns[0], right_stamp_key=pub_records.columns[0], join_left_key=join_left_key, join_right_key=join_right_key, columns=Columns(sub_records.columns + pub_records.columns).as_list(), how='left_use_latest', progress_label='binding: inherit unique timestamp', ) drop_columns = list(set(pub_sub_records.columns) - set(columns)) pub_sub_records.drop_columns(drop_columns) pub_sub_records.reindex(columns) return pub_sub_records 
@staticmethod def _validate( node_path: NodePathStructValue, context: InheritUniqueStamp, ) -> None: def is_valid() -> bool: if context.publisher_topic_name != node_path.publish_topic_name: return False if context.subscription_topic_name != node_path.subscribe_topic_name: return False return True if is_valid(): return None msg = f'InheritUniqueStamp cannot build records. \n{node_path} \n{context}' raise UnsupportedNodeRecordsError(msg) class NodeRecordsUseLatestMessage: def __init__( self, provider: RecordsProviderLttng, node_path: NodePathStructValue, ) -> None: if node_path.message_context is None: raise UnsupportedNodeRecordsError('node_path.message context is None') if not isinstance(node_path.message_context, UseLatestMessage): raise UnsupportedNodeRecordsError('node_path.message context is not UseLatestMessage') self._provider = provider self._context: UseLatestMessage = node_path.message_context self._validate(node_path, self._context) self._node_path = node_path def to_records(self): sub_records = self._provider.subscribe_records(self._node_path.subscription) pub_records = self._provider.publish_records(self._node_path.publisher) columns = [ sub_records.columns[0], f'{self._node_path.publish_topic_name}/rclcpp_publish_timestamp', ] pub_sub_records = merge_sequencial( left_records=sub_records, right_records=pub_records, left_stamp_key=sub_records.columns[0], right_stamp_key=pub_records.columns[0], join_left_key=None, join_right_key=None, columns=Columns(sub_records.columns + pub_records.columns).as_list(), how='left_use_latest', progress_label='binding use_latest_message.' ) drop_columns = list(set(pub_sub_records.columns) - set(columns)) pub_sub_records.drop_columns(drop_columns) pub_sub_records.reindex(columns) return pub_sub_records @staticmethod def _validate( node_path: NodePathStructValue, context: UseLatestMessage, ) -> None: def is_valid() -> bool: if context.publisher_topic_name != node_path.publish_topic_name: return False if context.subscription_topic_name != node_path.subscribe_topic_name: return False return True if is_valid(): return None msg = f'UseLatest cannot build records. \n{node_path} \n{context}' raise UnsupportedNodeRecordsError(msg) class NodeRecordsTilde: def __init__( self, provider: RecordsProviderLttng, node_path: NodePathStructValue, ) -> None: if node_path.message_context is None: raise UnsupportedNodeRecordsError('node_path.message context is None') if not isinstance(node_path.message_context, Tilde): raise UnsupportedNodeRecordsError('node_path.message context is not UseLatestMessage') self._provider = provider self._context: MessageContext = node_path.message_context self._validate(node_path, self._context) self._node_path = node_path def to_records(self): tilde_records = self._provider.tilde_records( self._node_path.subscription, self._node_path.publisher) sub_records = self._provider.subscribe_records(self._node_path.subscription) pub_records = self._provider.publish_records(self._node_path.publisher) left_stamp_key = Util.find_one( lambda x: COLUMN_NAME.CALLBACK_START_TIMESTAMP in x, sub_records.columns) right_stamp_key = Util.find_one( lambda x: COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP in x, sub_records.columns) records = merge_sequencial( left_records=sub_records, right_records=tilde_records, left_stamp_key=left_stamp_key, right_stamp_key=right_stamp_key, join_left_key=None, join_right_key=None, columns=Columns(sub_records.columns + tilde_records.columns).as_list(), how='left', progress_label='binding tilde subscribe records.' 
) left_stamp_key = Util.find_one( lambda x: COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP in x, records.columns) right_stamp_key = Util.find_one( lambda x: COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP in x, pub_records.columns) records = merge_sequencial( left_records=records, right_records=pub_records, left_stamp_key=left_stamp_key, right_stamp_key=right_stamp_key, join_left_key=None, join_right_key=None, columns=Columns(records.columns + pub_records.columns).as_list(), how='left', progress_label='binding tilde publish records.' ) columns = [ Util.find_one(lambda x: COLUMN_NAME.CALLBACK_START_TIMESTAMP in x, records.columns), Util.find_one(lambda x: COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP in x, records.columns), ] drop_columns = list(set(records.columns) - set(columns)) records.drop_columns(drop_columns) records.reindex(columns) return records @staticmethod def _validate( node_path: NodePathStructValue, context: MessageContext, ) -> None: def is_valid() -> bool: if not isinstance(context, Tilde): return False if context.publisher_topic_name != node_path.publish_topic_name: return False if context.subscription_topic_name != node_path.subscribe_topic_name: return False return True if is_valid(): return None msg = f'UseLatest cannot build records. \n{node_path} \n{context}' raise UnsupportedNodeRecordsError(msg) class FilteredRecordsSource: def __init__(self, lttng: Lttng): self._lttng = lttng def tilde_subscribe_records( self, tilde_subscription: int ) -> RecordsInterface: """ Compose filtered tilde subscribe records. Parameters ---------- tilde_subscription : int Returns ------- RecordsInterface Equivalent to the following process. records = lttng.compose_tilde_subscribe_records() records.filter_if( lambda x: x.get('tilde_subscription') == tilde_subscription ) records.drop_columns(['tilde_subscription]) """ sub_records = RecordsFactory.create_instance( None, [ COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP, COLUMN_NAME.TILDE_SUBSCRIPTION, COLUMN_NAME.TILDE_MESSAGE_ID ] ) if tilde_subscription is not None and \ tilde_subscription in self._grouped_tilde_sub_records: sub_records_ = self._grouped_tilde_sub_records[tilde_subscription].clone() sub_records.concat(sub_records_) sub_records.drop_columns([COLUMN_NAME.TILDE_SUBSCRIPTION]) return sub_records def sub_records( self, inter_callback_object: int, intra_callback_object: Optional[int] ) -> RecordsInterface: """ Compose filtered subscribe records. Parameters ---------- inter_callback_object : int intra_callback_object : Optional[int] Returns ------- RecordsInterface Equivalent to the following process. records = lttng.compose_subscribe_records() records.filter_if( lambda x: x.get('callback_object') in [ inter_callback_object, intra_callback_object ] ) """ sub_records = RecordsFactory.create_instance( None, [ COLUMN_NAME.CALLBACK_START_TIMESTAMP, COLUMN_NAME.MESSAGE_TIMESTAMP, COLUMN_NAME.SOURCE_TIMESTAMP, ] ) records = self._grouped_sub_records if inter_callback_object in records: sub_records.concat(records[inter_callback_object].clone()) if intra_callback_object is not None and intra_callback_object in records: intra_sub_records = records[intra_callback_object].clone() sub_records.concat(intra_sub_records) sub_records.sort(COLUMN_NAME.CALLBACK_START_TIMESTAMP) return sub_records def inter_comm_records( self, publisher_handles: List[int], callback_object: int ) -> RecordsInterface: """ Compose filtered inter communication records. Parameters ---------- publisher_handles : List[int] callback_object : int Returns ------- RecordsInterface Equivalent to the following process. 
records = lttng.compose_inter_proc_comm_records() records.filter_if( lambda x: x.get('callback_object') == callback_object and x.get('publisher_handle') in publisher_handles ) """ records = RecordsFactory.create_instance( None, [ COLUMN_NAME.CALLBACK_OBJECT, COLUMN_NAME.CALLBACK_START_TIMESTAMP, COLUMN_NAME.PUBLISHER_HANDLE, COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP, COLUMN_NAME.RCL_PUBLISH_TIMESTAMP, COLUMN_NAME.DDS_WRITE_TIMESTAMP ] ) for publisher_handle in publisher_handles: key = (callback_object, publisher_handle) if key in self._grouped_inter_comm_records: comm_records = self._grouped_inter_comm_records[key].clone() records.concat(comm_records) records.sort(COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP) return records def intra_comm_records( self, publisher_handles: List[int], intra_callback_object: Optional[int] ) -> RecordsInterface: """ Compose filtered intra communication records. Parameters ---------- publisher_handles : List[int] [description] intra_callback_object : Optional[int] [description] Returns ------- RecordsInterface Equivalent to the following process. records = lttng.compose_intra_proc_comm_records() records.filter_if( lambda x: x.get('callback_object') == callback_object and x.get('publisher_handle') in publisher_handles ) """ records = RecordsFactory.create_instance( None, [ COLUMN_NAME.CALLBACK_OBJECT, COLUMN_NAME.CALLBACK_START_TIMESTAMP, COLUMN_NAME.PUBLISHER_HANDLE, COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP, COLUMN_NAME.MESSAGE_TIMESTAMP ] ) if intra_callback_object is not None: for publisher_handle in publisher_handles: key = (intra_callback_object, publisher_handle) if key in self._grouped_intra_comm_records: records_ = self._grouped_intra_comm_records[key].clone() records.concat(records_) records.sort(COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP) return records def publish_records( self, publisher_handles: List[int], ) -> RecordsInterface: """ Compose publish records. Parameters ---------- publisher_handles : List[int] Returns ------- RecordsInterface Equivalent to the following process. records = lttng.compose_publish_records() records.filter_if( lambda x: x.get('publisher_handle') in publisher_handles ] ) """ records = self._grouped_publish_records pub_records = RecordsFactory.create_instance( None, [ COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP, COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP, COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP, COLUMN_NAME.RCL_PUBLISH_TIMESTAMP, COLUMN_NAME.DDS_WRITE_TIMESTAMP, COLUMN_NAME.MESSAGE_TIMESTAMP, COLUMN_NAME.SOURCE_TIMESTAMP, COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP, COLUMN_NAME.TILDE_MESSAGE_ID, ] ) for publisher_handle in publisher_handles: if publisher_handle in records: inter_pub_records = records[publisher_handle].clone() pub_records.concat(inter_pub_records) return pub_records def tilde_publish_records( self, tilde_publishers: Sequence[int] ) -> RecordsInterface: """ Compose tilde publish records. Parameters ---------- tilde_publishers : Sequence[int] Returns ------- RecordsInterface Equivalent to the following process. 
records = lttng.compose_tilde_publish_records() records.filter_if( lambda x: x.get('tilde_publisher') in tilde_publishers ) """ tilde_grouped_records = self._grouped_tilde_pub_records tilde_records = RecordsFactory.create_instance( None, [ COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP, COLUMN_NAME.TILDE_PUBLISHER, COLUMN_NAME.TILDE_MESSAGE_ID, COLUMN_NAME.TILDE_SUBSCRIPTION, ]) for tilde_publisher in tilde_publishers: if tilde_publisher in tilde_grouped_records: tilde_records_ = tilde_grouped_records[tilde_publisher].clone() tilde_records.concat(tilde_records_) tilde_records.drop_columns([COLUMN_NAME.TILDE_PUBLISHER]) return tilde_records def _expand_key_tuple( self, group: Dict[Tuple[int, ...], RecordsInterface] ) -> Dict[int, RecordsInterface]: group_: Dict[int, RecordsInterface] = {} for key in group.keys(): assert len(key) == 1 group_[key[0]] = group[key] return group_ def callback_records( self, inter_callback_object: int, intra_callback_object: Optional[int] ) -> RecordsInterface: """ Compose callback records. Parameters ---------- inter_callback_object : int intra_callback_object : Optional[int] Returns ------- RecordsInterface Equivalent to the following process. records = lttng.compose_callback_records() records.filter_if( lambda x: x.['callback_object] in [inter_callback_object, intra_callback_object] ) """ records = self._grouped_callback_records callback_records = RecordsFactory.create_instance( None, [COLUMN_NAME.CALLBACK_START_TIMESTAMP, COLUMN_NAME.CALLBACK_END_TIMESTAMP] ) if inter_callback_object in records: inter_callback_records = records[inter_callback_object].clone() callback_records.concat(inter_callback_records) if intra_callback_object is not None and intra_callback_object in records: intra_callback_records = records[intra_callback_object].clone() callback_records.concat(intra_callback_records) callback_records.sort(COLUMN_NAME.CALLBACK_START_TIMESTAMP) return callback_records @cached_property def _grouped_callback_records(self) -> Dict[int, RecordsInterface]: records = self._lttng.compose_callback_records() group = records.groupby([COLUMN_NAME.CALLBACK_OBJECT]) return self._expand_key_tuple(group) @cached_property def _grouped_inter_comm_records(self) -> Dict[Tuple[int, ...], RecordsInterface]: records = self._lttng.compose_inter_proc_comm_records() return records.groupby([COLUMN_NAME.CALLBACK_OBJECT, COLUMN_NAME.PUBLISHER_HANDLE]) @cached_property def _grouped_intra_comm_records(self) -> Dict[Tuple[int, ...], RecordsInterface]: records = self._lttng.compose_intra_proc_comm_records() return records.groupby([COLUMN_NAME.CALLBACK_OBJECT, COLUMN_NAME.PUBLISHER_HANDLE]) @cached_property def _grouped_publish_records(self) -> Dict[int, RecordsInterface]: records = self._lttng.compose_publish_records() group = records.groupby([COLUMN_NAME.PUBLISHER_HANDLE]) return self._expand_key_tuple(group) @cached_property def _grouped_sub_records(self) -> Dict[int, RecordsInterface]: records = self._lttng.compose_subscribe_records() group = records.groupby([COLUMN_NAME.CALLBACK_OBJECT]) return self._expand_key_tuple(group) @cached_property def _grouped_tilde_pub_records(self) -> Dict[int, RecordsInterface]: records = self._lttng.compose_tilde_publish_records() group = records.groupby([COLUMN_NAME.TILDE_PUBLISHER]) return self._expand_key_tuple(group) @cached_property def _grouped_tilde_sub_records(self) -> Dict[int, RecordsInterface]: records = self._lttng.compose_tilde_subscribe_records() group = records.groupby([COLUMN_NAME.TILDE_SUBSCRIPTION]) return self._expand_key_tuple(group)
35.128686
98
0.63869
from functools import cached_property from logging import getLogger from typing import Dict, List, Optional, Sequence, Tuple, Union from caret_analyze.value_objects.message_context import MessageContext, MessageContextType from .lttng import Lttng from .value_objects import PublisherValueLttng, SubscriptionCallbackValueLttng from ...common import Columns, Util from ...exceptions import (InvalidArgumentError, UnsupportedNodeRecordsError, UnsupportedTypeError) from ...infra.interface import RuntimeDataProvider from ...infra.lttng.column_names import COLUMN_NAME from ...record import (merge, merge_sequencial, RecordsFactory, RecordsInterface) from ...value_objects import (CallbackChain, CallbackStructValue, CommunicationStructValue, InheritUniqueStamp, NodePathStructValue, PublisherStructValue, Qos, SubscriptionCallbackStructValue, SubscriptionStructValue, Tilde, TimerCallbackStructValue, UseLatestMessage, VariablePassingStructValue) logger = getLogger(__name__) class RecordsProviderLttng(RuntimeDataProvider): def __init__( self, lttng: Lttng ) -> None: self._lttng = lttng self._source = FilteredRecordsSource(lttng) self._helper = RecordsProviderLttngHelper(lttng) def communication_records( self, comm_val: CommunicationStructValue ) -> RecordsInterface: assert comm_val.subscribe_callback_name is not None if self.is_intra_process_communication(comm_val): return self._compose_intra_proc_comm_records(comm_val) return self._compose_inter_proc_comm_records(comm_val) def node_records( self, node_path_val: NodePathStructValue, ) -> RecordsInterface: if node_path_val.message_context is None: msg = 'message context is None. return dummy record. ' msg += f'node_name: {node_path_val.node_name}' logger.info(msg) return RecordsFactory.create_instance() if node_path_val.message_context_type == MessageContextType.CALLBACK_CHAIN: return NodeRecordsCallbackChain(self, node_path_val).to_records() if node_path_val.message_context_type == MessageContextType.INHERIT_UNIQUE_STAMP: return NodeRecordsInheritUniqueTimestamp(self, node_path_val).to_records() if node_path_val.message_context_type == MessageContextType.USE_LATEST_MESSAGE: return NodeRecordsUseLatestMessage(self, node_path_val).to_records() if node_path_val.message_context_type == MessageContextType.TILDE: return NodeRecordsTilde(self, node_path_val).to_records() raise UnsupportedNodeRecordsError( 'Unknown message context. ' f'message_context = {node_path_val.message_context.context_type.type_name}' ) def callback_records( self, callback: CallbackStructValue ) -> RecordsInterface: callback_objects = self._helper.get_callback_objects(callback) callback_records = self._source.callback_records(*callback_objects) columns = [ COLUMN_NAME.CALLBACK_START_TIMESTAMP, COLUMN_NAME.CALLBACK_END_TIMESTAMP ] self._format(callback_records, columns) self._rename_column(callback_records, callback.callback_name, None) return callback_records def subscribe_records( self, subscription: SubscriptionStructValue ) -> RecordsInterface: callback = subscription.callback assert callback is not None tilde_subscription = self._helper.get_tilde_subscription(callback) if tilde_subscription is None: return self._subscribe_records(subscription) return self._subscribe_records_with_tilde(subscription) def _subscribe_records( self, subscription: SubscriptionStructValue ) -> RecordsInterface: callback = subscription.callback if callback is None: raise InvalidArgumentError( 'callback_value is None. 
' f'node_name: {subscription.node_name}' f'callback_name: {subscription.callback_name}' f'topic_name: {subscription.topic_name}' ) callback_objects = self._helper.get_subscription_callback_objects(callback) sub_records = self._source.sub_records(*callback_objects) columns = [ COLUMN_NAME.CALLBACK_START_TIMESTAMP, COLUMN_NAME.MESSAGE_TIMESTAMP, COLUMN_NAME.SOURCE_TIMESTAMP, ] self._format(sub_records, columns) self._rename_column( sub_records, callback.callback_name, subscription.topic_name ) return sub_records def _subscribe_records_with_tilde( self, subscription: SubscriptionStructValue ) -> RecordsInterface: callback = subscription.callback if callback is None: raise InvalidArgumentError( 'callback_value is None. ' f'node_name: {subscription.node_name}' f'callback_name: {subscription.callback_name}' f'topic_name: {subscription.topic_name}' ) callback_objects = self._helper.get_subscription_callback_objects(callback) sub_records = self._source.sub_records(*callback_objects) tilde_subscription = self._helper.get_tilde_subscription(callback) if tilde_subscription is not None: tilde_records = self._source.tilde_subscribe_records(tilde_subscription) sub_records = merge_sequencial( left_records=sub_records, right_records=tilde_records, left_stamp_key=COLUMN_NAME.CALLBACK_START_TIMESTAMP, right_stamp_key=COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP, join_left_key=None, join_right_key=None, how='left', columns=Columns(sub_records.columns + tilde_records.columns).as_list(), progress_label='binding: tilde_records', ) columns = [ COLUMN_NAME.CALLBACK_START_TIMESTAMP, COLUMN_NAME.MESSAGE_TIMESTAMP, COLUMN_NAME.SOURCE_TIMESTAMP, COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP, COLUMN_NAME.TILDE_MESSAGE_ID, ] self._format(sub_records, columns) self._rename_column( sub_records, callback.callback_name, subscription.topic_name ) return sub_records def _publish_records( self, publisher: PublisherStructValue ) -> RecordsInterface: publisher_handles = self._helper.get_publisher_handles(publisher) pub_records = self._source.publish_records(publisher_handles) columns = [ COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP, COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP, COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP, COLUMN_NAME.RCL_PUBLISH_TIMESTAMP, COLUMN_NAME.DDS_WRITE_TIMESTAMP, COLUMN_NAME.MESSAGE_TIMESTAMP, COLUMN_NAME.SOURCE_TIMESTAMP, ] self._format(pub_records, columns) self._rename_column(pub_records, None, publisher.topic_name) return pub_records def publish_records( self, publisher: PublisherStructValue ) -> RecordsInterface: tilde_publishers = self._helper.get_tilde_publishers(publisher) if len(tilde_publishers) == 0: return self._publish_records(publisher) return self._publish_records_with_tilde(publisher) def _publish_records_with_tilde( self, publisher: PublisherStructValue ) -> RecordsInterface: publisher_handles = self._helper.get_publisher_handles(publisher) pub_records = self._source.publish_records(publisher_handles) tilde_publishers = self._helper.get_tilde_publishers(publisher) tilde_records = self._source.tilde_publish_records(tilde_publishers) pub_records = merge_sequencial( left_records=tilde_records, right_records=pub_records, left_stamp_key='tilde_publish_timestamp', right_stamp_key='rclcpp_publish_timestamp', join_left_key=None, join_right_key=None, columns=Columns(tilde_records.columns + pub_records.columns).as_list(), how='right', progress_label='binding: tilde_records', ) columns = [ COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP, COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP, COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP, 
COLUMN_NAME.RCL_PUBLISH_TIMESTAMP, COLUMN_NAME.DDS_WRITE_TIMESTAMP, COLUMN_NAME.MESSAGE_TIMESTAMP, COLUMN_NAME.SOURCE_TIMESTAMP, COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP, COLUMN_NAME.TILDE_MESSAGE_ID, ] self._format(pub_records, columns) self._rename_column(pub_records, None, publisher.topic_name) return pub_records def tilde_records( self, subscription: SubscriptionStructValue, publisher: PublisherStructValue ) -> RecordsInterface: assert subscription.callback is not None publisher_addrs = self._helper.get_tilde_publishers(publisher) subscription_addr = self._helper.get_tilde_subscription(subscription.callback) assert len(publisher_addrs) > 0 assert subscription_addr is not None pub_records = self._source.tilde_publish_records(publisher_addrs) sub_records = self._source.tilde_subscribe_records(subscription_addr) records = merge( left_records=sub_records, right_records=pub_records, join_left_key=COLUMN_NAME.TILDE_MESSAGE_ID, join_right_key=COLUMN_NAME.TILDE_MESSAGE_ID, columns=Columns(sub_records.columns + pub_records.columns).as_list(), how='left', progress_label='binding: tilde pub and sub records' ) columns = [ COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP, COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP ] self._format(records, columns) self._rename_column(records, subscription.callback_name, subscription.topic_name) return records def get_rmw_implementation(self) -> str: return self._lttng.get_rmw_impl() def get_qos( self, pub_sub: Union[PublisherStructValue, SubscriptionStructValue] ) -> Qos: if isinstance(pub_sub, SubscriptionStructValue): sub_cb = pub_sub.callback if sub_cb is None: raise InvalidArgumentError('Failed to get callback information.' 'pub.callback is None') sub_cb_lttng = self._helper.get_lttng_subscription(sub_cb) return self._lttng.get_subscription_qos(sub_cb_lttng) pubs_lttng = self._helper.get_lttng_publishers(pub_sub) if len(pubs_lttng) == 0: raise InvalidArgumentError('No publisher matching the criteria was found.') if len(pubs_lttng) > 1: logger.warning( 'Multiple publishers matching your criteria were found.' 
'The value of the first publisher qos will be returned.') return self._lttng.get_publisher_qos(pubs_lttng[0]) def variable_passing_records( self, variable_passing_info: VariablePassingStructValue ) -> RecordsInterface: read_records: RecordsInterface = self.callback_records( variable_passing_info.callback_read) write_records: RecordsInterface = self.callback_records( variable_passing_info.callback_write) read_records.drop_columns([read_records.columns[-1]]) write_records.drop_columns([write_records.columns[0]]) columns = [ write_records.columns[0], read_records.columns[0], ] merged_records = merge_sequencial( left_records=write_records, right_records=read_records, left_stamp_key=columns[0], right_stamp_key=columns[1], join_left_key=None, join_right_key=None, columns=columns, how='left_use_latest', progress_label='binding: callback_end and callback_start' ) merged_records.sort(columns[0]) self._format(merged_records, columns) return merged_records def is_intra_process_communication( self, communication_value: CommunicationStructValue ) -> Optional[bool]: intra_record = self._compose_intra_proc_comm_records(communication_value) return len(intra_record) > 0 def _compose_intra_proc_comm_records( self, comm_info: CommunicationStructValue, ) -> RecordsInterface: publisher = comm_info.publisher subscription_cb = comm_info.subscribe_callback assert subscription_cb is not None assert isinstance(subscription_cb, SubscriptionCallbackStructValue) publisher_handles = self._helper.get_publisher_handles(publisher) callback_object_intra = self._helper.get_subscription_callback_object_intra( subscription_cb) records = self._source.intra_comm_records(publisher_handles, callback_object_intra) columns = [ COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP, COLUMN_NAME.CALLBACK_START_TIMESTAMP, ] self._format(records, columns) records.rename_columns({ COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP: COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP }) self._rename_column(records, comm_info.subscribe_callback_name, comm_info.topic_name) return records def _compose_inter_proc_comm_records( self, comm_value: CommunicationStructValue ) -> RecordsInterface: publisher = comm_value.publisher subscription_cb = comm_value.subscribe_callback assert subscription_cb is not None assert isinstance(subscription_cb, SubscriptionCallbackStructValue) publisher_handles = self._helper.get_publisher_handles(publisher) callback_object = self._helper.get_subscription_callback_object_inter(subscription_cb) records = self._source.inter_comm_records(publisher_handles, callback_object) columns = [ COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP, COLUMN_NAME.RCL_PUBLISH_TIMESTAMP, COLUMN_NAME.DDS_WRITE_TIMESTAMP, COLUMN_NAME.CALLBACK_START_TIMESTAMP ] self._format(records, columns) records.rename_columns({ COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP: COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP }) self._rename_column(records, comm_value.subscribe_callback_name, comm_value.topic_name) return records @staticmethod def _format(records: RecordsInterface, columns: List[str]): drop = list(set(records.columns) - set(columns)) records.drop_columns(drop) records.reindex(columns) @staticmethod def _rename_column( records: RecordsInterface, callback_name: Optional[str], topic_name: Optional[str] ) -> None: rename_dict = {} if COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP] = \ f'{topic_name}/{COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP}' if COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP in records.columns: 
rename_dict[COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP] = \ f'{topic_name}/{COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP}' if COLUMN_NAME.CALLBACK_START_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.CALLBACK_START_TIMESTAMP] = \ f'{callback_name}/{COLUMN_NAME.CALLBACK_START_TIMESTAMP}' if COLUMN_NAME.CALLBACK_END_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.CALLBACK_END_TIMESTAMP] = \ f'{callback_name}/{COLUMN_NAME.CALLBACK_END_TIMESTAMP}' if COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP] = \ f'{topic_name}/{COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP}' if COLUMN_NAME.RCL_PUBLISH_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.RCL_PUBLISH_TIMESTAMP] = \ f'{topic_name}/{COLUMN_NAME.RCL_PUBLISH_TIMESTAMP}' if COLUMN_NAME.DDS_WRITE_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.DDS_WRITE_TIMESTAMP] = \ f'{topic_name}/{COLUMN_NAME.DDS_WRITE_TIMESTAMP}' if COLUMN_NAME.MESSAGE_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.MESSAGE_TIMESTAMP] = \ f'{topic_name}/{COLUMN_NAME.MESSAGE_TIMESTAMP}' if COLUMN_NAME.SOURCE_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.SOURCE_TIMESTAMP] = \ f'{topic_name}/{COLUMN_NAME.SOURCE_TIMESTAMP}' if COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP] = \ f'{topic_name}/{COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP}' if COLUMN_NAME.TILDE_MESSAGE_ID in records.columns: rename_dict[COLUMN_NAME.TILDE_MESSAGE_ID] = \ f'{topic_name}/{COLUMN_NAME.TILDE_MESSAGE_ID}' if COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP] = \ f'{topic_name}/{COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP}' records.rename_columns(rename_dict) class RecordsProviderLttngHelper: def __init__( self, lttng: Lttng ) -> None: from .bridge import LttngBridge self._bridge = LttngBridge(lttng) def get_callback_objects( self, callback: CallbackStructValue ) -> Tuple[int, Optional[int]]: if isinstance(callback, TimerCallbackStructValue): return self.get_timer_callback_object(callback), None if isinstance(callback, SubscriptionCallbackStructValue): obj = self.get_subscription_callback_object_inter(callback) obj_intra = self.get_subscription_callback_object_intra(callback) if obj_intra is not None: return obj, obj_intra return obj, None msg = 'Failed to get callback object. ' msg += f'{callback.callback_type.type_name} is not supported.' 
raise UnsupportedTypeError(msg) def get_timer_callback_object( self, callback: TimerCallbackStructValue ) -> int: callback_lttng = self._bridge.get_timer_callback(callback) return callback_lttng.callback_object def get_subscription_callback_objects( self, callback: SubscriptionCallbackStructValue ) -> Tuple[int, Optional[int]]: return self.get_callback_objects(callback) def get_subscription_callback_object_inter( self, callback: SubscriptionCallbackStructValue ) -> int: callback_lttng = self._bridge.get_subscription_callback(callback) return callback_lttng.callback_object def get_subscription_callback_object_intra( self, callback: SubscriptionCallbackStructValue ) -> Optional[int]: callback_lttng = self._bridge.get_subscription_callback(callback) return callback_lttng.callback_object_intra def get_tilde_subscription( self, callback: SubscriptionCallbackStructValue ) -> Optional[int]: callback_lttng = self._bridge.get_subscription_callback(callback) return callback_lttng.tilde_subscription def get_publisher_handles( self, publisher: PublisherStructValue ) -> List[int]: publisher_lttng = self._bridge.get_publishers(publisher) return [pub_info.publisher_handle for pub_info in publisher_lttng] def get_tilde_publishers( self, publisher_info: PublisherStructValue ) -> List[int]: publisher_lttng = self._bridge.get_publishers(publisher_info) publisher = [pub_info.tilde_publisher for pub_info in publisher_lttng if pub_info.tilde_publisher is not None] return publisher def get_lttng_publishers( self, publisher: PublisherStructValue ) -> List[PublisherValueLttng]: return self._bridge.get_publishers(publisher) def get_lttng_subscription( self, callback: SubscriptionCallbackStructValue ) -> SubscriptionCallbackValueLttng: return self._bridge.get_subscription_callback(callback) class NodeRecordsCallbackChain: def __init__( self, provider: RecordsProviderLttng, node_path: NodePathStructValue, ) -> None: self._provider = provider self._validate(node_path) self._val = node_path def to_records(self): chain_info = self._val.child if isinstance(chain_info[0], CallbackStructValue): cb_info = chain_info[0] records = self._provider.callback_records(cb_info) else: var_pass_info = chain_info[0] records = self._provider.variable_passing_records(var_pass_info) for chain_element in chain_info[1:]: if isinstance(chain_element, CallbackStructValue): records_ = self._provider.callback_records(chain_element) join_key = records_.columns[0] records = merge( left_records=records, right_records=records_, join_left_key=join_key, join_right_key=join_key, columns=Columns(records.columns + records_.columns), how='left', progress_label='binding: callback_start and callback end' ) continue if isinstance(chain_element, VariablePassingStructValue): records_ = self._provider.variable_passing_records(chain_element) join_key = records_.columns[0] records = merge( left_records=records, right_records=records_, join_left_key=join_key, join_right_key=join_key, columns=Columns(records.columns + records_.columns).as_list(), how='left', progress_label='binding: callback_end and callback start' ) continue last_element = chain_info[-1] if isinstance(last_element, CallbackStructValue) \ and self._val.publisher is not None: last_callback_end_name = Util.filter_items( lambda x: COLUMN_NAME.CALLBACK_END_TIMESTAMP in x, records.columns)[-1] records.drop_columns([last_callback_end_name]) last_callback_start_name = Util.filter_items( lambda x: COLUMN_NAME.CALLBACK_START_TIMESTAMP in x, records.columns)[-1] publish_records = 
self._provider.publish_records(self._val.publisher) publish_column = publish_records.columns[0] columns = records.columns + [publish_column] records = merge_sequencial( left_records=records, right_records=publish_records, join_left_key=None, join_right_key=None, left_stamp_key=last_callback_start_name, right_stamp_key=publish_column, columns=Columns(records.columns + publish_records.columns).as_list(), how='left', progress_label='binding: callback_start and publish', ) records.drop_columns(list(set(records.columns) - set(columns))) records.reindex(columns) return records @staticmethod def _validate( node_path: NodePathStructValue, ) -> None: if node_path.callbacks is None: raise UnsupportedNodeRecordsError('') if node_path.callbacks is None: raise UnsupportedNodeRecordsError('callback values is None.') if not isinstance(node_path.message_context, CallbackChain): msg = 'node_path.message context is not CallbackChain' raise UnsupportedNodeRecordsError(msg) head_callback = node_path.callbacks[0] tail_callback = node_path.callbacks[-1] if node_path.publish_topic_name is not None and \ tail_callback.publish_topic_names is not None and \ len(tail_callback.publish_topic_names) != 0 and \ node_path.publish_topic_name not in tail_callback.publish_topic_names: raise UnsupportedNodeRecordsError('') if node_path.subscribe_topic_name is not None and \ node_path.subscribe_topic_name != head_callback.subscribe_topic_name: raise UnsupportedNodeRecordsError('') class NodeRecordsInheritUniqueTimestamp: def __init__( self, provider: RecordsProviderLttng, node_path: NodePathStructValue, ) -> None: if node_path.message_context is None: raise UnsupportedNodeRecordsError('node_path.message context is None') if not isinstance(node_path.message_context, InheritUniqueStamp): msg = 'node_path.message context is not InheritUniqueStamp' raise UnsupportedNodeRecordsError(msg) self._provider = provider self._context: InheritUniqueStamp = node_path.message_context self._validate(node_path, self._context) self._node_path = node_path def to_records(self): sub_records = self._provider.subscribe_records(self._node_path.subscription) pub_records = self._provider.publish_records(self._node_path.publisher) columns = [ sub_records.columns[0], pub_records.columns[0], ] join_left_key = f'{self._node_path.subscribe_topic_name}/{COLUMN_NAME.MESSAGE_TIMESTAMP}' join_right_key = f'{self._node_path.publish_topic_name}/{COLUMN_NAME.MESSAGE_TIMESTAMP}' pub_sub_records = merge_sequencial( left_records=sub_records, right_records=pub_records, left_stamp_key=sub_records.columns[0], right_stamp_key=pub_records.columns[0], join_left_key=join_left_key, join_right_key=join_right_key, columns=Columns(sub_records.columns + pub_records.columns).as_list(), how='left_use_latest', progress_label='binding: inherit unique timestamp', ) drop_columns = list(set(pub_sub_records.columns) - set(columns)) pub_sub_records.drop_columns(drop_columns) pub_sub_records.reindex(columns) return pub_sub_records @staticmethod def _validate( node_path: NodePathStructValue, context: InheritUniqueStamp, ) -> None: def is_valid() -> bool: if context.publisher_topic_name != node_path.publish_topic_name: return False if context.subscription_topic_name != node_path.subscribe_topic_name: return False return True if is_valid(): return None msg = f'InheritUniqueStamp cannot build records. 
\n{node_path} \n{context}' raise UnsupportedNodeRecordsError(msg) class NodeRecordsUseLatestMessage: def __init__( self, provider: RecordsProviderLttng, node_path: NodePathStructValue, ) -> None: if node_path.message_context is None: raise UnsupportedNodeRecordsError('node_path.message context is None') if not isinstance(node_path.message_context, UseLatestMessage): raise UnsupportedNodeRecordsError('node_path.message context is not UseLatestMessage') self._provider = provider self._context: UseLatestMessage = node_path.message_context self._validate(node_path, self._context) self._node_path = node_path def to_records(self): sub_records = self._provider.subscribe_records(self._node_path.subscription) pub_records = self._provider.publish_records(self._node_path.publisher) columns = [ sub_records.columns[0], f'{self._node_path.publish_topic_name}/rclcpp_publish_timestamp', ] pub_sub_records = merge_sequencial( left_records=sub_records, right_records=pub_records, left_stamp_key=sub_records.columns[0], right_stamp_key=pub_records.columns[0], join_left_key=None, join_right_key=None, columns=Columns(sub_records.columns + pub_records.columns).as_list(), how='left_use_latest', progress_label='binding use_latest_message.' ) drop_columns = list(set(pub_sub_records.columns) - set(columns)) pub_sub_records.drop_columns(drop_columns) pub_sub_records.reindex(columns) return pub_sub_records @staticmethod def _validate( node_path: NodePathStructValue, context: UseLatestMessage, ) -> None: def is_valid() -> bool: if context.publisher_topic_name != node_path.publish_topic_name: return False if context.subscription_topic_name != node_path.subscribe_topic_name: return False return True if is_valid(): return None msg = f'UseLatest cannot build records. \n{node_path} \n{context}' raise UnsupportedNodeRecordsError(msg) class NodeRecordsTilde: def __init__( self, provider: RecordsProviderLttng, node_path: NodePathStructValue, ) -> None: if node_path.message_context is None: raise UnsupportedNodeRecordsError('node_path.message context is None') if not isinstance(node_path.message_context, Tilde): raise UnsupportedNodeRecordsError('node_path.message context is not UseLatestMessage') self._provider = provider self._context: MessageContext = node_path.message_context self._validate(node_path, self._context) self._node_path = node_path def to_records(self): tilde_records = self._provider.tilde_records( self._node_path.subscription, self._node_path.publisher) sub_records = self._provider.subscribe_records(self._node_path.subscription) pub_records = self._provider.publish_records(self._node_path.publisher) left_stamp_key = Util.find_one( lambda x: COLUMN_NAME.CALLBACK_START_TIMESTAMP in x, sub_records.columns) right_stamp_key = Util.find_one( lambda x: COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP in x, sub_records.columns) records = merge_sequencial( left_records=sub_records, right_records=tilde_records, left_stamp_key=left_stamp_key, right_stamp_key=right_stamp_key, join_left_key=None, join_right_key=None, columns=Columns(sub_records.columns + tilde_records.columns).as_list(), how='left', progress_label='binding tilde subscribe records.' 
) left_stamp_key = Util.find_one( lambda x: COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP in x, records.columns) right_stamp_key = Util.find_one( lambda x: COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP in x, pub_records.columns) records = merge_sequencial( left_records=records, right_records=pub_records, left_stamp_key=left_stamp_key, right_stamp_key=right_stamp_key, join_left_key=None, join_right_key=None, columns=Columns(records.columns + pub_records.columns).as_list(), how='left', progress_label='binding tilde publish records.' ) columns = [ Util.find_one(lambda x: COLUMN_NAME.CALLBACK_START_TIMESTAMP in x, records.columns), Util.find_one(lambda x: COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP in x, records.columns), ] drop_columns = list(set(records.columns) - set(columns)) records.drop_columns(drop_columns) records.reindex(columns) return records @staticmethod def _validate( node_path: NodePathStructValue, context: MessageContext, ) -> None: def is_valid() -> bool: if not isinstance(context, Tilde): return False if context.publisher_topic_name != node_path.publish_topic_name: return False if context.subscription_topic_name != node_path.subscribe_topic_name: return False return True if is_valid(): return None msg = f'UseLatest cannot build records. \n{node_path} \n{context}' raise UnsupportedNodeRecordsError(msg) class FilteredRecordsSource: def __init__(self, lttng: Lttng): self._lttng = lttng def tilde_subscribe_records( self, tilde_subscription: int ) -> RecordsInterface: sub_records = RecordsFactory.create_instance( None, [ COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP, COLUMN_NAME.TILDE_SUBSCRIPTION, COLUMN_NAME.TILDE_MESSAGE_ID ] ) if tilde_subscription is not None and \ tilde_subscription in self._grouped_tilde_sub_records: sub_records_ = self._grouped_tilde_sub_records[tilde_subscription].clone() sub_records.concat(sub_records_) sub_records.drop_columns([COLUMN_NAME.TILDE_SUBSCRIPTION]) return sub_records def sub_records( self, inter_callback_object: int, intra_callback_object: Optional[int] ) -> RecordsInterface: sub_records = RecordsFactory.create_instance( None, [ COLUMN_NAME.CALLBACK_START_TIMESTAMP, COLUMN_NAME.MESSAGE_TIMESTAMP, COLUMN_NAME.SOURCE_TIMESTAMP, ] ) records = self._grouped_sub_records if inter_callback_object in records: sub_records.concat(records[inter_callback_object].clone()) if intra_callback_object is not None and intra_callback_object in records: intra_sub_records = records[intra_callback_object].clone() sub_records.concat(intra_sub_records) sub_records.sort(COLUMN_NAME.CALLBACK_START_TIMESTAMP) return sub_records def inter_comm_records( self, publisher_handles: List[int], callback_object: int ) -> RecordsInterface: records = RecordsFactory.create_instance( None, [ COLUMN_NAME.CALLBACK_OBJECT, COLUMN_NAME.CALLBACK_START_TIMESTAMP, COLUMN_NAME.PUBLISHER_HANDLE, COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP, COLUMN_NAME.RCL_PUBLISH_TIMESTAMP, COLUMN_NAME.DDS_WRITE_TIMESTAMP ] ) for publisher_handle in publisher_handles: key = (callback_object, publisher_handle) if key in self._grouped_inter_comm_records: comm_records = self._grouped_inter_comm_records[key].clone() records.concat(comm_records) records.sort(COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP) return records def intra_comm_records( self, publisher_handles: List[int], intra_callback_object: Optional[int] ) -> RecordsInterface: records = RecordsFactory.create_instance( None, [ COLUMN_NAME.CALLBACK_OBJECT, COLUMN_NAME.CALLBACK_START_TIMESTAMP, COLUMN_NAME.PUBLISHER_HANDLE, COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP, 
COLUMN_NAME.MESSAGE_TIMESTAMP ] ) if intra_callback_object is not None: for publisher_handle in publisher_handles: key = (intra_callback_object, publisher_handle) if key in self._grouped_intra_comm_records: records_ = self._grouped_intra_comm_records[key].clone() records.concat(records_) records.sort(COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP) return records def publish_records( self, publisher_handles: List[int], ) -> RecordsInterface: records = self._grouped_publish_records pub_records = RecordsFactory.create_instance( None, [ COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP, COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP, COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP, COLUMN_NAME.RCL_PUBLISH_TIMESTAMP, COLUMN_NAME.DDS_WRITE_TIMESTAMP, COLUMN_NAME.MESSAGE_TIMESTAMP, COLUMN_NAME.SOURCE_TIMESTAMP, COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP, COLUMN_NAME.TILDE_MESSAGE_ID, ] ) for publisher_handle in publisher_handles: if publisher_handle in records: inter_pub_records = records[publisher_handle].clone() pub_records.concat(inter_pub_records) return pub_records def tilde_publish_records( self, tilde_publishers: Sequence[int] ) -> RecordsInterface: tilde_grouped_records = self._grouped_tilde_pub_records tilde_records = RecordsFactory.create_instance( None, [ COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP, COLUMN_NAME.TILDE_PUBLISHER, COLUMN_NAME.TILDE_MESSAGE_ID, COLUMN_NAME.TILDE_SUBSCRIPTION, ]) for tilde_publisher in tilde_publishers: if tilde_publisher in tilde_grouped_records: tilde_records_ = tilde_grouped_records[tilde_publisher].clone() tilde_records.concat(tilde_records_) tilde_records.drop_columns([COLUMN_NAME.TILDE_PUBLISHER]) return tilde_records def _expand_key_tuple( self, group: Dict[Tuple[int, ...], RecordsInterface] ) -> Dict[int, RecordsInterface]: group_: Dict[int, RecordsInterface] = {} for key in group.keys(): assert len(key) == 1 group_[key[0]] = group[key] return group_ def callback_records( self, inter_callback_object: int, intra_callback_object: Optional[int] ) -> RecordsInterface: records = self._grouped_callback_records callback_records = RecordsFactory.create_instance( None, [COLUMN_NAME.CALLBACK_START_TIMESTAMP, COLUMN_NAME.CALLBACK_END_TIMESTAMP] ) if inter_callback_object in records: inter_callback_records = records[inter_callback_object].clone() callback_records.concat(inter_callback_records) if intra_callback_object is not None and intra_callback_object in records: intra_callback_records = records[intra_callback_object].clone() callback_records.concat(intra_callback_records) callback_records.sort(COLUMN_NAME.CALLBACK_START_TIMESTAMP) return callback_records @cached_property def _grouped_callback_records(self) -> Dict[int, RecordsInterface]: records = self._lttng.compose_callback_records() group = records.groupby([COLUMN_NAME.CALLBACK_OBJECT]) return self._expand_key_tuple(group) @cached_property def _grouped_inter_comm_records(self) -> Dict[Tuple[int, ...], RecordsInterface]: records = self._lttng.compose_inter_proc_comm_records() return records.groupby([COLUMN_NAME.CALLBACK_OBJECT, COLUMN_NAME.PUBLISHER_HANDLE]) @cached_property def _grouped_intra_comm_records(self) -> Dict[Tuple[int, ...], RecordsInterface]: records = self._lttng.compose_intra_proc_comm_records() return records.groupby([COLUMN_NAME.CALLBACK_OBJECT, COLUMN_NAME.PUBLISHER_HANDLE]) @cached_property def _grouped_publish_records(self) -> Dict[int, RecordsInterface]: records = self._lttng.compose_publish_records() group = records.groupby([COLUMN_NAME.PUBLISHER_HANDLE]) return self._expand_key_tuple(group) @cached_property def 
_grouped_sub_records(self) -> Dict[int, RecordsInterface]: records = self._lttng.compose_subscribe_records() group = records.groupby([COLUMN_NAME.CALLBACK_OBJECT]) return self._expand_key_tuple(group) @cached_property def _grouped_tilde_pub_records(self) -> Dict[int, RecordsInterface]: records = self._lttng.compose_tilde_publish_records() group = records.groupby([COLUMN_NAME.TILDE_PUBLISHER]) return self._expand_key_tuple(group) @cached_property def _grouped_tilde_sub_records(self) -> Dict[int, RecordsInterface]: records = self._lttng.compose_tilde_subscribe_records() group = records.groupby([COLUMN_NAME.TILDE_SUBSCRIPTION]) return self._expand_key_tuple(group)
true
true
f7026a0eac2c1ee044ec2734f8fee2a9493d727c
2,977
py
Python
hexahue_map.py
kusuwada/hexahue
1c41a51fb40dbbfe24cf2c4d394808f932be6bb5
[ "MIT" ]
null
null
null
hexahue_map.py
kusuwada/hexahue
1c41a51fb40dbbfe24cf2c4d394808f932be6bb5
[ "MIT" ]
null
null
null
hexahue_map.py
kusuwada/hexahue
1c41a51fb40dbbfe24cf2c4d394808f932be6bb5
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
# -*- coding:utf-8 -*-

import yaml


class HexahueMap():
    def __init__(self, space_color):
        pink = (255, 0, 255)
        red = (255, 0, 0)
        green = (0, 255, 0)
        yellow = (255, 255, 0)
        blue = (0, 0, 255)
        sky = (0, 255, 255)
        white = (255, 255, 255)
        gray = (128, 128, 128)
        black = (0, 0, 0)
        self.hmap = {}
        self.hmap[(pink, red, green, yellow, blue, sky)] = 'A'
        self.hmap[(red, pink, green, yellow, blue, sky)] = 'B'
        self.hmap[(red, green, pink, yellow, blue, sky)] = 'C'
        self.hmap[(red, green, yellow, pink, blue, sky)] = 'D'
        self.hmap[(red, green, yellow, blue, pink, sky)] = 'E'
        self.hmap[(red, green, yellow, blue, sky, pink)] = 'F'
        self.hmap[(green, red, yellow, blue, sky, pink)] = 'G'
        self.hmap[(green, yellow, red, blue, sky, pink)] = 'H'
        self.hmap[(green, yellow, blue, red, sky, pink)] = 'I'
        self.hmap[(green, yellow, blue, sky, red, pink)] = 'J'
        self.hmap[(green, yellow, blue, sky, pink, red)] = 'K'
        self.hmap[(yellow, green, blue, sky, pink, red)] = 'L'
        self.hmap[(yellow, blue, green, sky, pink, red)] = 'M'
        self.hmap[(yellow, blue, sky, green, pink, red)] = 'N'
        self.hmap[(yellow, blue, sky, pink, green, red)] = 'O'
        self.hmap[(yellow, blue, sky, pink, red, green)] = 'P'
        self.hmap[(blue, yellow, sky, pink, red, green)] = 'Q'
        self.hmap[(blue, sky, yellow, pink, red, green)] = 'R'
        self.hmap[(blue, sky, pink, yellow, red, green)] = 'S'
        self.hmap[(blue, sky, pink, red, yellow, green)] = 'T'
        self.hmap[(blue, sky, pink, red, green, yellow)] = 'U'
        self.hmap[(sky, blue, pink, red, green, yellow)] = 'V'
        self.hmap[(sky, pink, blue, red, green, yellow)] = 'W'
        self.hmap[(sky, pink, red, blue, green, yellow)] = 'X'
        self.hmap[(sky, pink, red, green, blue, yellow)] = 'Y'
        self.hmap[(sky, pink, red, green, yellow, blue)] = 'Z'
        self.hmap[(black, white, white, black, black, white)] = '.'
        self.hmap[(white, black, black, white, white, black)] = ','
        if space_color == 'black':
            self.hmap[(black, black, black, black, black, black)] = ' '
        elif space_color == 'white':
            self.hmap[(white, white, white, white, white, white)] = ' '
        elif space_color == 'all':
            self.hmap[(black, black, black, black, black, black)] = ' '
            self.hmap[(white, white, white, white, white, white)] = ' '
        else:
            raise Exception('[Error] invalid space setting: ' + space_color)
        self.hmap[(black, gray, white, black, gray, white)] = '0'
        self.hmap[(gray, black, white, black, gray, white)] = '1'
        self.hmap[(gray, white, black, black, gray, white)] = '2'
        self.hmap[(gray, white, black, gray, black, white)] = '3'
        self.hmap[(gray, white, black, gray, white, black)] = '4'
        self.hmap[(white, gray, black, gray, white, black)] = '5'
        self.hmap[(white, black, gray, gray, white, black)] = '6'
        self.hmap[(white, black, gray, white, gray, black)] = '7'
        self.hmap[(white, black, gray, white, black, gray)] = '8'
        self.hmap[(black, white, gray, white, black, gray)] = '9'
45.106061
67
0.59523
import yaml


class HexahueMap():
    def __init__(self, space_color):
        pink = (255, 0, 255)
        red = (255, 0, 0)
        green = (0, 255, 0)
        yellow = (255, 255, 0)
        blue = (0, 0, 255)
        sky = (0, 255, 255)
        white = (255, 255, 255)
        gray = (128, 128, 128)
        black = (0, 0, 0)
        self.hmap = {}
        self.hmap[(pink, red, green, yellow, blue, sky)] = 'A'
        self.hmap[(red, pink, green, yellow, blue, sky)] = 'B'
        self.hmap[(red, green, pink, yellow, blue, sky)] = 'C'
        self.hmap[(red, green, yellow, pink, blue, sky)] = 'D'
        self.hmap[(red, green, yellow, blue, pink, sky)] = 'E'
        self.hmap[(red, green, yellow, blue, sky, pink)] = 'F'
        self.hmap[(green, red, yellow, blue, sky, pink)] = 'G'
        self.hmap[(green, yellow, red, blue, sky, pink)] = 'H'
        self.hmap[(green, yellow, blue, red, sky, pink)] = 'I'
        self.hmap[(green, yellow, blue, sky, red, pink)] = 'J'
        self.hmap[(green, yellow, blue, sky, pink, red)] = 'K'
        self.hmap[(yellow, green, blue, sky, pink, red)] = 'L'
        self.hmap[(yellow, blue, green, sky, pink, red)] = 'M'
        self.hmap[(yellow, blue, sky, green, pink, red)] = 'N'
        self.hmap[(yellow, blue, sky, pink, green, red)] = 'O'
        self.hmap[(yellow, blue, sky, pink, red, green)] = 'P'
        self.hmap[(blue, yellow, sky, pink, red, green)] = 'Q'
        self.hmap[(blue, sky, yellow, pink, red, green)] = 'R'
        self.hmap[(blue, sky, pink, yellow, red, green)] = 'S'
        self.hmap[(blue, sky, pink, red, yellow, green)] = 'T'
        self.hmap[(blue, sky, pink, red, green, yellow)] = 'U'
        self.hmap[(sky, blue, pink, red, green, yellow)] = 'V'
        self.hmap[(sky, pink, blue, red, green, yellow)] = 'W'
        self.hmap[(sky, pink, red, blue, green, yellow)] = 'X'
        self.hmap[(sky, pink, red, green, blue, yellow)] = 'Y'
        self.hmap[(sky, pink, red, green, yellow, blue)] = 'Z'
        self.hmap[(black, white, white, black, black, white)] = '.'
        self.hmap[(white, black, black, white, white, black)] = ','
        if space_color == 'black':
            self.hmap[(black, black, black, black, black, black)] = ' '
        elif space_color == 'white':
            self.hmap[(white, white, white, white, white, white)] = ' '
        elif space_color == 'all':
            self.hmap[(black, black, black, black, black, black)] = ' '
            self.hmap[(white, white, white, white, white, white)] = ' '
        else:
            raise Exception('[Error] invalid space setting: ' + space_color)
        self.hmap[(black, gray, white, black, gray, white)] = '0'
        self.hmap[(gray, black, white, black, gray, white)] = '1'
        self.hmap[(gray, white, black, black, gray, white)] = '2'
        self.hmap[(gray, white, black, gray, black, white)] = '3'
        self.hmap[(gray, white, black, gray, white, black)] = '4'
        self.hmap[(white, gray, black, gray, white, black)] = '5'
        self.hmap[(white, black, gray, gray, white, black)] = '6'
        self.hmap[(white, black, gray, white, gray, black)] = '7'
        self.hmap[(white, black, gray, white, black, gray)] = '8'
        self.hmap[(black, white, gray, white, black, gray)] = '9'
true
true
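A minimal usage sketch for the hexahue_map.py entry above (my own illustration, not part of the dataset record; it assumes the file is importable as hexahue_map and leaves the unused yaml import aside):

from hexahue_map import HexahueMap

# Build the colour-pattern -> character table, treating all-black tiles as spaces.
hmap = HexahueMap('black').hmap

pink = (255, 0, 255)
red = (255, 0, 0)
green = (0, 255, 0)
yellow = (255, 255, 0)
blue = (0, 0, 255)
sky = (0, 255, 255)

# A 2x3 Hexahue tile, read left-to-right and top-to-bottom, decodes to a letter.
print(hmap[(pink, red, green, yellow, blue, sky)])  # -> 'A'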
f7026a4ae9f688521225dfaaa410970f7fe1bab1
90
py
Python
lesson_tasks/lesson6/num1.7.py
NikaEgorova/goiteens-python3-egorova
809059abf3464cbe4f0f116e52ce53534a0ca5ba
[ "MIT" ]
null
null
null
lesson_tasks/lesson6/num1.7.py
NikaEgorova/goiteens-python3-egorova
809059abf3464cbe4f0f116e52ce53534a0ca5ba
[ "MIT" ]
null
null
null
lesson_tasks/lesson6/num1.7.py
NikaEgorova/goiteens-python3-egorova
809059abf3464cbe4f0f116e52ce53534a0ca5ba
[ "MIT" ]
null
null
null
# 7 завдання
for n in range(1, 101):
    print(n, "Я не буду їсти палички Бобо на уроці")
22.5
52
0.655556
for n in range(1, 101):
    print(n, "Я не буду їсти палички Бобо на уроці")
true
true
f7026b9e1ac955c01abf85c34e1a1742cdc43c1d
6,915
py
Python
SoftLayer/managers/ordering.py
corneil/softlayer-python
bbaf562fb76536c5cc652e356729723f38f48b66
[ "MIT" ]
1
2019-11-06T13:54:07.000Z
2019-11-06T13:54:07.000Z
SoftLayer/managers/ordering.py
underscorephil/softlayer-python
567540a328d5258e55594466127cd22b9a04a2ea
[ "MIT" ]
null
null
null
SoftLayer/managers/ordering.py
underscorephil/softlayer-python
567540a328d5258e55594466127cd22b9a04a2ea
[ "MIT" ]
1
2020-07-07T12:18:26.000Z
2020-07-07T12:18:26.000Z
""" SoftLayer.ordering ~~~~~~~~~~~~~~~~~~ Ordering Manager :license: MIT, see LICENSE for more details. """ class OrderingManager(object): """Manages hardware devices. :param SoftLayer.API.Client client: an API client instance """ def __init__(self, client): self.client = client def get_packages_of_type(self, package_types, mask=None): """Get packages that match a certain type. Each ordering package has a type, so return all packages that match the types we are looking for :param list package_types: List of strings representing the package type keynames we are interested in. :param string mask: Mask to specify the properties we want to retrieve """ package_service = self.client['Product_Package'] _filter = { 'type': { 'keyName': { 'operation': 'in', 'options': [ {'name': 'data', 'value': package_types} ], }, }, } packages = package_service.getAllObjects(mask=mask, filter=_filter) packages = self.filter_outlet_packages(packages) return packages @staticmethod def filter_outlet_packages(packages): """Remove packages designated as OUTLET. Those type of packages must be handled in a different way, and they are not supported at the moment. :param packages: Dictionary of packages. Name and description keys must be present in each of them. """ non_outlet_packages = [] for package in packages: if all(['OUTLET' not in package.get('description', '').upper(), 'OUTLET' not in package.get('name', '').upper()]): non_outlet_packages.append(package) return non_outlet_packages @staticmethod def get_only_active_packages(packages): """Return only active packages. If a package is active, it is eligible for ordering This will inspect the 'isActive' property on the provided packages :param packages Dictionary of packages, isActive key must be present """ active_packages = [] for package in packages: if package['isActive']: active_packages.append(package) return active_packages def get_package_by_type(self, package_type, mask=None): """Get a single package of a given type. Syntactic sugar to retrieve a single package of a given type. If multiple packages share the given type, this will return the first one returned by the API. If no packages are found, returns None :param package_type string representing the package type key name we are interested in """ packages = self.get_packages_of_type([package_type], mask) if len(packages) == 0: return None else: return packages.pop() def get_package_id_by_type(self, package_type): """Return the package ID of a Product Package with a given type. :param package_type string representing the package type key name we are interested in :raises ValueError when no package of the given type is found """ mask = "mask[id, name, description, isActive, type[keyName]]" package = self.get_package_by_type(package_type, mask) if package: return package['id'] else: raise ValueError("No package found for type: " + package_type) def get_quotes(self): """Retrieve a list of quotes. :return a list of SoftLayer_Billing_Order_Quote """ quotes = self.client['Account'].getActiveQuotes() return quotes def get_quote_details(self, quote_id): """Retrieve quote details. :param quote_id ID number of target quote """ quote = self.client['Billing_Order_Quote'].getObject(id=quote_id) return quote def get_order_container(self, quote_id): """Generate an order container from a quote object. 
:param quote_id ID number of target quote """ quote = self.client['Billing_Order_Quote'] container = quote.getRecalculatedOrderContainer(id=quote_id) return container['orderContainers'][0] def generate_order_template(self, quote_id, extra, quantity=1): """Generate a complete order template. :param int quote_id: ID of target quote :param list extra: List of dictionaries that have extra details about the order such as hostname or domain names for virtual servers or hardware nodes :param int quantity: Number of ~things~ to order """ container = self.get_order_container(quote_id) container['quantity'] = quantity # NOTE(kmcdonald): This will only work with virtualGuests and hardware. # There has to be a better way, since this is based on # an existing quote that supposedly knows about this # detail if container['packageId'] == 46: product_type = 'virtualGuests' else: product_type = 'hardware' if len(extra) != quantity: raise ValueError("You must specify extra for each server in the " "quote") container[product_type] = [] for extra_details in extra: container[product_type].append(extra_details) container['presetId'] = None return container def verify_quote(self, quote_id, extra, quantity=1): """Verifies that a quote order is valid. :param int quote_id: ID for the target quote :param list hostnames: hostnames of the servers :param string domain: domain of the new servers :param int quantity: Quantity to override default """ container = self.generate_order_template(quote_id, extra, quantity=quantity) return self.client['Product_Order'].verifyOrder(container) def order_quote(self, quote_id, extra, quantity=1): """Places an order using a quote :param int quote_id: ID for the target quote :param list hostnames: hostnames of the servers :param string domain: domain of the new server :param int quantity: Quantity to override default """ container = self.generate_order_template(quote_id, extra, quantity=quantity) return self.client['Product_Order'].placeOrder(container)
34.232673
79
0.599566
class OrderingManager(object): def __init__(self, client): self.client = client def get_packages_of_type(self, package_types, mask=None): package_service = self.client['Product_Package'] _filter = { 'type': { 'keyName': { 'operation': 'in', 'options': [ {'name': 'data', 'value': package_types} ], }, }, } packages = package_service.getAllObjects(mask=mask, filter=_filter) packages = self.filter_outlet_packages(packages) return packages @staticmethod def filter_outlet_packages(packages): non_outlet_packages = [] for package in packages: if all(['OUTLET' not in package.get('description', '').upper(), 'OUTLET' not in package.get('name', '').upper()]): non_outlet_packages.append(package) return non_outlet_packages @staticmethod def get_only_active_packages(packages): active_packages = [] for package in packages: if package['isActive']: active_packages.append(package) return active_packages def get_package_by_type(self, package_type, mask=None): packages = self.get_packages_of_type([package_type], mask) if len(packages) == 0: return None else: return packages.pop() def get_package_id_by_type(self, package_type): mask = "mask[id, name, description, isActive, type[keyName]]" package = self.get_package_by_type(package_type, mask) if package: return package['id'] else: raise ValueError("No package found for type: " + package_type) def get_quotes(self): quotes = self.client['Account'].getActiveQuotes() return quotes def get_quote_details(self, quote_id): quote = self.client['Billing_Order_Quote'].getObject(id=quote_id) return quote def get_order_container(self, quote_id): quote = self.client['Billing_Order_Quote'] container = quote.getRecalculatedOrderContainer(id=quote_id) return container['orderContainers'][0] def generate_order_template(self, quote_id, extra, quantity=1): container = self.get_order_container(quote_id) container['quantity'] = quantity if container['packageId'] == 46: product_type = 'virtualGuests' else: product_type = 'hardware' if len(extra) != quantity: raise ValueError("You must specify extra for each server in the " "quote") container[product_type] = [] for extra_details in extra: container[product_type].append(extra_details) container['presetId'] = None return container def verify_quote(self, quote_id, extra, quantity=1): container = self.generate_order_template(quote_id, extra, quantity=quantity) return self.client['Product_Order'].verifyOrder(container) def order_quote(self, quote_id, extra, quantity=1): container = self.generate_order_template(quote_id, extra, quantity=quantity) return self.client['Product_Order'].placeOrder(container)
true
true
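A hedged usage sketch for the OrderingManager entry above (my own illustration, not part of the dataset record; it assumes the softlayer-python client is configured through environment variables, and the package type keyname is only an example value):

import SoftLayer
from SoftLayer.managers.ordering import OrderingManager

# create_client_from_env() picks up the SoftLayer username/API key from the environment.
client = SoftLayer.create_client_from_env()
ordering = OrderingManager(client)

# Look up a product package by its type keyname and list any active quotes.
package_id = ordering.get_package_id_by_type('VIRTUAL_SERVER_INSTANCE')  # example keyname
quotes = ordering.get_quotes()
print(package_id, len(quotes))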
f7026cd4a1731d33e95675f39f860e7e24cf332d
433
py
Python
BOJ/review/boj_14501.py
mrbartrns/swacademy_structure
778f0546030385237c383d81ec37d5bd9ed1272d
[ "MIT" ]
null
null
null
BOJ/review/boj_14501.py
mrbartrns/swacademy_structure
778f0546030385237c383d81ec37d5bd9ed1272d
[ "MIT" ]
null
null
null
BOJ/review/boj_14501.py
mrbartrns/swacademy_structure
778f0546030385237c383d81ec37d5bd9ed1272d
[ "MIT" ]
null
null
null
# BOJ 14501
import sys

si = sys.stdin.readline
t = [0] * 17
dp = [0] * 17
n = int(si())
for i in range(1, n + 1):
    m, o = map(int, si().split())
    t[i] = m
    dp[i] = o


def solve(n):
    ans = 0
    for i in range(n, 0, -1):
        if i + t[i] > n + 1:
            dp[i] = dp[i + 1]
        else:
            dp[i] = max(dp[i + 1], dp[i] + dp[i + t[i]])
        ans = max(ans, dp[i])
    return ans


print(solve(n))
15.464286
56
0.418014
import sys

si = sys.stdin.readline
t = [0] * 17
dp = [0] * 17
n = int(si())
for i in range(1, n + 1):
    m, o = map(int, si().split())
    t[i] = m
    dp[i] = o


def solve(n):
    ans = 0
    for i in range(n, 0, -1):
        if i + t[i] > n + 1:
            dp[i] = dp[i + 1]
        else:
            dp[i] = max(dp[i + 1], dp[i] + dp[i + t[i]])
        ans = max(ans, dp[i])
    return ans


print(solve(n))
true
true
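A worked check of the dp recurrence in the boj_14501.py entry above (my own sketch, not part of the dataset record; the values are the BOJ 14501 sample consultations, for which the expected answer is 45, hard-coded here instead of being read from stdin):

n = 7
t = [0, 3, 5, 1, 1, 2, 4, 2]          # t[i]: days consultation i takes
p = [0, 10, 20, 10, 20, 15, 40, 200]  # p[i]: pay for consultation i

dp = [0] * (n + 2)                    # dp[i]: best profit earnable from day i onward
for i in range(n, 0, -1):
    if i + t[i] > n + 1:              # consultation i would run past the last day
        dp[i] = dp[i + 1]
    else:
        dp[i] = max(dp[i + 1], p[i] + dp[i + t[i]])

print(dp[1])                          # 45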
f7026cf1cb17c940ba14868190ef89a631252ceb
1,663
py
Python
data/base_dataset.py
jaxondl/Recycle-GAN
d107d97e964d9d3921f36fe3c7837886f0f82eda
[ "MIT" ]
null
null
null
data/base_dataset.py
jaxondl/Recycle-GAN
d107d97e964d9d3921f36fe3c7837886f0f82eda
[ "MIT" ]
null
null
null
data/base_dataset.py
jaxondl/Recycle-GAN
d107d97e964d9d3921f36fe3c7837886f0f82eda
[ "MIT" ]
null
null
null
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
from torchvision.transforms import InterpolationMode


class BaseDataset(data.Dataset):
    def __init__(self):
        super(BaseDataset, self).__init__()

    def name(self):
        return 'BaseDataset'

    def initialize(self, opt):
        pass


def get_transform(opt):
    transform_list = []
    if opt.resize_or_crop == 'resize_and_crop':
        osize = [opt.loadSize, opt.loadSize]
        transform_list.append(transforms.Resize(osize, InterpolationMode.BICUBIC))
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'crop':
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'scale_width':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.fineSize)))
    elif opt.resize_or_crop == 'scale_width_and_crop':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.loadSize)))
        transform_list.append(transforms.RandomCrop(opt.fineSize))

    if opt.isTrain and not opt.no_flip:
        transform_list.append(transforms.RandomHorizontalFlip())

    transform_list += [transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)


def __scale_width(img, target_width):
    ow, oh = img.size
    if (ow == target_width):
        return img
    w = target_width
    h = int(target_width * oh / ow)
    return img.resize((w, h), Image.BICUBIC)
35.382979
82
0.669874
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
from torchvision.transforms import InterpolationMode


class BaseDataset(data.Dataset):
    def __init__(self):
        super(BaseDataset, self).__init__()

    def name(self):
        return 'BaseDataset'

    def initialize(self, opt):
        pass


def get_transform(opt):
    transform_list = []
    if opt.resize_or_crop == 'resize_and_crop':
        osize = [opt.loadSize, opt.loadSize]
        transform_list.append(transforms.Resize(osize, InterpolationMode.BICUBIC))
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'crop':
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'scale_width':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.fineSize)))
    elif opt.resize_or_crop == 'scale_width_and_crop':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.loadSize)))
        transform_list.append(transforms.RandomCrop(opt.fineSize))

    if opt.isTrain and not opt.no_flip:
        transform_list.append(transforms.RandomHorizontalFlip())

    transform_list += [transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)


def __scale_width(img, target_width):
    ow, oh = img.size
    if (ow == target_width):
        return img
    w = target_width
    h = int(target_width * oh / ow)
    return img.resize((w, h), Image.BICUBIC)
true
true
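A short usage sketch for the get_transform helper in the base_dataset.py entry above (my own illustration, not part of the dataset record; the module name base_dataset and the 286/256 sizes are assumptions standing in for the project's real option parser):

from types import SimpleNamespace
from base_dataset import get_transform

# Hypothetical options object mimicking the fields get_transform expects.
opt = SimpleNamespace(resize_or_crop='resize_and_crop',
                      loadSize=286, fineSize=256,
                      isTrain=True, no_flip=False)

transform = get_transform(opt)
# transform(pil_image) now resizes to 286x286, random-crops to 256x256,
# randomly flips, converts to a tensor and normalizes channels to roughly [-1, 1].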
f7026dc30b9cca1f621e1e1f41f734e3e544092c
203
py
Python
tenx/apps.py
bpotvin-bccrc/colossus
fa5ca7ce4cfe794c7d2167acb868aa9167988941
[ "MIT" ]
2
2018-10-03T16:05:14.000Z
2019-03-08T23:01:29.000Z
tenx/apps.py
bpotvin-bccrc/colossus
fa5ca7ce4cfe794c7d2167acb868aa9167988941
[ "MIT" ]
3
2019-05-09T22:48:22.000Z
2020-06-05T18:52:05.000Z
tenx/apps.py
bpotvin-bccrc/colossus
fa5ca7ce4cfe794c7d2167acb868aa9167988941
[ "MIT" ]
4
2018-08-16T22:25:10.000Z
2021-02-19T16:10:15.000Z
""" Created Oct 19, 2017 @author: Spencer Vatrt-Watts (github.com/Spenca) """ from __future__ import unicode_literals from django.apps import AppConfig class TenxConfig(AppConfig): name = 'tenx'
15.615385
48
0.743842
from __future__ import unicode_literals

from django.apps import AppConfig


class TenxConfig(AppConfig):
    name = 'tenx'
true
true
f7026deba14677cec6bc3070ab5690249babe22c
5,966
py
Python
affine_cipher.py
premnagdeo/Cryptography
061b6557dd94d88ce9c669af8d6a6a414a834adc
[ "MIT" ]
1
2020-08-07T18:35:45.000Z
2020-08-07T18:35:45.000Z
affine_cipher.py
premnagdeo/Cryptography
061b6557dd94d88ce9c669af8d6a6a414a834adc
[ "MIT" ]
null
null
null
affine_cipher.py
premnagdeo/Cryptography
061b6557dd94d88ce9c669af8d6a6a414a834adc
[ "MIT" ]
null
null
null
import pyperclip import math class Affine_Cipher: def __init__(self): self.SYMBOLS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890 !?.' def check_key(self, key): keyA = key // len(self.SYMBOLS) keyB = key % len(self.SYMBOLS) # Weak Key Checks if keyA == 1: print('Cipher is weak if key A is 1. Choose a different key.') return False if keyB == 0: print('Cipher is weak if key B is 0. Choose a different key.') return False if keyA < 0 or keyB < 0 or keyB > len(self.SYMBOLS) - 1: print('Key A must be greater than 0 and Key B must be between 0 and {}.'.format(len(self.SYMBOLS) - 1)) return False if math.gcd(keyA, len(self.SYMBOLS)) != 1: print("Key A {} and the symbol set size {} are not relatively prime. Choose a different key.".format(keyA, len(self.SYMBOLS))) return False return True def mod_inv(self, a, m): if math.gcd(a, m) != 1: return False u1, u2, u3 = 1, 0, a v1, v2, v3 = 0, 1, m while v3 != 0: q = u3 // v3 v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3 return u1 % m def encrypt(self, plain_text, key): keyA = key // len(self.SYMBOLS) keyB = key % len(self.SYMBOLS) cipher_text = [] for char in plain_text: if char in self.SYMBOLS: index = self.SYMBOLS.find(char) cipher_text.append(self.SYMBOLS[(index * keyA + keyB) % len(self.SYMBOLS)]) else: cipher_text.append(char) return "".join(cipher_text) def decrypt(self, cipher_text, key): keyA = key // len(self.SYMBOLS) keyB = key % len(self.SYMBOLS) mod_inverse = self.mod_inv(keyA, len(self.SYMBOLS)) if mod_inverse == False: print("MOD INV FALSE") plain_text = [] for char in cipher_text: if char in self.SYMBOLS: index = self.SYMBOLS.find(char) plain_text.append(self.SYMBOLS[(index - keyB) * mod_inverse % len(self.SYMBOLS)]) else: plain_text.append(char) return "".join(plain_text) def brute_force_decrypt(self, cipher_text): for key in range(len(self.SYMBOLS) ** 2): keyA = key // len(self.SYMBOLS) if math.gcd(keyA, len(self.SYMBOLS)) != 1: continue decrypted_text = self.decrypt(cipher_text, key) print("Key = {}, Plain text = {}".format(key, decrypted_text)) return None def ask_user(): print("Select an option:") print("1. To continue") print("2. To exit") option = input() return option if __name__ == "__main__": affine_cipher = Affine_Cipher() while True: try: print("Select an option:") print("1. Encrypt a message") print("2. 
Decrypt a message") option = input() if option == '1': print("Enter plain text to be encrypted: ") plain_text = input() print("Enter a number (key) for encryption: ") key = int(input()) while affine_cipher.check_key(key) == False: print("Enter the new key for encryption: ") key = int(input()) cipher_text = affine_cipher.encrypt(plain_text, key) print("Cipher text =", cipher_text) pyperclip.copy(cipher_text) pyperclip.paste() print("The cipher text has been copied to your clipboard" + "\n") option = ask_user() if option == '1': continue elif option == '2': break else: print("Incorrect input.") print("Exiting program") break elif option == '2': print("Enter cipher text to be decrypted: ") cipher_text = input() print("Enter key for decryption: ") print("If you do not know the key and would like to brute force the combinations, enter the word - crack") key = input() if key == 'crack': affine_cipher.brute_force_decrypt(cipher_text) else: key = int(key) plain_text = affine_cipher.decrypt(cipher_text, key) print("Plain text =", plain_text) pyperclip.copy(plain_text) pyperclip.paste() print("The plain text has been copied to your clipboard" + "\n") option = ask_user() if option == '1': continue elif option == '2': print("Exiting program") break else: print("Incorrect input.") print("Exiting program") break else: print("Incorrect input.") option = ask_user() if option == '1': continue elif option == '2': print("Exiting program") break else: print("Incorrect input.") print("Exiting program") break except Exception as e: option = ask_user() if option == '1': continue elif option == '2': print("Exiting program") break else: print("Incorrect input.") print("Exiting program") break
32.075269
138
0.483741
import pyperclip import math class Affine_Cipher: def __init__(self): self.SYMBOLS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890 !?.' def check_key(self, key): keyA = key // len(self.SYMBOLS) keyB = key % len(self.SYMBOLS) if keyA == 1: print('Cipher is weak if key A is 1. Choose a different key.') return False if keyB == 0: print('Cipher is weak if key B is 0. Choose a different key.') return False if keyA < 0 or keyB < 0 or keyB > len(self.SYMBOLS) - 1: print('Key A must be greater than 0 and Key B must be between 0 and {}.'.format(len(self.SYMBOLS) - 1)) return False if math.gcd(keyA, len(self.SYMBOLS)) != 1: print("Key A {} and the symbol set size {} are not relatively prime. Choose a different key.".format(keyA, len(self.SYMBOLS))) return False return True def mod_inv(self, a, m): if math.gcd(a, m) != 1: return False u1, u2, u3 = 1, 0, a v1, v2, v3 = 0, 1, m while v3 != 0: q = u3 // v3 v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3 return u1 % m def encrypt(self, plain_text, key): keyA = key // len(self.SYMBOLS) keyB = key % len(self.SYMBOLS) cipher_text = [] for char in plain_text: if char in self.SYMBOLS: index = self.SYMBOLS.find(char) cipher_text.append(self.SYMBOLS[(index * keyA + keyB) % len(self.SYMBOLS)]) else: cipher_text.append(char) return "".join(cipher_text) def decrypt(self, cipher_text, key): keyA = key // len(self.SYMBOLS) keyB = key % len(self.SYMBOLS) mod_inverse = self.mod_inv(keyA, len(self.SYMBOLS)) if mod_inverse == False: print("MOD INV FALSE") plain_text = [] for char in cipher_text: if char in self.SYMBOLS: index = self.SYMBOLS.find(char) plain_text.append(self.SYMBOLS[(index - keyB) * mod_inverse % len(self.SYMBOLS)]) else: plain_text.append(char) return "".join(plain_text) def brute_force_decrypt(self, cipher_text): for key in range(len(self.SYMBOLS) ** 2): keyA = key // len(self.SYMBOLS) if math.gcd(keyA, len(self.SYMBOLS)) != 1: continue decrypted_text = self.decrypt(cipher_text, key) print("Key = {}, Plain text = {}".format(key, decrypted_text)) return None def ask_user(): print("Select an option:") print("1. To continue") print("2. To exit") option = input() return option if __name__ == "__main__": affine_cipher = Affine_Cipher() while True: try: print("Select an option:") print("1. Encrypt a message") print("2. 
Decrypt a message") option = input() if option == '1': print("Enter plain text to be encrypted: ") plain_text = input() print("Enter a number (key) for encryption: ") key = int(input()) while affine_cipher.check_key(key) == False: print("Enter the new key for encryption: ") key = int(input()) cipher_text = affine_cipher.encrypt(plain_text, key) print("Cipher text =", cipher_text) pyperclip.copy(cipher_text) pyperclip.paste() print("The cipher text has been copied to your clipboard" + "\n") option = ask_user() if option == '1': continue elif option == '2': break else: print("Incorrect input.") print("Exiting program") break elif option == '2': print("Enter cipher text to be decrypted: ") cipher_text = input() print("Enter key for decryption: ") print("If you do not know the key and would like to brute force the combinations, enter the word - crack") key = input() if key == 'crack': affine_cipher.brute_force_decrypt(cipher_text) else: key = int(key) plain_text = affine_cipher.decrypt(cipher_text, key) print("Plain text =", plain_text) pyperclip.copy(plain_text) pyperclip.paste() print("The plain text has been copied to your clipboard" + "\n") option = ask_user() if option == '1': continue elif option == '2': print("Exiting program") break else: print("Incorrect input.") print("Exiting program") break else: print("Incorrect input.") option = ask_user() if option == '1': continue elif option == '2': print("Exiting program") break else: print("Incorrect input.") print("Exiting program") break except Exception as e: option = ask_user() if option == '1': continue elif option == '2': print("Exiting program") break else: print("Incorrect input.") print("Exiting program") break
true
true
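A round-trip sketch for the Affine_Cipher entry above (my own illustration, not part of the dataset record; it assumes the file is importable as affine_cipher and that pyperclip, imported at the top of that file, is installed). The symbol set has 66 characters, so a key encodes keyA = key // 66 and keyB = key % 66:

from affine_cipher import Affine_Cipher

cipher = Affine_Cipher()
key = 5 * 66 + 8                       # keyA = 5 (coprime with 66), keyB = 8
assert cipher.check_key(key)

secret = cipher.encrypt('Attack at dawn!', key)
print(secret)
print(cipher.decrypt(secret, key))     # -> 'Attack at dawn!'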
f7026ec04664ba501e52f493107ef3d5daa3443c
711
py
Python
fantasyStocks/stocks/migrations/0004_auto_20151129_1625.py
ddsnowboard/FantasyStocks
ebf4cee0fdf3b66b803e0456363c470ddc700027
[ "Apache-2.0" ]
null
null
null
fantasyStocks/stocks/migrations/0004_auto_20151129_1625.py
ddsnowboard/FantasyStocks
ebf4cee0fdf3b66b803e0456363c470ddc700027
[ "Apache-2.0" ]
51
2015-05-20T13:57:19.000Z
2018-07-28T03:48:15.000Z
fantasyStocks/stocks/migrations/0004_auto_20151129_1625.py
ddsnowboard/FantasyStocks
ebf4cee0fdf3b66b803e0456363c470ddc700027
[ "Apache-2.0" ]
1
2016-08-07T13:41:22.000Z
2016-08-07T13:41:22.000Z
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import datetime
from django.utils.timezone import utc


class Migration(migrations.Migration):

    dependencies = [
        ('stocks', '0003_auto_20151129_1623'),
    ]

    operations = [
        migrations.AlterField(
            model_name='floor',
            name='floorPlayer',
            field=models.ForeignKey(to='stocks.Player', related_name='FloorPlayer'),
        ),
        migrations.AlterField(
            model_name='stock',
            name='last_updated',
            field=models.DateTimeField(default=datetime.datetime(2015, 11, 29, 22, 5, 30, 24205, tzinfo=utc)),
        ),
    ]
26.333333
110
0.623066
from __future__ import unicode_literals

from django.db import models, migrations
import datetime
from django.utils.timezone import utc


class Migration(migrations.Migration):

    dependencies = [
        ('stocks', '0003_auto_20151129_1623'),
    ]

    operations = [
        migrations.AlterField(
            model_name='floor',
            name='floorPlayer',
            field=models.ForeignKey(to='stocks.Player', related_name='FloorPlayer'),
        ),
        migrations.AlterField(
            model_name='stock',
            name='last_updated',
            field=models.DateTimeField(default=datetime.datetime(2015, 11, 29, 22, 5, 30, 24205, tzinfo=utc)),
        ),
    ]
true
true
f7026f66e04017736f515e9d6a921a954d009622
4,450
py
Python
05-Image-Descriptors/solution.py
brenov/ip-usp
06f9f16229a4587e38a3ae89fbe3394d5f1572fd
[ "MIT" ]
null
null
null
05-Image-Descriptors/solution.py
brenov/ip-usp
06f9f16229a4587e38a3ae89fbe3394d5f1572fd
[ "MIT" ]
null
null
null
05-Image-Descriptors/solution.py
brenov/ip-usp
06f9f16229a4587e38a3ae89fbe3394d5f1572fd
[ "MIT" ]
null
null
null
# Name: Breno Maurício de Freitas Viana # NUSP: 11920060 # Course Code: SCC5830 # Year/Semester: 2021/1 # Assignment 5: Image Descriptors import math import numpy as np import imageio from scipy import ndimage np.seterr(divide='ignore', invalid='ignore') LEVELS = 256 # ----- (1) Read Parameters # Get the location of the object image `f` f = input().rstrip() # Get the location of the large image `g` g = input().rstrip() # Get the quantisation parameter `b` b = int(input()) # --- Load images # Object image `f` f = imageio.imread(f) # Large image `g` g = imageio.imread(g) # ----- (2) Preprocessing and Quantisation def luminance(img): """ Get a RGB image as input and return a black&white image. """ N, M, _ = img.shape out = np.empty(img.shape) out = 0.299 * img[:,:,0] + 0.587 * img[:,:,1] + 0.114 * img[:,:,2] return out.astype(np.uint8) # --- Convert the images to black&white f = luminance(f) g = luminance(g) # --- Quantise the images to `b` bits B = 8 - b f = f >> B g = g >> B # ----- (3) Image Descriptors def nh_descriptor(f): """ Return the normalized histogram descriptor. """ hist, _ = np.histogram(f, bins=[i for i in range(2 ** b + 1)]) hist = hist / hist.sum() dc = hist / np.linalg.norm(hist) return dc def ht_descriptor(f): """ Return the Haralick texture descriptors (intensity-level co-ocurrence matrix). """ # Calculate the co-occurence matrix N, M = f.shape C = np.zeros((LEVELS, LEVELS)) for x in range(N - 1): for y in range(M - 1): i = f[x, y] j = f[x + 1, y + 1] C[i][j] += 1 C = C / C.sum() # # Computing the descriptors N, M = C.shape # energy = np.power(C, 2).sum() # epsilon = 0.001 entropy = - (C * np.log(C + epsilon)).sum() # A = np.fromfunction(lambda i, j: (i - j) ** 2, (N, M), dtype=int) contrast = (1 / math.pow(N, 2)) * (C * A).sum() # mu_i, si_i = 0, 0 mu_j, si_j = 0, 0 for k in range(N): a1 = C[k,:].sum() mu_i += k * a1 si_i += math.pow(k - mu_i, 2) * a1 # a2 = C[:,k].sum() mu_j += k * a2 si_j += math.pow(k - mu_j, 2) * a2 # A = np.fromfunction(lambda i, j: (i - j) ** 2, (N, M), dtype=int) correlation = (A * C).sum() - mu_i * mu_j correlation /= (si_i * si_j) # homogeneity = 0 # A = np.fromfunction(lambda i, j: (1 + abs(i - j)), (N, M), dtype=int) homogeneity = (C * A).sum() # # Return the Haralick texture descriptors dt = np.array([energy, entropy, contrast, correlation, homogeneity]) dt = dt / np.linalg.norm(dt) return dt def hg_descriptor(f): """ Return the histogram of oriented gradients descriptor. """ wsx = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]]) wsy = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]) # f = f.astype(np.float64) fx = ndimage.convolve(f, wsx) fy = ndimage.convolve(f, wsy) # N, M = f.shape # div = np.sqrt(np.power(fx, 2) + np.power(fy, 2)).sum() Mg = np.sqrt(np.power(fx, 2) + np.power(fy, 2)) / div # sigma = np.zeros(f.shape) sigma = np.arctan(fy / fx) + np.pi / 2 sigma = np.degrees(sigma) sigma = np.digitize(sigma, np.arange(0, 180, 20)) sigma = sigma.astype(np.uint8) # dg = np.zeros(9) for x in range(N): for y in range(M): dg[sigma[x][y] - 1] += Mg[x][y] # dg = dg / np.linalg.norm(dg) return dg # --- Compute the image descriptors # Calculate the object image descriptors dc = nh_descriptor(f) dt = ht_descriptor(f) dg = hg_descriptor(f) d = np.concatenate((dc, dt, dg)) # ----- (4) Finding Our Object def distance(d, di): """ Calculate the distance of two descriptors. 
""" return math.sqrt(np.power(d - di, 2).sum()) # --- Search for the object image location in the original image size = f.shape[0] step = size // 2 N, M = g.shape N = N // step M = M // step dist = np.iinfo(np.uint8).max pos_x = None pos_y = None for i in range(N - 1): for j in range(M - 1): # Calculate the window window = g[i*step:i*step+size, j*step:j*step+size] # Calculate the descriptors of the window window_dc = nh_descriptor(window) window_dt = ht_descriptor(window) window_dg = hg_descriptor(window) window_d = np.concatenate((window_dc, window_dt, window_dg)) # Calculate the distance between the window and the object image ndist = distance(d, window_d) if dist > ndist: dist = ndist pos_x, pos_y = i, j # --- Print the found location print(pos_x, pos_y)
21.813725
80
0.596629
import math import numpy as np import imageio from scipy import ndimage np.seterr(divide='ignore', invalid='ignore') LEVELS = 256 f = input().rstrip() g = input().rstrip() b = int(input()) f = imageio.imread(f) g = imageio.imread(g) def luminance(img): N, M, _ = img.shape out = np.empty(img.shape) out = 0.299 * img[:,:,0] + 0.587 * img[:,:,1] + 0.114 * img[:,:,2] return out.astype(np.uint8) f = luminance(f) g = luminance(g) B = 8 - b f = f >> B g = g >> B def nh_descriptor(f): hist, _ = np.histogram(f, bins=[i for i in range(2 ** b + 1)]) hist = hist / hist.sum() dc = hist / np.linalg.norm(hist) return dc def ht_descriptor(f): N, M = f.shape C = np.zeros((LEVELS, LEVELS)) for x in range(N - 1): for y in range(M - 1): i = f[x, y] j = f[x + 1, y + 1] C[i][j] += 1 C = C / C.sum() N, M = C.shape energy = np.power(C, 2).sum() epsilon = 0.001 entropy = - (C * np.log(C + epsilon)).sum() A = np.fromfunction(lambda i, j: (i - j) ** 2, (N, M), dtype=int) contrast = (1 / math.pow(N, 2)) * (C * A).sum() mu_i, si_i = 0, 0 mu_j, si_j = 0, 0 for k in range(N): a1 = C[k,:].sum() mu_i += k * a1 si_i += math.pow(k - mu_i, 2) * a1 a2 = C[:,k].sum() mu_j += k * a2 si_j += math.pow(k - mu_j, 2) * a2 A = np.fromfunction(lambda i, j: (i - j) ** 2, (N, M), dtype=int) correlation = (A * C).sum() - mu_i * mu_j correlation /= (si_i * si_j) homogeneity = 0 A = np.fromfunction(lambda i, j: (1 + abs(i - j)), (N, M), dtype=int) homogeneity = (C * A).sum() dt = np.array([energy, entropy, contrast, correlation, homogeneity]) dt = dt / np.linalg.norm(dt) return dt def hg_descriptor(f): wsx = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]]) wsy = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]) f = f.astype(np.float64) fx = ndimage.convolve(f, wsx) fy = ndimage.convolve(f, wsy) N, M = f.shape div = np.sqrt(np.power(fx, 2) + np.power(fy, 2)).sum() Mg = np.sqrt(np.power(fx, 2) + np.power(fy, 2)) / div sigma = np.zeros(f.shape) sigma = np.arctan(fy / fx) + np.pi / 2 sigma = np.degrees(sigma) sigma = np.digitize(sigma, np.arange(0, 180, 20)) sigma = sigma.astype(np.uint8) dg = np.zeros(9) for x in range(N): for y in range(M): dg[sigma[x][y] - 1] += Mg[x][y] dg = dg / np.linalg.norm(dg) return dg dc = nh_descriptor(f) dt = ht_descriptor(f) dg = hg_descriptor(f) d = np.concatenate((dc, dt, dg)) def distance(d, di): return math.sqrt(np.power(d - di, 2).sum()) size = f.shape[0] step = size // 2 N, M = g.shape N = N // step M = M // step dist = np.iinfo(np.uint8).max pos_x = None pos_y = None for i in range(N - 1): for j in range(M - 1): window = g[i*step:i*step+size, j*step:j*step+size] window_dc = nh_descriptor(window) window_dt = ht_descriptor(window) window_dg = hg_descriptor(window) window_d = np.concatenate((window_dc, window_dt, window_dg)) ndist = distance(d, window_d) if dist > ndist: dist = ndist pos_x, pos_y = i, j print(pos_x, pos_y)
true
true
f7026fc2576bb460e599b24469ce8955bf0fcf3f
2,929
py
Python
tests/unit_tests/test_trunk_lock.py
miguelangel-nubla/teslajsonpy
b916ee5922c8df46414a14d5a6ee7e0a73b0af66
[ "Apache-2.0" ]
null
null
null
tests/unit_tests/test_trunk_lock.py
miguelangel-nubla/teslajsonpy
b916ee5922c8df46414a14d5a6ee7e0a73b0af66
[ "Apache-2.0" ]
null
null
null
tests/unit_tests/test_trunk_lock.py
miguelangel-nubla/teslajsonpy
b916ee5922c8df46414a14d5a6ee7e0a73b0af66
[ "Apache-2.0" ]
null
null
null
"""Test trunk lock.""" import pytest from tests.tesla_mock import TeslaMock from teslajsonpy.controller import Controller from teslajsonpy.trunk import TrunkLock def test_has_battery(monkeypatch): """Test has_battery().""" _mock = TeslaMock(monkeypatch) _controller = Controller(None) _data = _mock.data_request_vehicle() _lock = TrunkLock(_data, _controller) assert not _lock.has_battery() def test_is_locked_on_init(monkeypatch): """Test is_locked() after initialization.""" _mock = TeslaMock(monkeypatch) _controller = Controller(None) _data = _mock.data_request_vehicle() _lock = TrunkLock(_data, _controller) assert _lock is not None assert not _lock.is_locked() @pytest.mark.asyncio async def test_is_locked_after_update(monkeypatch): """Test is_locked() after an update.""" _mock = TeslaMock(monkeypatch) _controller = Controller(None) _data = _mock.data_request_vehicle() _data["vehicle_state"]["rt"] = 0 _lock = TrunkLock(_data, _controller) await _lock.async_update() assert _lock is not None assert _lock.is_locked() @pytest.mark.asyncio async def test_unlock(monkeypatch): """Test unlock().""" _mock = TeslaMock(monkeypatch) _controller = Controller(None) _data = _mock.data_request_vehicle() _data["vehicle_state"]["rt"] = 0 _lock = TrunkLock(_data, _controller) await _lock.async_update() await _lock.unlock() assert _lock is not None assert not _lock.is_locked() @pytest.mark.asyncio async def test_unlock_already_unlocked(monkeypatch): """Test unlock() when already unlocked.""" _mock = TeslaMock(monkeypatch) _controller = Controller(None) _data = _mock.data_request_vehicle() _data["vehicle_state"]["rt"] = 123 _lock = TrunkLock(_data, _controller) await _lock.async_update() await _lock.unlock() assert _lock is not None assert not _lock.is_locked() # Reset to default for next tests _data["vehicle_state"]["rt"] = 0 @pytest.mark.asyncio async def test_lock(monkeypatch): """Test lock().""" _mock = TeslaMock(monkeypatch) _controller = Controller(None) _data = _mock.data_request_vehicle() _data["vehicle_state"]["rt"] = 123 _lock = TrunkLock(_data, _controller) await _lock.async_update() await _lock.lock() assert _lock is not None assert _lock.is_locked() # Reset to default for next tests _data["vehicle_state"]["rt"] = 0 @pytest.mark.asyncio async def test_lock_already_locked(monkeypatch): """Test lock() when already locked.""" _mock = TeslaMock(monkeypatch) _controller = Controller(None) _data = _mock.data_request_vehicle() _data["vehicle_state"]["rt"] = 0 _lock = TrunkLock(_data, _controller) await _lock.async_update() await _lock.lock() assert _lock is not None assert _lock.is_locked()
22.705426
52
0.697166
import pytest from tests.tesla_mock import TeslaMock from teslajsonpy.controller import Controller from teslajsonpy.trunk import TrunkLock def test_has_battery(monkeypatch): _mock = TeslaMock(monkeypatch) _controller = Controller(None) _data = _mock.data_request_vehicle() _lock = TrunkLock(_data, _controller) assert not _lock.has_battery() def test_is_locked_on_init(monkeypatch): _mock = TeslaMock(monkeypatch) _controller = Controller(None) _data = _mock.data_request_vehicle() _lock = TrunkLock(_data, _controller) assert _lock is not None assert not _lock.is_locked() @pytest.mark.asyncio async def test_is_locked_after_update(monkeypatch): _mock = TeslaMock(monkeypatch) _controller = Controller(None) _data = _mock.data_request_vehicle() _data["vehicle_state"]["rt"] = 0 _lock = TrunkLock(_data, _controller) await _lock.async_update() assert _lock is not None assert _lock.is_locked() @pytest.mark.asyncio async def test_unlock(monkeypatch): _mock = TeslaMock(monkeypatch) _controller = Controller(None) _data = _mock.data_request_vehicle() _data["vehicle_state"]["rt"] = 0 _lock = TrunkLock(_data, _controller) await _lock.async_update() await _lock.unlock() assert _lock is not None assert not _lock.is_locked() @pytest.mark.asyncio async def test_unlock_already_unlocked(monkeypatch): _mock = TeslaMock(monkeypatch) _controller = Controller(None) _data = _mock.data_request_vehicle() _data["vehicle_state"]["rt"] = 123 _lock = TrunkLock(_data, _controller) await _lock.async_update() await _lock.unlock() assert _lock is not None assert not _lock.is_locked() _data["vehicle_state"]["rt"] = 0 @pytest.mark.asyncio async def test_lock(monkeypatch): _mock = TeslaMock(monkeypatch) _controller = Controller(None) _data = _mock.data_request_vehicle() _data["vehicle_state"]["rt"] = 123 _lock = TrunkLock(_data, _controller) await _lock.async_update() await _lock.lock() assert _lock is not None assert _lock.is_locked() _data["vehicle_state"]["rt"] = 0 @pytest.mark.asyncio async def test_lock_already_locked(monkeypatch): _mock = TeslaMock(monkeypatch) _controller = Controller(None) _data = _mock.data_request_vehicle() _data["vehicle_state"]["rt"] = 0 _lock = TrunkLock(_data, _controller) await _lock.async_update() await _lock.lock() assert _lock is not None assert _lock.is_locked()
true
true
f70270dafa2d30a1ab08fdf17d8d43b8162686e1
782
py
Python
create_exe.py
hharzer/ytmusic-lib-tracker
b3564f802d0faa912b950bc64e5c84c944049a77
[ "MIT" ]
12
2020-07-21T21:32:48.000Z
2022-02-11T01:54:20.000Z
create_exe.py
hharzer/ytmusic-lib-tracker
b3564f802d0faa912b950bc64e5c84c944049a77
[ "MIT" ]
11
2020-07-20T22:45:31.000Z
2021-05-14T22:27:32.000Z
create_exe.py
hharzer/ytmusic-lib-tracker
b3564f802d0faa912b950bc64e5c84c944049a77
[ "MIT" ]
1
2022-02-17T23:51:38.000Z
2022-02-17T23:51:38.000Z
import sys

from cx_Freeze import setup, Executable

setup(
    name='YtMusic-Lib-Tracker',
    url='https://github.com/czifumasa/ytmusic-lib-tracker',
    author='Łukasz Lenart',
    author_email='lukasz.lenart912@gmail.com',
    version='0.1',
    license='MIT',
    description='Useful tools for youtube music. Exporting library to csv, tracking changes in library, summary of transfer from GPM',
    long_description=open('README.md').read(),
    options={"build_exe": {
        'packages': ['ytmusicapi', 'unidecode'],
        'excludes': ['tkinter', 'test', 'unittest', 'pydoc_data'],
        'include_files': ['config.ini'],
        'optimize': 2,
    }},
    executables=[Executable('ytmusiclibtracker.py', base='console', icon='ytmlt.ico', targetName='YTMusicLibTracker')]
)
39.1
134
0.668798
import sys

from cx_Freeze import setup, Executable

setup(
    name='YtMusic-Lib-Tracker',
    url='https://github.com/czifumasa/ytmusic-lib-tracker',
    author='Łukasz Lenart',
    author_email='lukasz.lenart912@gmail.com',
    version='0.1',
    license='MIT',
    description='Useful tools for youtube music. Exporting library to csv, tracking changes in library, summary of transfer from GPM',
    long_description=open('README.md').read(),
    options={"build_exe": {
        'packages': ['ytmusicapi', 'unidecode'],
        'excludes': ['tkinter', 'test', 'unittest', 'pydoc_data'],
        'include_files': ['config.ini'],
        'optimize': 2,
    }},
    executables=[Executable('ytmusiclibtracker.py', base='console', icon='ytmlt.ico', targetName='YTMusicLibTracker')]
)
true
true
f70271401af5bd2a93e5ae724aa2698ff1a032ff
5,666
py
Python
examples/python/TRSensors.py
mtrail/alphabot2-pi-zero
49693208b85c8b5edcce07574bdc770c7fe0e06e
[ "MIT" ]
1
2022-03-12T12:32:35.000Z
2022-03-12T12:32:35.000Z
examples/python/TRSensors.py
mtrail/alphabot2-pi-zero
49693208b85c8b5edcce07574bdc770c7fe0e06e
[ "MIT" ]
null
null
null
examples/python/TRSensors.py
mtrail/alphabot2-pi-zero
49693208b85c8b5edcce07574bdc770c7fe0e06e
[ "MIT" ]
1
2022-02-23T13:57:43.000Z
2022-02-23T13:57:43.000Z
#!/usr/bin/python # -*- coding:utf-8 -*- import RPi.GPIO as GPIO import time CS = 5 Clock = 25 Address = 24 DataOut = 23 Button = 7 class TRSensor(object): def __init__(self,numSensors = 5): self.numSensors = numSensors self.calibratedMin = [0] * self.numSensors self.calibratedMax = [1023] * self.numSensors self.last_value = 0 GPIO.setmode(GPIO.BCM) GPIO.setwarnings(False) GPIO.setup(Clock,GPIO.OUT) GPIO.setup(Address,GPIO.OUT) GPIO.setup(CS,GPIO.OUT) GPIO.setup(DataOut,GPIO.IN,GPIO.PUD_UP) GPIO.setup(Button,GPIO.IN,GPIO.PUD_UP) """ Reads the sensor values into an array. There *MUST* be space for as many values as there were sensors specified in the constructor. Example usage: unsigned int sensor_values[8]; sensors.read(sensor_values); The values returned are a measure of the reflectance in abstract units, with higher values corresponding to lower reflectance (e.g. a black surface or a void). """ def AnalogRead(self): value = [0]*(self.numSensors+1) #Read Channel0~channel6 AD value for j in range(0,self.numSensors+1): GPIO.output(CS, GPIO.LOW) for i in range(0,4): #sent 4-bit Address if(((j) >> (3 - i)) & 0x01): GPIO.output(Address,GPIO.HIGH) else: GPIO.output(Address,GPIO.LOW) #read MSB 4-bit data value[j] <<= 1 if(GPIO.input(DataOut)): value[j] |= 0x01 GPIO.output(Clock,GPIO.HIGH) GPIO.output(Clock,GPIO.LOW) for i in range(0,6): #read LSB 8-bit data value[j] <<= 1 if(GPIO.input(DataOut)): value[j] |= 0x01 GPIO.output(Clock,GPIO.HIGH) GPIO.output(Clock,GPIO.LOW) #no mean ,just delay # for i in range(0,6): # GPIO.output(Clock,GPIO.HIGH) # GPIO.output(Clock,GPIO.LOW) time.sleep(0.0001) GPIO.output(CS,GPIO.HIGH) # print value[1:] return value[1:] """ Reads the sensors 10 times and uses the results for calibration. The sensor values are not returned; instead, the maximum and minimum values found over time are stored internally and used for the readCalibrated() method. """ def calibrate(self): max_sensor_values = [0]*self.numSensors min_sensor_values = [0]*self.numSensors for j in range(0,10): sensor_values = self.AnalogRead() for i in range(0,self.numSensors): # set the max we found THIS time if((j == 0) or max_sensor_values[i] < sensor_values[i]): max_sensor_values[i] = sensor_values[i] # set the min we found THIS time if((j == 0) or min_sensor_values[i] > sensor_values[i]): min_sensor_values[i] = sensor_values[i] # record the min and max calibration values for i in range(0,self.numSensors): if(min_sensor_values[i] > self.calibratedMin[i]): self.calibratedMin[i] = min_sensor_values[i] if(max_sensor_values[i] < self.calibratedMax[i]): self.calibratedMax[i] = max_sensor_values[i] """ Returns values calibrated to a value between 0 and 1000, where 0 corresponds to the minimum value read by calibrate() and 1000 corresponds to the maximum value. Calibration values are stored separately for each sensor, so that differences in the sensors are accounted for automatically. """ def readCalibrated(self): value = 0 #read the needed values sensor_values = self.AnalogRead() for i in range (0,self.numSensors): denominator = self.calibratedMax[i] - self.calibratedMin[i] if(denominator != 0): value = (sensor_values[i] - self.calibratedMin[i])* 1000 / denominator if(value < 0): value = 0 elif(value > 1000): value = 1000 sensor_values[i] = value #print("readCalibrated",sensor_values) return sensor_values """ Operates the same as read calibrated, but also returns an estimated position of the robot with respect to a line. 
The estimate is made using a weighted average of the sensor indices multiplied by 1000, so that a return value of 0 indicates that the line is directly below sensor 0, a return value of 1000 indicates that the line is directly below sensor 1, 2000 indicates that it's below sensor 2000, etc. Intermediate values indicate that the line is between two sensors. The formula is: 0*value0 + 1000*value1 + 2000*value2 + ... -------------------------------------------- value0 + value1 + value2 + ... By default, this function assumes a dark line (high values) surrounded by white (low values). If your line is light on black, set the optional second argument white_line to true. In this case, each sensor value will be replaced by (1000-value) before the averaging. """ def readLine(self, white_line = 0): sensor_values = self.readCalibrated() avg = 0 sum = 0 on_line = 0 for i in range(0,self.numSensors): value = sensor_values[i] if(white_line): value = 1000-value # keep track of whether we see the line at all if(value > 200): on_line = 1 # only average in values that are above a noise threshold if(value > 50): avg += value * (i * 1000); # this is for the weighted total, sum += value; #this is for the denominator if(on_line != 1): # If it last read to the left of center, return 0. if(self.last_value < (self.numSensors - 1)*1000/2): #print("left") self.last_value = 0 # If it last read to the right of center, return the max. else: #print("right") self.last_value = (self.numSensors - 1)*1000 else: self.last_value = avg/sum return self.last_value,sensor_values # Simple example prints accel/mag data once per second: if __name__ == '__main__': TR = TRSensor() print("TRSensor Example") while True: print(TR.AnalogRead()) time.sleep(0.2)
29.206186
74
0.680374
import RPi.GPIO as GPIO import time CS = 5 Clock = 25 Address = 24 DataOut = 23 Button = 7 class TRSensor(object): def __init__(self,numSensors = 5): self.numSensors = numSensors self.calibratedMin = [0] * self.numSensors self.calibratedMax = [1023] * self.numSensors self.last_value = 0 GPIO.setmode(GPIO.BCM) GPIO.setwarnings(False) GPIO.setup(Clock,GPIO.OUT) GPIO.setup(Address,GPIO.OUT) GPIO.setup(CS,GPIO.OUT) GPIO.setup(DataOut,GPIO.IN,GPIO.PUD_UP) GPIO.setup(Button,GPIO.IN,GPIO.PUD_UP) def AnalogRead(self): value = [0]*(self.numSensors+1) for j in range(0,self.numSensors+1): GPIO.output(CS, GPIO.LOW) for i in range(0,4): if(((j) >> (3 - i)) & 0x01): GPIO.output(Address,GPIO.HIGH) else: GPIO.output(Address,GPIO.LOW) value[j] <<= 1 if(GPIO.input(DataOut)): value[j] |= 0x01 GPIO.output(Clock,GPIO.HIGH) GPIO.output(Clock,GPIO.LOW) for i in range(0,6): value[j] <<= 1 if(GPIO.input(DataOut)): value[j] |= 0x01 GPIO.output(Clock,GPIO.HIGH) GPIO.output(Clock,GPIO.LOW) time.sleep(0.0001) GPIO.output(CS,GPIO.HIGH) return value[1:] def calibrate(self): max_sensor_values = [0]*self.numSensors min_sensor_values = [0]*self.numSensors for j in range(0,10): sensor_values = self.AnalogRead() for i in range(0,self.numSensors): if((j == 0) or max_sensor_values[i] < sensor_values[i]): max_sensor_values[i] = sensor_values[i] if((j == 0) or min_sensor_values[i] > sensor_values[i]): min_sensor_values[i] = sensor_values[i] for i in range(0,self.numSensors): if(min_sensor_values[i] > self.calibratedMin[i]): self.calibratedMin[i] = min_sensor_values[i] if(max_sensor_values[i] < self.calibratedMax[i]): self.calibratedMax[i] = max_sensor_values[i] def readCalibrated(self): value = 0 sensor_values = self.AnalogRead() for i in range (0,self.numSensors): denominator = self.calibratedMax[i] - self.calibratedMin[i] if(denominator != 0): value = (sensor_values[i] - self.calibratedMin[i])* 1000 / denominator if(value < 0): value = 0 elif(value > 1000): value = 1000 sensor_values[i] = value return sensor_values def readLine(self, white_line = 0): sensor_values = self.readCalibrated() avg = 0 sum = 0 on_line = 0 for i in range(0,self.numSensors): value = sensor_values[i] if(white_line): value = 1000-value if(value > 200): on_line = 1 if(value > 50): avg += value * (i * 1000); sum += value; if(on_line != 1): if(self.last_value < (self.numSensors - 1)*1000/2): self.last_value = 0 else: self.last_value = (self.numSensors - 1)*1000 else: self.last_value = avg/sum return self.last_value,sensor_values if __name__ == '__main__': TR = TRSensor() print("TRSensor Example") while True: print(TR.AnalogRead()) time.sleep(0.2)
true
true
f70271444a8a7d243bda48a6efd9534b633a6c2b
1,169
py
Python
server/openapi_server/controllers/text_date_annotation_controller.py
cascadianblue/phi-annotator
0da6c102ec1068e6b15c613e2a90a78f79d15935
[ "Apache-2.0" ]
null
null
null
server/openapi_server/controllers/text_date_annotation_controller.py
cascadianblue/phi-annotator
0da6c102ec1068e6b15c613e2a90a78f79d15935
[ "Apache-2.0" ]
19
2021-07-29T03:14:38.000Z
2022-03-01T06:03:14.000Z
server/openapi_server/controllers/text_date_annotation_controller.py
cascadianblue/phi-annotator
0da6c102ec1068e6b15c613e2a90a78f79d15935
[ "Apache-2.0" ]
null
null
null
import connexion

from openapi_server.annotator.phi_types import PhiType
from openapi_server.get_annotations import get_annotations
from openapi_server.models.error import Error  # noqa: E501
from openapi_server.models.text_date_annotation_request import \
    TextDateAnnotationRequest  # noqa: E501
from openapi_server.models.text_date_annotation_response import \
    TextDateAnnotationResponse  # noqa: E501


def create_text_date_annotations():  # noqa: E501
    """Annotate dates in a clinical note

    Return the date annotations found in a clinical note # noqa: E501

    :rtype: TextDateAnnotations
    """
    res = None
    status = None
    if connexion.request.is_json:
        try:
            annotation_request = TextDateAnnotationRequest.from_dict(
                connexion.request.get_json())  # noqa: E501
            note = annotation_request.note
            annotations = get_annotations(note, phi_type=PhiType.DATE)
            res = TextDateAnnotationResponse(annotations)
            status = 200
        except Exception as error:
            status = 500
            res = Error("Internal error", status, str(error))
    return res, status
35.424242
70
0.707442
import connexion

from openapi_server.annotator.phi_types import PhiType
from openapi_server.get_annotations import get_annotations
from openapi_server.models.error import Error
from openapi_server.models.text_date_annotation_request import \
    TextDateAnnotationRequest
from openapi_server.models.text_date_annotation_response import \
    TextDateAnnotationResponse


def create_text_date_annotations():
    res = None
    status = None
    if connexion.request.is_json:
        try:
            annotation_request = TextDateAnnotationRequest.from_dict(
                connexion.request.get_json())
            note = annotation_request.note
            annotations = get_annotations(note, phi_type=PhiType.DATE)
            res = TextDateAnnotationResponse(annotations)
            status = 200
        except Exception as error:
            status = 500
            res = Error("Internal error", status, str(error))
    return res, status
true
true
f702724a2087738470998243ccd012825927600c
1,370
py
Python
fcos_core/solver/build.py
CityU-AIM-Group/SFPolypDA
3902577cf9549a65be7ba89e2c11a7115158b531
[ "BSD-2-Clause" ]
5
2021-12-17T10:55:54.000Z
2022-03-03T16:09:23.000Z
fcos_core/solver/build.py
CityU-AIM-Group/SFPolypDA
3902577cf9549a65be7ba89e2c11a7115158b531
[ "BSD-2-Clause" ]
null
null
null
fcos_core/solver/build.py
CityU-AIM-Group/SFPolypDA
3902577cf9549a65be7ba89e2c11a7115158b531
[ "BSD-2-Clause" ]
null
null
null
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import logging

from .lr_scheduler import WarmupMultiStepLR


def make_optimizer(cfg, model):
    logger = logging.getLogger("fcos_core.trainer")
    params = []
    for key, value in model.named_parameters():
        if not value.requires_grad:
            continue
        lr = cfg.SOLVER.BASE_LR
        weight_decay = cfg.SOLVER.WEIGHT_DECAY
        if "bias" in key:
            lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR
            weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS
        if key.endswith(".offset.weight") or key.endswith(".offset.bias"):
            logger.info("set lr factor of {} as {}".format(
                key, cfg.SOLVER.DCONV_OFFSETS_LR_FACTOR
            ))
            lr *= cfg.SOLVER.DCONV_OFFSETS_LR_FACTOR
        params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
    optimizer = torch.optim.SGD(params, lr, momentum=cfg.SOLVER.MOMENTUM)
    if cfg.SOLVER.ADAM:
        optimizer = torch.optim.Adam(params)
    return optimizer


def make_lr_scheduler(cfg, optimizer):
    return WarmupMultiStepLR(
        optimizer,
        cfg.SOLVER.STEPS,
        cfg.SOLVER.GAMMA,
        warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
        warmup_iters=cfg.SOLVER.WARMUP_ITERS,
        warmup_method=cfg.SOLVER.WARMUP_METHOD,
    )
34.25
79
0.652555
import torch
import logging

from .lr_scheduler import WarmupMultiStepLR


def make_optimizer(cfg, model):
    logger = logging.getLogger("fcos_core.trainer")
    params = []
    for key, value in model.named_parameters():
        if not value.requires_grad:
            continue
        lr = cfg.SOLVER.BASE_LR
        weight_decay = cfg.SOLVER.WEIGHT_DECAY
        if "bias" in key:
            lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR
            weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS
        if key.endswith(".offset.weight") or key.endswith(".offset.bias"):
            logger.info("set lr factor of {} as {}".format(
                key, cfg.SOLVER.DCONV_OFFSETS_LR_FACTOR
            ))
            lr *= cfg.SOLVER.DCONV_OFFSETS_LR_FACTOR
        params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
    optimizer = torch.optim.SGD(params, lr, momentum=cfg.SOLVER.MOMENTUM)
    if cfg.SOLVER.ADAM:
        optimizer = torch.optim.Adam(params)
    return optimizer


def make_lr_scheduler(cfg, optimizer):
    return WarmupMultiStepLR(
        optimizer,
        cfg.SOLVER.STEPS,
        cfg.SOLVER.GAMMA,
        warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
        warmup_iters=cfg.SOLVER.WARMUP_ITERS,
        warmup_method=cfg.SOLVER.WARMUP_METHOD,
    )
true
true
f702725052b1f21007cb4ab5b3c83889e16ebbf8
3,172
py
Python
src/processrunner/kitchenpatch.py
arobb/python-process-runner
e5265a052f501f533f6d0c8bbe3d70ab120b679f
[ "MIT" ]
3
2019-07-16T17:32:29.000Z
2021-11-08T05:00:26.000Z
src/processrunner/kitchenpatch.py
arobb/python-process-runner
e5265a052f501f533f6d0c8bbe3d70ab120b679f
[ "MIT" ]
null
null
null
src/processrunner/kitchenpatch.py
arobb/python-process-runner
e5265a052f501f533f6d0c8bbe3d70ab120b679f
[ "MIT" ]
2
2017-07-06T23:59:12.000Z
2019-07-16T17:32:32.000Z
# -*- coding: utf-8 -*- """Patched version of PyPi Kitchen's Python 3 getwriter function. Removes extraneous newlines.""" import codecs from kitchen.text.converters import to_bytes def getwriter(encoding): """Return a :class:`codecs.StreamWriter` that resists tracing back. :arg encoding: Encoding to use for transforming :class:`str` strings into byte :class:`bytes`. :rtype: :class:`codecs.StreamWriter` :returns: :class:`~codecs.StreamWriter` that you can instantiate to wrap output streams to automatically translate :class:`str` strings into :attr:`encoding`. This is a reimplemetation of :func:`codecs.getwriter` that returns a :class:`~codecs.StreamWriter` that resists issuing tracebacks. The :class:`~codecs.StreamWriter` that is returned uses :func:`kitchen.text.converters.to_bytes` to convert :class:`str` strings into byte :class:`bytes`. The departures from :func:`codecs.getwriter` are: 1) The :class:`~codecs.StreamWriter` that is returned will take byte :class:`bytes` as well as :class:`str` strings. Any byte :class:`bytes` will be passed through unmodified. 2) The default error handler for unknown bytes is to ``replace`` the bytes with the unknown character (``?`` in most ascii-based encodings, ``�`` in the utf encodings) whereas :func:`codecs.getwriter` defaults to ``strict``. Like :class:`codecs.StreamWriter`, the returned :class:`~codecs.StreamWriter` can have its error handler changed in code by setting ``stream.errors = 'new_handler_name'`` Example usage:: $ LC_ALL=C python >>> import sys >>> from kitchen.text.converters import getwriter >>> UTF8Writer = getwriter('utf-8') >>> unwrapped_stdout = sys.stdout >>> sys.stdout = UTF8Writer(unwrapped_stdout) >>> print 'caf\\xc3\\xa9' café >>> print u'caf\\xe9' café >>> ASCIIWriter = getwriter('ascii') >>> sys.stdout = ASCIIWriter(unwrapped_stdout) >>> print 'caf\\xc3\\xa9' café >>> print u'caf\\xe9' caf? .. seealso:: API docs for :class:`codecs.StreamWriter` and :func:`codecs.getwriter` and `Print Fails <http://wiki.python.org/moin/PrintFails>`_ on the python wiki. .. versionadded:: kitchen 0.2a2, API: kitchen.text 1.1.0 """ class _StreamWriter(codecs.StreamWriter): # :W0223: We don't need to implement all methods of StreamWriter. # This is not the actual class that gets used but a replacement for # the actual class. # :C0111: We're implementing an API from the stdlib. Just point # people at that documentation instead of writing docstrings here. # pylint:disable-msg=W0223,C0111 def __init__(self, stream, errors='replace'): codecs.StreamWriter.__init__(self, stream, errors) def encode(self, msg, errors='replace'): return (to_bytes(msg, encoding=self.encoding, errors=errors), len(msg)) _StreamWriter.encoding = encoding return _StreamWriter
40.151899
78
0.651009
import codecs

from kitchen.text.converters import to_bytes


def getwriter(encoding):
    class _StreamWriter(codecs.StreamWriter):
        # This is not the actual class that gets used but a replacement for
        # the actual class.
        # :C0111: We're implementing an API from the stdlib. Just point
        def __init__(self, stream, errors='replace'):
            codecs.StreamWriter.__init__(self, stream, errors)

        def encode(self, msg, errors='replace'):
            return (to_bytes(msg, encoding=self.encoding, errors=errors),
                    len(msg))

    _StreamWriter.encoding = encoding
    return _StreamWriter
true
true
f702735ba5948c9492dc6dfb2c9ece987a5886b2
1,041
py
Python
tests/parser/bug.02.test.py
veltri/DLV2
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
[ "Apache-2.0" ]
null
null
null
tests/parser/bug.02.test.py
veltri/DLV2
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
[ "Apache-2.0" ]
null
null
null
tests/parser/bug.02.test.py
veltri/DLV2
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
[ "Apache-2.0" ]
null
null
null
input = """ colored(2,g) :- not diff_col(2,g). colored(2,y) :- not diff_col(2,y). colored(3,g) :- not diff_col(3,g). colored(3,y) :- not diff_col(3,y). diff_col(2,g) :- colored(2,y). diff_col(3,g) :- colored(3,y). diff_col(2,y) :- colored(2,g). diff_col(3,y) :- colored(3,g). no_stable :- colored(2,2), colored(3,2), not no_stable. no_stable :- colored(2,3), colored(3,3), not no_stable. no_stable :- colored(2,g), colored(3,g), not no_stable. no_stable :- colored(2,y), colored(3,y), not no_stable. """ output = """ colored(2,g) :- not diff_col(2,g). colored(2,y) :- not diff_col(2,y). colored(3,g) :- not diff_col(3,g). colored(3,y) :- not diff_col(3,y). diff_col(2,g) :- colored(2,y). diff_col(3,g) :- colored(3,y). diff_col(2,y) :- colored(2,g). diff_col(3,y) :- colored(3,g). no_stable :- colored(2,2), colored(3,2), not no_stable. no_stable :- colored(2,3), colored(3,3), not no_stable. no_stable :- colored(2,g), colored(3,g), not no_stable. no_stable :- colored(2,y), colored(3,y), not no_stable. """
31.545455
56
0.62536
input = """ colored(2,g) :- not diff_col(2,g). colored(2,y) :- not diff_col(2,y). colored(3,g) :- not diff_col(3,g). colored(3,y) :- not diff_col(3,y). diff_col(2,g) :- colored(2,y). diff_col(3,g) :- colored(3,y). diff_col(2,y) :- colored(2,g). diff_col(3,y) :- colored(3,g). no_stable :- colored(2,2), colored(3,2), not no_stable. no_stable :- colored(2,3), colored(3,3), not no_stable. no_stable :- colored(2,g), colored(3,g), not no_stable. no_stable :- colored(2,y), colored(3,y), not no_stable. """ output = """ colored(2,g) :- not diff_col(2,g). colored(2,y) :- not diff_col(2,y). colored(3,g) :- not diff_col(3,g). colored(3,y) :- not diff_col(3,y). diff_col(2,g) :- colored(2,y). diff_col(3,g) :- colored(3,y). diff_col(2,y) :- colored(2,g). diff_col(3,y) :- colored(3,g). no_stable :- colored(2,2), colored(3,2), not no_stable. no_stable :- colored(2,3), colored(3,3), not no_stable. no_stable :- colored(2,g), colored(3,g), not no_stable. no_stable :- colored(2,y), colored(3,y), not no_stable. """
true
true
f70273a4f204930ac82371643af949cac1c6a644
3,534
py
Python
src/rpdk/core/boto_helpers.py
wbingli/cloudformation-cli
75a39cb7f73596f01ce04c85967dea74fe5a893d
[ "Apache-2.0" ]
200
2019-12-02T03:33:37.000Z
2022-03-31T21:50:23.000Z
src/rpdk/core/boto_helpers.py
wbingli/cloudformation-cli
75a39cb7f73596f01ce04c85967dea74fe5a893d
[ "Apache-2.0" ]
353
2019-11-26T20:20:56.000Z
2022-03-31T00:53:05.000Z
src/rpdk/core/boto_helpers.py
wbingli/cloudformation-cli
75a39cb7f73596f01ce04c85967dea74fe5a893d
[ "Apache-2.0" ]
110
2019-11-26T21:58:16.000Z
2022-03-26T19:02:11.000Z
import logging from datetime import datetime import botocore.loaders import botocore.regions from boto3 import Session as Boto3Session from botocore.exceptions import ClientError from .exceptions import CLIMisconfiguredError, DownstreamError LOG = logging.getLogger(__name__) BOTO_CRED_KEYS = ("aws_access_key_id", "aws_secret_access_key", "aws_session_token") LOWER_CAMEL_CRED_KEYS = ("accessKeyId", "secretAccessKey", "sessionToken") def create_sdk_session(region_name=None): def _known_error(msg): raise CLIMisconfiguredError( msg + ". Please ensure your AWS CLI is configured correctly: " "https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html" ) session = Boto3Session(region_name=region_name) if session.region_name is None: _known_error("No region specified") if session.get_credentials() is None: _known_error("No credentials specified") return session def get_temporary_credentials(session, key_names=BOTO_CRED_KEYS, role_arn=None): sts_client = session.client( "sts", endpoint_url=get_service_endpoint("sts", session.region_name), region_name=session.region_name, ) if role_arn: session_name = "CloudFormationContractTest-{:%Y%m%d%H%M%S}".format( datetime.now() ) try: response = sts_client.assume_role( RoleArn=role_arn, RoleSessionName=session_name, DurationSeconds=900 ) except ClientError: # pylint: disable=W1201 LOG.debug( "Getting session token resulted in unknown ClientError. " + "Could not assume specified role '%s'.", role_arn, ) raise DownstreamError() from Exception( "Could not assume specified role '{}'".format(role_arn) ) temp = response["Credentials"] creds = (temp["AccessKeyId"], temp["SecretAccessKey"], temp["SessionToken"]) else: frozen = session.get_credentials().get_frozen_credentials() if frozen.token: creds = (frozen.access_key, frozen.secret_key, frozen.token) else: try: response = sts_client.get_session_token(DurationSeconds=900) except ClientError as e: LOG.debug( "Getting session token resulted in unknown ClientError", exc_info=e ) raise DownstreamError("Could not retrieve session token") from e temp = response["Credentials"] creds = (temp["AccessKeyId"], temp["SecretAccessKey"], temp["SessionToken"]) return dict(zip(key_names, creds)) def get_service_endpoint(service, region): loader = botocore.loaders.create_loader() data = loader.load_data("endpoints") resolver = botocore.regions.EndpointResolver(data) endpoint_data = resolver.construct_endpoint(service, region) return "https://" + endpoint_data["hostname"] def get_account(session, temporary_credentials): sts_client = session.client( "sts", endpoint_url=get_service_endpoint("sts", session.region_name), region_name=session.region_name, aws_access_key_id=temporary_credentials["accessKeyId"], aws_secret_access_key=temporary_credentials["secretAccessKey"], aws_session_token=temporary_credentials["sessionToken"], ) response = sts_client.get_caller_identity() return response.get("Account")
36.43299
88
0.665818
import logging from datetime import datetime import botocore.loaders import botocore.regions from boto3 import Session as Boto3Session from botocore.exceptions import ClientError from .exceptions import CLIMisconfiguredError, DownstreamError LOG = logging.getLogger(__name__) BOTO_CRED_KEYS = ("aws_access_key_id", "aws_secret_access_key", "aws_session_token") LOWER_CAMEL_CRED_KEYS = ("accessKeyId", "secretAccessKey", "sessionToken") def create_sdk_session(region_name=None): def _known_error(msg): raise CLIMisconfiguredError( msg + ". Please ensure your AWS CLI is configured correctly: " "https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html" ) session = Boto3Session(region_name=region_name) if session.region_name is None: _known_error("No region specified") if session.get_credentials() is None: _known_error("No credentials specified") return session def get_temporary_credentials(session, key_names=BOTO_CRED_KEYS, role_arn=None): sts_client = session.client( "sts", endpoint_url=get_service_endpoint("sts", session.region_name), region_name=session.region_name, ) if role_arn: session_name = "CloudFormationContractTest-{:%Y%m%d%H%M%S}".format( datetime.now() ) try: response = sts_client.assume_role( RoleArn=role_arn, RoleSessionName=session_name, DurationSeconds=900 ) except ClientError: LOG.debug( "Getting session token resulted in unknown ClientError. " + "Could not assume specified role '%s'.", role_arn, ) raise DownstreamError() from Exception( "Could not assume specified role '{}'".format(role_arn) ) temp = response["Credentials"] creds = (temp["AccessKeyId"], temp["SecretAccessKey"], temp["SessionToken"]) else: frozen = session.get_credentials().get_frozen_credentials() if frozen.token: creds = (frozen.access_key, frozen.secret_key, frozen.token) else: try: response = sts_client.get_session_token(DurationSeconds=900) except ClientError as e: LOG.debug( "Getting session token resulted in unknown ClientError", exc_info=e ) raise DownstreamError("Could not retrieve session token") from e temp = response["Credentials"] creds = (temp["AccessKeyId"], temp["SecretAccessKey"], temp["SessionToken"]) return dict(zip(key_names, creds)) def get_service_endpoint(service, region): loader = botocore.loaders.create_loader() data = loader.load_data("endpoints") resolver = botocore.regions.EndpointResolver(data) endpoint_data = resolver.construct_endpoint(service, region) return "https://" + endpoint_data["hostname"] def get_account(session, temporary_credentials): sts_client = session.client( "sts", endpoint_url=get_service_endpoint("sts", session.region_name), region_name=session.region_name, aws_access_key_id=temporary_credentials["accessKeyId"], aws_secret_access_key=temporary_credentials["secretAccessKey"], aws_session_token=temporary_credentials["sessionToken"], ) response = sts_client.get_caller_identity() return response.get("Account")
true
true
f702747b82118bbd64d8fc67a01e1f638cbb45dd
26,042
py
Python
src/transformersX/models/cutoffbert/modeling_cutoffbert.py
stevezheng23/fewshot_nlp_pt
aaca4658aaa48a5a45dfd7d5ee7282d7f7c74be2
[ "Apache-2.0" ]
2
2021-08-06T05:43:55.000Z
2022-03-17T22:31:21.000Z
src/transformersX/models/cutoffbert/modeling_cutoffbert.py
stevezheng23/fewshot_nlp_pt
aaca4658aaa48a5a45dfd7d5ee7282d7f7c74be2
[ "Apache-2.0" ]
null
null
null
src/transformersX/models/cutoffbert/modeling_cutoffbert.py
stevezheng23/fewshot_nlp_pt
aaca4658aaa48a5a45dfd7d5ee7282d7f7c74be2
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch CUTOFFBERT model. """ import math import os import warnings import numpy as np from dataclasses import dataclass from typing import Optional, Tuple import torch import torch.utils.checkpoint import torch.nn.functional as F from packaging import version from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss, KLDivLoss from torch.distributions.beta import Beta from ...activations import ACT2FN from ...file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, DualPassageEncoderModelOutput, ) from ...modeling_utils import ( PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) from ...utils import logging from .configuration_cutoffbert import CutoffBertConfig from ..bert.modeling_bert import BertEmbeddings as CutoffBertEmbeddings from ..bert.modeling_bert import BertEncoder as CutoffBertEncoder from ..bert.modeling_bert import BertPooler as CutoffBertPooler logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "bert-base-uncased" _CONFIG_FOR_DOC = "CutoffBertConfig" _TOKENIZER_FOR_DOC = "CutoffBertTokenizer" CUTOFFBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "bert-base-uncased", "bert-large-uncased", "bert-base-cased", "bert-large-cased", "bert-base-multilingual-uncased", "bert-base-multilingual-cased", # See all BERT models at https://huggingface.co/models?filter=bert ] def load_tf_weights_in_cutoffbert(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f"Converting TensorFlow checkpoint from {tf_path}") # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.split("/") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any( n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] for n in name ): logger.info(f"Skipping {'/'.join(name)}") continue pointer = model for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "kernel" or scope_names[0] == "gamma": pointer = getattr(pointer, "weight") elif scope_names[0] == "output_bias" or scope_names[0] == "beta": pointer = getattr(pointer, "bias") elif scope_names[0] == "output_weights": pointer = getattr(pointer, "weight") elif scope_names[0] == "squad": pointer = getattr(pointer, "classifier") else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info(f"Skipping {'/'.join(name)}") continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == "_embeddings": pointer = getattr(pointer, "weight") elif m_name == "kernel": array = np.transpose(array) try: assert ( pointer.shape == array.shape ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info(f"Initialize PyTorch weight {name}") pointer.data = torch.from_numpy(array) return model class CutoffBertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = CutoffBertConfig load_tf_weights = load_tf_weights_in_cutoffbert base_model_prefix = "bert" _keys_to_ignore_on_load_missing = [r"position_ids"] def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) CUTOFFBERT_START_DOCSTRING = r""" This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ CUTOFFBERT_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`~transformers.BertTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: - 0 corresponds to a `sentence A` token, - 1 corresponds to a `sentence B` token. `What are token type IDs? <../glossary.html#token-type-ids>`_ position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`_ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ @add_start_docstrings( "The bare CutoffBert Model transformer outputting raw hidden-states without any specific head on top.", CUTOFFBERT_START_DOCSTRING, ) class CutoffBertModel(CutoffBertPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `Attention is all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration set to :obj:`True`. 
To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder` argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an input to the forward pass. """ def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = CutoffBertEmbeddings(config) self.encoder = CutoffBertEncoder(config) self.pooler = CutoffBertPooler(config) if add_pooling_layer else None self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(CUTOFFBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. use_cache (:obj:`bool`, `optional`): If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up decoding (see :obj:`past_key_values`). 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() batch_size, seq_length = input_shape elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size, seq_length = input_shape else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, 
pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @add_start_docstrings( """ CutoffBert Model transformer with a sequence classification head on top (a linear layer on top of the pooled output) + Cut-off data augmentation support. """, CUTOFFBERT_START_DOCSTRING, ) class CutoffBertForSequenceClassification(CutoffBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.cls_token_id = config.cls_token_id self.sep_token_id = config.sep_token_id self.mask_token_id = config.mask_token_id self.masking_prob = config.cutoff_masking_prob self.temperature = config.cutoff_temperature self.mask_loss_wgt = config.cutoff_mask_loss_wgt self.js_loss_wgt = config.cutoff_js_loss_wgt self.config = config self.bert = CutoffBertModel(config) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def _apply_cutoff(self, inputs): masked_inputs = inputs.clone() valid_masking_indices = (inputs != self.cls_token_id) & (inputs != self.sep_token_id) random_masking_indices = torch.bernoulli(torch.full(inputs.shape, self.masking_prob, device=inputs.device)).bool() masking_indices = random_masking_indices & valid_masking_indices masked_inputs[masking_indices] = self.mask_token_id return masked_inputs @add_start_docstrings_to_model_forward(CUTOFFBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is None: outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = self.dropout(outputs[1]) logits = self.classifier(pooled_output) if not return_dict: return (logits,) + outputs[2:] return SequenceClassifierOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) b, l = input_ids.size() masked_input_ids = self._apply_cutoff(input_ids.clone()) flatten_input_ids = torch.stack((input_ids, masked_input_ids), dim=1).reshape(-1, l) flatten_attention_mask = attention_mask.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if attention_mask is not None else None flatten_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if token_type_ids is not None else None flatten_position_ids = position_ids.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if position_ids is not None else None flatten_inputs_embeds = inputs_embeds.unsqueeze(1).expand(-1, 2, -1, -1).reshape(-1, l, self.config.hidden_size) if inputs_embeds is not None else None flatten_outputs = self.bert( flatten_input_ids, attention_mask=flatten_attention_mask, token_type_ids=flatten_token_type_ids, position_ids=flatten_position_ids, head_mask=head_mask, inputs_embeds=flatten_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) flatten_pooled_output = self.dropout(flatten_outputs[1]) flatten_logits = self.classifier(flatten_pooled_output) logits, masked_logits = flatten_logits.reshape(b, 2, self.config.num_labels).chunk(2, dim=1) logits, masked_logits = logits.squeeze(dim=1).contiguous(), masked_logits.squeeze(dim=1).contiguous() loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if self.mask_loss_wgt is not None and self.mask_loss_wgt > 0.0: mask_loss = loss_fct(masked_logits.view(-1, self.num_labels), labels.view(-1)) loss += mask_loss * self.mask_loss_wgt if self.js_loss_wgt is not None and self.js_loss_wgt > 0.0: kl_loss_fct = KLDivLoss(reduction="batchmean") src_logits, trg_logits = logits, masked_logits mean_logits = (src_logits + trg_logits) * 0.5 src_loss = kl_loss_fct( F.log_softmax(src_logits / self.temperature, dim=-1), F.softmax(mean_logits / self.temperature, dim=-1) ) * (self.temperature ** 2) trg_loss = kl_loss_fct( F.log_softmax(trg_logits / self.temperature, dim=-1), F.softmax(mean_logits / self.temperature, dim=-1) ) * (self.temperature ** 2) js_loss = (src_loss + trg_loss) * 0.5 loss += js_loss * self.js_loss_wgt if not return_dict: return (loss, logits) return SequenceClassifierOutput( loss=loss, logits=logits, )
44.9
213
0.672183
import math import os import warnings import numpy as np from dataclasses import dataclass from typing import Optional, Tuple import torch import torch.utils.checkpoint import torch.nn.functional as F from packaging import version from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss, KLDivLoss from torch.distributions.beta import Beta from ...activations import ACT2FN from ...file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, DualPassageEncoderModelOutput, ) from ...modeling_utils import ( PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) from ...utils import logging from .configuration_cutoffbert import CutoffBertConfig from ..bert.modeling_bert import BertEmbeddings as CutoffBertEmbeddings from ..bert.modeling_bert import BertEncoder as CutoffBertEncoder from ..bert.modeling_bert import BertPooler as CutoffBertPooler logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "bert-base-uncased" _CONFIG_FOR_DOC = "CutoffBertConfig" _TOKENIZER_FOR_DOC = "CutoffBertTokenizer" CUTOFFBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "bert-base-uncased", "bert-large-uncased", "bert-base-cased", "bert-large-cased", "bert-base-multilingual-uncased", "bert-base-multilingual-cased", ] def load_tf_weights_in_cutoffbert(model, config, tf_checkpoint_path): try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f"Converting TensorFlow checkpoint from {tf_path}") init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.split("/") if any( n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] for n in name ): logger.info(f"Skipping {'/'.join(name)}") continue pointer = model for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "kernel" or scope_names[0] == "gamma": pointer = getattr(pointer, "weight") elif scope_names[0] == "output_bias" or scope_names[0] == "beta": pointer = getattr(pointer, "bias") elif scope_names[0] == "output_weights": pointer = getattr(pointer, "weight") elif scope_names[0] == "squad": pointer = getattr(pointer, "classifier") else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info(f"Skipping {'/'.join(name)}") continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == "_embeddings": pointer = getattr(pointer, "weight") elif m_name == "kernel": array = np.transpose(array) try: assert ( pointer.shape == array.shape ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info(f"Initialize PyTorch weight {name}") pointer.data = torch.from_numpy(array) return model class CutoffBertPreTrainedModel(PreTrainedModel): config_class = CutoffBertConfig load_tf_weights = load_tf_weights_in_cutoffbert base_model_prefix = "bert" _keys_to_ignore_on_load_missing = [r"position_ids"] def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) CUTOFFBERT_START_DOCSTRING = r""" This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ CUTOFFBERT_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`~transformers.BertTokenizer`. 
See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: - 0 corresponds to a `sentence A` token, - 1 corresponds to a `sentence B` token. `What are token type IDs? <../glossary.html#token-type-ids>`_ position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`_ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. 
""" @add_start_docstrings( "The bare CutoffBert Model transformer outputting raw hidden-states without any specific head on top.", CUTOFFBERT_START_DOCSTRING, ) class CutoffBertModel(CutoffBertPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = CutoffBertEmbeddings(config) self.encoder = CutoffBertEncoder(config) self.pooler = CutoffBertPooler(config) if add_pooling_layer else None self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(CUTOFFBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() batch_size, seq_length = input_shape elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size, seq_length = input_shape else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @add_start_docstrings( """ CutoffBert Model transformer with a sequence classification head on top (a linear layer on top of the pooled output) + Cut-off data augmentation support. 
""", CUTOFFBERT_START_DOCSTRING, ) class CutoffBertForSequenceClassification(CutoffBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.cls_token_id = config.cls_token_id self.sep_token_id = config.sep_token_id self.mask_token_id = config.mask_token_id self.masking_prob = config.cutoff_masking_prob self.temperature = config.cutoff_temperature self.mask_loss_wgt = config.cutoff_mask_loss_wgt self.js_loss_wgt = config.cutoff_js_loss_wgt self.config = config self.bert = CutoffBertModel(config) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def _apply_cutoff(self, inputs): masked_inputs = inputs.clone() valid_masking_indices = (inputs != self.cls_token_id) & (inputs != self.sep_token_id) random_masking_indices = torch.bernoulli(torch.full(inputs.shape, self.masking_prob, device=inputs.device)).bool() masking_indices = random_masking_indices & valid_masking_indices masked_inputs[masking_indices] = self.mask_token_id return masked_inputs @add_start_docstrings_to_model_forward(CUTOFFBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is None: outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = self.dropout(outputs[1]) logits = self.classifier(pooled_output) if not return_dict: return (logits,) + outputs[2:] return SequenceClassifierOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) b, l = input_ids.size() masked_input_ids = self._apply_cutoff(input_ids.clone()) flatten_input_ids = torch.stack((input_ids, masked_input_ids), dim=1).reshape(-1, l) flatten_attention_mask = attention_mask.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if attention_mask is not None else None flatten_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if token_type_ids is not None else None flatten_position_ids = position_ids.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if position_ids is not None else None flatten_inputs_embeds = inputs_embeds.unsqueeze(1).expand(-1, 2, -1, -1).reshape(-1, l, self.config.hidden_size) if inputs_embeds is not None else None flatten_outputs = self.bert( flatten_input_ids, attention_mask=flatten_attention_mask, token_type_ids=flatten_token_type_ids, position_ids=flatten_position_ids, head_mask=head_mask, inputs_embeds=flatten_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) flatten_pooled_output = self.dropout(flatten_outputs[1]) flatten_logits = self.classifier(flatten_pooled_output) logits, masked_logits = flatten_logits.reshape(b, 2, 
self.config.num_labels).chunk(2, dim=1) logits, masked_logits = logits.squeeze(dim=1).contiguous(), masked_logits.squeeze(dim=1).contiguous() loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if self.mask_loss_wgt is not None and self.mask_loss_wgt > 0.0: mask_loss = loss_fct(masked_logits.view(-1, self.num_labels), labels.view(-1)) loss += mask_loss * self.mask_loss_wgt if self.js_loss_wgt is not None and self.js_loss_wgt > 0.0: kl_loss_fct = KLDivLoss(reduction="batchmean") src_logits, trg_logits = logits, masked_logits mean_logits = (src_logits + trg_logits) * 0.5 src_loss = kl_loss_fct( F.log_softmax(src_logits / self.temperature, dim=-1), F.softmax(mean_logits / self.temperature, dim=-1) ) * (self.temperature ** 2) trg_loss = kl_loss_fct( F.log_softmax(trg_logits / self.temperature, dim=-1), F.softmax(mean_logits / self.temperature, dim=-1) ) * (self.temperature ** 2) js_loss = (src_loss + trg_loss) * 0.5 loss += js_loss * self.js_loss_wgt if not return_dict: return (loss, logits) return SequenceClassifierOutput( loss=loss, logits=logits, )
true
true
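The forward pass in the record above pairs a cross-entropy loss on the clean input with a Jensen-Shannon consistency term between the clean and token-cutoff views. Below is a minimal standalone sketch of that consistency term in plain PyTorch, assuming two logit tensors of shape (batch, num_labels); the helper name js_consistency is illustrative and not part of the record.

import torch
import torch.nn.functional as F
from torch.nn import KLDivLoss

def js_consistency(src_logits, trg_logits, temperature=1.0):
    # Symmetric KL of each view against the softmax of the averaged logits,
    # mirroring the js_loss term built in CutoffBertForSequenceClassification.forward.
    kl_loss_fct = KLDivLoss(reduction="batchmean")
    mean_probs = F.softmax((src_logits + trg_logits) * 0.5 / temperature, dim=-1)
    src_loss = kl_loss_fct(F.log_softmax(src_logits / temperature, dim=-1), mean_probs) * (temperature ** 2)
    trg_loss = kl_loss_fct(F.log_softmax(trg_logits / temperature, dim=-1), mean_probs) * (temperature ** 2)
    return (src_loss + trg_loss) * 0.5

print(js_consistency(torch.randn(4, 3), torch.randn(4, 3)))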
f702751fc715aa75b5c77b637dfcf7dfc73f80cd
3,001
py
Python
litex/build/lattice/bit_to_svf.py
osterwood/litex
db20cb172dc982c5879aa8080ec7aa18de181cc5
[ "ADSL" ]
1,501
2016-04-19T18:16:21.000Z
2022-03-31T17:46:31.000Z
litex/build/lattice/bit_to_svf.py
gregdavill/litex
230ba5f7baff2d6fe98fbb7eecabb48aaea250e4
[ "ADSL" ]
1,135
2016-04-19T05:49:14.000Z
2022-03-31T15:21:19.000Z
litex/build/lattice/bit_to_svf.py
gregdavill/litex
230ba5f7baff2d6fe98fbb7eecabb48aaea250e4
[ "ADSL" ]
357
2016-04-19T05:00:24.000Z
2022-03-31T11:28:32.000Z
#!/usr/bin/env python3 # Very basic bitstream to SVF converter # This file is Copyright (c) 2018 David Shah <dave@ds0.me> import sys import textwrap max_row_size = 100000 def bitreverse(x): y = 0 for i in range(8): if (x >> (7 - i)) & 1 == 1: y |= (1 << i) return y def bit_to_svf(bit, svf): with open(bit, 'rb') as bitf: bs = bitf.read() # Autodetect IDCODE from bitstream idcode_cmd = bytes([0xE2, 0x00, 0x00, 0x00]) idcode = None for i in range(len(bs) - 4): if bs[i:i+4] == idcode_cmd: idcode = bs[i+4] << 24 idcode |= bs[i+5] << 16 idcode |= bs[i+6] << 8 idcode |= bs[i+7] break if idcode is None: print("Failed to find IDCODE in bitstream, check bitstream is valid") sys.exit(1) print("IDCODE in bitstream is 0x%08x" % idcode) bitf.seek(0) with open(svf, 'w') as svf: print(""" HDR 0; HIR 0; TDR 0; TIR 0; ENDDR DRPAUSE; ENDIR IRPAUSE; STATE IDLE; """, file=svf) print(""" SIR 8 TDI (E0); SDR 32 TDI (00000000) TDO ({:08X}) MASK (FFFFFFFF); """.format(idcode), file=svf) print(""" SIR 8 TDI (1C); SDR 510 TDI (3FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); SIR 8 TDI (C6); SDR 8 TDI (00); RUNTEST IDLE 2 TCK 1.00E-02 SEC; SIR 8 TDI (3C); SDR 32 TDI (00000000) TDO (00000000) MASK (0000B000); SIR 8 TDI (46); SDR 8 TDI (01); RUNTEST IDLE 2 TCK 1.00E-02 SEC; SIR 8 TDI (7A); RUNTEST IDLE 2 TCK 1.00E-02 SEC; """, file=svf) while True: chunk = bitf.read(max_row_size//8) if not chunk: break # Convert chunk to bit-reversed hex br_chunk = [bitreverse(x) for x in chunk] hex_chunk = ["{:02X}".format(x) for x in reversed(br_chunk)] print("\n".join(textwrap.wrap("SDR {} TDI ({});".format(8*len(chunk), "".join(hex_chunk)), 100)), file=svf) print(""" SIR 8 TDI (FF); RUNTEST IDLE 100 TCK 1.00E-02 SEC; SIR 8 TDI (C0); RUNTEST IDLE 2 TCK 1.00E-03 SEC; SDR 32 TDI (00000000) TDO (00000000) MASK (FFFFFFFF); ! Shift in ISC DISABLE(0x26) instruction SIR 8 TDI (26); RUNTEST IDLE 2 TCK 2.00E-01 SEC; ! Shift in BYPASS(0xFF) instruction SIR 8 TDI (FF); RUNTEST IDLE 2 TCK 1.00E-03 SEC; ! Shift in LSC_READ_STATUS(0x3C) instruction SIR 8 TDI (3C); SDR 32 TDI (00000000) TDO (00000100) MASK (00002100); """, file=svf) if __name__ == "__main__": bit_to_svf(sys.argv[1], sys.argv[2])
27.53211
123
0.518494
import sys import textwrap max_row_size = 100000 def bitreverse(x): y = 0 for i in range(8): if (x >> (7 - i)) & 1 == 1: y |= (1 << i) return y def bit_to_svf(bit, svf): with open(bit, 'rb') as bitf: bs = bitf.read() idcode_cmd = bytes([0xE2, 0x00, 0x00, 0x00]) idcode = None for i in range(len(bs) - 4): if bs[i:i+4] == idcode_cmd: idcode = bs[i+4] << 24 idcode |= bs[i+5] << 16 idcode |= bs[i+6] << 8 idcode |= bs[i+7] break if idcode is None: print("Failed to find IDCODE in bitstream, check bitstream is valid") sys.exit(1) print("IDCODE in bitstream is 0x%08x" % idcode) bitf.seek(0) with open(svf, 'w') as svf: print(""" HDR 0; HIR 0; TDR 0; TIR 0; ENDDR DRPAUSE; ENDIR IRPAUSE; STATE IDLE; """, file=svf) print(""" SIR 8 TDI (E0); SDR 32 TDI (00000000) TDO ({:08X}) MASK (FFFFFFFF); """.format(idcode), file=svf) print(""" SIR 8 TDI (1C); SDR 510 TDI (3FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); SIR 8 TDI (C6); SDR 8 TDI (00); RUNTEST IDLE 2 TCK 1.00E-02 SEC; SIR 8 TDI (3C); SDR 32 TDI (00000000) TDO (00000000) MASK (0000B000); SIR 8 TDI (46); SDR 8 TDI (01); RUNTEST IDLE 2 TCK 1.00E-02 SEC; SIR 8 TDI (7A); RUNTEST IDLE 2 TCK 1.00E-02 SEC; """, file=svf) while True: chunk = bitf.read(max_row_size//8) if not chunk: break br_chunk = [bitreverse(x) for x in chunk] hex_chunk = ["{:02X}".format(x) for x in reversed(br_chunk)] print("\n".join(textwrap.wrap("SDR {} TDI ({});".format(8*len(chunk), "".join(hex_chunk)), 100)), file=svf) print(""" SIR 8 TDI (FF); RUNTEST IDLE 100 TCK 1.00E-02 SEC; SIR 8 TDI (C0); RUNTEST IDLE 2 TCK 1.00E-03 SEC; SDR 32 TDI (00000000) TDO (00000000) MASK (FFFFFFFF); ! Shift in ISC DISABLE(0x26) instruction SIR 8 TDI (26); RUNTEST IDLE 2 TCK 2.00E-01 SEC; ! Shift in BYPASS(0xFF) instruction SIR 8 TDI (FF); RUNTEST IDLE 2 TCK 1.00E-03 SEC; ! Shift in LSC_READ_STATUS(0x3C) instruction SIR 8 TDI (3C); SDR 32 TDI (00000000) TDO (00000100) MASK (00002100); """, file=svf) if __name__ == "__main__": bit_to_svf(sys.argv[1], sys.argv[2])
true
true
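The converter above bit-reverses every bitstream byte before writing it out as SVF hex. A tiny worked example of that reversal, using the same helper as in the record (the asserted values are illustrative, not taken from a real bitstream):

def bitreverse(x):
    # Mirror the 8 bits of a byte, as done per chunk byte in bit_to_svf.
    y = 0
    for i in range(8):
        if (x >> (7 - i)) & 1 == 1:
            y |= (1 << i)
    return y

assert bitreverse(0x01) == 0x80   # 0000_0001 -> 1000_0000
assert bitreverse(0xA5) == 0xA5   # 1010_0101 reads the same in both directions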
f702781d328a68fc3d131624d2296b76500bf539
2,425
py
Python
lib/spack/spack/container/images.py
cjy7117/spack
3582115d2af3ebd8156d742883c049e5b864eb2d
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
1
2020-09-02T11:55:57.000Z
2020-09-02T11:55:57.000Z
lib/spack/spack/container/images.py
cjy7117/spack
3582115d2af3ebd8156d742883c049e5b864eb2d
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
1
2021-03-23T17:08:14.000Z
2021-04-01T17:18:22.000Z
lib/spack/spack/container/images.py
cjy7117/spack
3582115d2af3ebd8156d742883c049e5b864eb2d
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
2
2020-01-10T18:54:54.000Z
2021-07-03T22:57:16.000Z
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) """Manages the details on the images used in the build and the run stage.""" import json import os.path #: Global variable used to cache in memory the content of images.json _data = None def data(): """Returns a dictionary with the static data on the images. The dictionary is read from a JSON file lazily the first time this function is called. """ global _data if not _data: json_dir = os.path.abspath(os.path.dirname(__file__)) json_file = os.path.join(json_dir, 'images.json') with open(json_file) as f: _data = json.load(f) return _data def build_info(image, spack_version): """Returns the name of the build image and its tag. Args: image (str): image to be used at run-time. Should be of the form <image_name>:<image_tag> e.g. "ubuntu:18.04" spack_version (str): version of Spack that we want to use to build Returns: A tuple with (image_name, image_tag) for the build image """ # Don't handle error here, as a wrong image should have been # caught by the JSON schema image_data = data()[image] build_image = image_data['build'] # Try to check if we have a tag for this Spack version try: # Translate version from git to docker if necessary build_tag = image_data['build_tags'].get(spack_version, spack_version) except KeyError: msg = ('the image "{0}" has no tag for Spack version "{1}" ' '[valid versions are {2}]') msg = msg.format(build_image, spack_version, ', '.join(image_data['build_tags'].keys())) raise ValueError(msg) return build_image, build_tag def package_info(image): """Returns the commands used to update system repositories, install system packages and clean afterwards. Args: image (str): image to be used at run-time. Should be of the form <image_name>:<image_tag> e.g. "ubuntu:18.04" Returns: A tuple of (update, install, clean) commands. """ image_data = data()[image] update = image_data['update'] install = image_data['install'] clean = image_data['clean'] return update, install, clean
32.77027
78
0.655258
import json import os.path _data = None def data(): global _data if not _data: json_dir = os.path.abspath(os.path.dirname(__file__)) json_file = os.path.join(json_dir, 'images.json') with open(json_file) as f: _data = json.load(f) return _data def build_info(image, spack_version): # caught by the JSON schema image_data = data()[image] build_image = image_data['build'] # Try to check if we have a tag for this Spack version try: # Translate version from git to docker if necessary build_tag = image_data['build_tags'].get(spack_version, spack_version) except KeyError: msg = ('the image "{0}" has no tag for Spack version "{1}" ' '[valid versions are {2}]') msg = msg.format(build_image, spack_version, ', '.join(image_data['build_tags'].keys())) raise ValueError(msg) return build_image, build_tag def package_info(image): image_data = data()[image] update = image_data['update'] install = image_data['install'] clean = image_data['clean'] return update, install, clean
true
true
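Both helpers in the record above are keyed by the run-time image name documented as "<image_name>:<image_tag>". A hypothetical call, assuming the bundled images.json actually contains an "ubuntu:18.04" entry and that the Spack version string below has a build tag (both are assumptions made for illustration):

from spack.container.images import build_info, package_info

build_image, build_tag = build_info("ubuntu:18.04", "0.16.0")   # illustrative version string
update_cmd, install_cmd, clean_cmd = package_info("ubuntu:18.04")
print(build_image, build_tag)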
f702783a2b2ea74ee55856da85cc7e661ba018dd
1,648
py
Python
misc/pythontools/processors/VolumeExtractChannel.py
grassofsky/modules
fe51de837fed6887228f2d3f8a455d5f4602d786
[ "BSD-2-Clause" ]
6
2019-01-31T09:38:31.000Z
2021-05-23T18:39:13.000Z
misc/pythontools/processors/VolumeExtractChannel.py
grassofsky/modules
fe51de837fed6887228f2d3f8a455d5f4602d786
[ "BSD-2-Clause" ]
65
2019-01-28T20:17:35.000Z
2022-02-25T08:08:03.000Z
misc/pythontools/processors/VolumeExtractChannel.py
grassofsky/modules
fe51de837fed6887228f2d3f8a455d5f4602d786
[ "BSD-2-Clause" ]
7
2019-09-15T20:06:03.000Z
2021-11-23T14:59:37.000Z
# Name: VolumeExtractChannel import inviwopy as ivw import numpy as np class VolumeExtractChannel(ivw.Processor): def __init__(self, id, name): ivw.Processor.__init__(self, id, name) self.inport = ivw.data.VolumeInport("inport") self.addInport(self.inport, owner=False) self.outport = ivw.data.VolumeOutport("outport") self.addOutport(self.outport, owner=False) self.channel = ivw.properties.IntProperty("channel", "channel", 0, 0, 4, 1) self.addProperty(self.channel, owner=False) @staticmethod def processorInfo(): return ivw.ProcessorInfo( classIdentifier = "org.inviwo.VolumeExtractChannel", displayName = "Volume Extract Channel", category = "Volume Operation", codeState = ivw.CodeState.Stable, tags = ivw.Tags.PY ) def getProcessorInfo(self): return VolumeExtractChannel.processorInfo() def process(self): volume = self.inport.getData() if len(volume.data.shape) <= 3: self.outport.setData(volume) return channels = volume.data.shape[3] volumeSlice = volume.data[:,:,:, np.clip(self.channel.value, 0, channels-1)] newVolume = ivw.data.Volume(volumeSlice) newVolume.dataMap = volume.dataMap newVolume.modelMatrix = volume.modelMatrix newVolume.worldMatrix = volume.worldMatrix newVolume.copyMetaDataFrom(volume) newVolume.swizzlemask = volume.swizzlemask newVolume.interpolation = volume.interpolation newVolume.wrapping = volume.wrapping self.outport.setData(newVolume)
33.632653
84
0.663835
import inviwopy as ivw import numpy as np class VolumeExtractChannel(ivw.Processor): def __init__(self, id, name): ivw.Processor.__init__(self, id, name) self.inport = ivw.data.VolumeInport("inport") self.addInport(self.inport, owner=False) self.outport = ivw.data.VolumeOutport("outport") self.addOutport(self.outport, owner=False) self.channel = ivw.properties.IntProperty("channel", "channel", 0, 0, 4, 1) self.addProperty(self.channel, owner=False) @staticmethod def processorInfo(): return ivw.ProcessorInfo( classIdentifier = "org.inviwo.VolumeExtractChannel", displayName = "Volume Extract Channel", category = "Volume Operation", codeState = ivw.CodeState.Stable, tags = ivw.Tags.PY ) def getProcessorInfo(self): return VolumeExtractChannel.processorInfo() def process(self): volume = self.inport.getData() if len(volume.data.shape) <= 3: self.outport.setData(volume) return channels = volume.data.shape[3] volumeSlice = volume.data[:,:,:, np.clip(self.channel.value, 0, channels-1)] newVolume = ivw.data.Volume(volumeSlice) newVolume.dataMap = volume.dataMap newVolume.modelMatrix = volume.modelMatrix newVolume.worldMatrix = volume.worldMatrix newVolume.copyMetaDataFrom(volume) newVolume.swizzlemask = volume.swizzlemask newVolume.interpolation = volume.interpolation newVolume.wrapping = volume.wrapping self.outport.setData(newVolume)
true
true
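The processor above selects a single channel from a multi-channel volume with a clipped NumPy index. The same slicing can be exercised outside Inviwo on a random array (the shape and requested channel below are made up for illustration):

import numpy as np

volume_data = np.random.rand(16, 16, 16, 3)    # x, y, z, channels
requested_channel = 5                          # deliberately out of range
channels = volume_data.shape[3]
volume_slice = volume_data[:, :, :, np.clip(requested_channel, 0, channels - 1)]
print(volume_slice.shape)                      # (16, 16, 16), taken from the last valid channel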
f7027871d8f121fbe03c77a55263ef697ac25229
853
py
Python
devscripts/PREAMBLE.py
dish59742/dulwich
d7f1331cba0ca959e8b701265b0c1547191a4726
[ "Apache-2.0" ]
624
2018-01-25T02:40:53.000Z
2022-02-02T12:38:55.000Z
devscripts/PREAMBLE.py
dish59742/dulwich
d7f1331cba0ca959e8b701265b0c1547191a4726
[ "Apache-2.0" ]
358
2015-01-06T11:36:42.000Z
2022-03-20T01:09:47.000Z
devscripts/PREAMBLE.py
dish59742/dulwich
d7f1331cba0ca959e8b701265b0c1547191a4726
[ "Apache-2.0" ]
166
2015-01-09T21:10:40.000Z
2022-03-15T08:20:35.000Z
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU # General Public License as public by the Free Software Foundation; version 2.0 # or (at your option) any later version. You can redistribute it and/or # modify it under the terms of either of these two licenses. # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # You should have received a copy of the licenses; if not, see # <http://www.gnu.org/licenses/> for a copy of the GNU General Public License # and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache # License, Version 2.0. #
50.176471
79
0.764361
true
true
f7027877a6531c952b6a1d6afa0ea1d9608db0c1
879
py
Python
twisted/web2/auth/basic.py
twonds/twisted
d6e270a465d371c3bed01bf369af497b77eb9f1e
[ "Unlicense", "MIT" ]
1
2021-01-27T19:11:21.000Z
2021-01-27T19:11:21.000Z
twisted/web2/auth/basic.py
twonds/twisted
d6e270a465d371c3bed01bf369af497b77eb9f1e
[ "Unlicense", "MIT" ]
null
null
null
twisted/web2/auth/basic.py
twonds/twisted
d6e270a465d371c3bed01bf369af497b77eb9f1e
[ "Unlicense", "MIT" ]
3
2017-01-04T01:24:15.000Z
2020-06-18T16:14:56.000Z
# -*- test-case-name: twisted.web2.test.test_httpauth -*- from twisted.cred import credentials, error from twisted.web2.auth.interfaces import ICredentialFactory from zope.interface import implements class BasicCredentialFactory(object): """ Credential Factory for HTTP Basic Authentication """ implements(ICredentialFactory) scheme = 'basic' def __init__(self, realm): self.realm = realm def getChallenge(self, peer): return {'realm': self.realm} def decode(self, response, request): try: creds = (response + '===').decode('base64') except: raise error.LoginFailed('Invalid credentials') creds = creds.split(':', 1) if len(creds) == 2: return credentials.UsernamePassword(*creds) else: raise error.LoginFailed('Invalid credentials')
25.852941
59
0.641638
from twisted.cred import credentials, error from twisted.web2.auth.interfaces import ICredentialFactory from zope.interface import implements class BasicCredentialFactory(object): implements(ICredentialFactory) scheme = 'basic' def __init__(self, realm): self.realm = realm def getChallenge(self, peer): return {'realm': self.realm} def decode(self, response, request): try: creds = (response + '===').decode('base64') except: raise error.LoginFailed('Invalid credentials') creds = creds.split(':', 1) if len(creds) == 2: return credentials.UsernamePassword(*creds) else: raise error.LoginFailed('Invalid credentials')
true
true
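decode() in the record above is Python 2 code: it appends '===' before str.decode('base64') so that a response with missing padding still decodes. A Python 3 sketch of the same idea, padding explicitly instead (decode_basic is an illustrative name, not part of Twisted):

import base64

def decode_basic(response):
    # Pad to a multiple of 4 before decoding, then split on the first ':'.
    padded = response + "=" * (-len(response) % 4)
    user, _, password = base64.b64decode(padded).decode("utf-8").partition(":")
    return user, password

print(decode_basic(base64.b64encode(b"alice:s3cret").decode("ascii")))  # ('alice', 's3cret')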
f70278cea4e7162ad9cc5b7f21e6718f080b1563
15,383
py
Python
couchbase/management/buckets.py
kulabh/couchbase-python-client
eb52c53302dfa563e289f04af518d40b3921767b
[ "Apache-2.0" ]
null
null
null
couchbase/management/buckets.py
kulabh/couchbase-python-client
eb52c53302dfa563e289f04af518d40b3921767b
[ "Apache-2.0" ]
null
null
null
couchbase/management/buckets.py
kulabh/couchbase-python-client
eb52c53302dfa563e289f04af518d40b3921767b
[ "Apache-2.0" ]
null
null
null
from couchbase.management.admin import Admin from couchbase_core.mapper import BijectiveMapping, \ StringEnum, Identity, Timedelta, Bijection, StringEnumLoose from ..options import OptionBlockTimeOut, forward_args from couchbase.management.generic import GenericManager from typing import * from couchbase_core import abstractmethod, mk_formstr from couchbase_core.durability import Durability from couchbase.exceptions import HTTPException, ErrorMapper, BucketAlreadyExistsException, BucketDoesNotExistException import enum import datetime class BucketManagerErrorHandler(ErrorMapper): @staticmethod def mapping(): # type (...)->Mapping[str, CBErrorType] return {HTTPException: {'Bucket with given name (already|still) exists': BucketAlreadyExistsException, 'Requested resource not found': BucketDoesNotExistException}} @BucketManagerErrorHandler.wrap class BucketManager(GenericManager): def __init__(self, # type: BucketManager admin_bucket # type: Admin ): """Bucket Manager :param admin_bucket: Admin bucket """ super(BucketManager, self).__init__(admin_bucket) def create_bucket(self, # type: BucketManager settings, # type: CreateBucketSettings *options, # type: CreateBucketOptions **kwargs # type: Any ): """ Creates a new bucket. :param: CreateBucketSettings settings: settings for the bucket. :param: CreateBucketOptions options: options for setting the bucket. :param: Any kwargs: override corresponding values in the options. :raises: BucketAlreadyExistsException :raises: InvalidArgumentsException """ # prune the missing settings... params= settings.as_dict()#*options, **kwargs) # insure flushEnabled is an int params['flushEnabled'] = int(params.get('flushEnabled', 0)) # send it return self._admin_bucket.http_request( path='/pools/default/buckets', method='POST', content=mk_formstr(params), content_type='application/x-www-form-urlencoded', **forward_args(kwargs, *options)) def update_bucket(self, # type: BucketManager settings, # type: BucketSettings *options, # type: UpdateBucketOptions **kwargs # type: Any ): """ Updates a bucket. Every setting must be set to what the user wants it to be after the update. Any settings that are not set to their desired values may be reverted to default values by the server. :param BucketSettings settings: settings for updating the bucket. :param UpdateBucketOptions options: options for updating the bucket. :param Any kwargs: override corresponding values in the options. :raises: InvalidArgumentsException :raises: BucketDoesNotExistException """ # prune the missing settings... params = settings.as_dict ()#*options, **kwargs) # insure flushEnabled is an int params['flushEnabled'] = int(params.get('flushEnabled', 0)) # send it return self._admin_bucket.http_request( path='/pools/default/buckets/' + settings.name, method='POST', content_type='application/x-www-form-urlencoded', content=mk_formstr(params), **forward_args(kwargs, *options)) def drop_bucket(self, # type: BucketManager bucket_name, # type: str *options, # type: DropBucketOptions **kwargs # type: Any ): # type: (...) -> None """ Removes a bucket. :param str bucket_name: the name of the bucket. :param DropBucketOptions options: options for dropping the bucket. :param Any kwargs: override corresponding value in the options. 
:raises: BucketNotFoundException :raises: InvalidArgumentsException """ return self._admin_bucket.http_request( path='/pools/default/buckets/' + bucket_name, method='DELETE', **forward_args(kwargs, *options)) def get_bucket(self, # type: BucketManager bucket_name, # type: str *options, # type: GetBucketOptions **kwargs # type: Any ): # type: (...) -> BucketSettings """ Gets a bucket's settings. :param str bucket_name: the name of the bucket. :param GetBucketOptions options: options for getting the bucket. :param Any kwargs: override corresponding values in options. :returns: settings for the bucket. Note: the ram quota returned is in bytes not mb so requires x / 1024 twice. Also Note: FlushEnabled is not a setting returned by the server, if flush is enabled then the doFlush endpoint will be listed and should be used to populate the field. :rtype: BucketSettings :raises: BucketNotFoundException :raises: InvalidArgumentsException """ return BucketSettings.from_raw( self._admin_bucket.http_request( path='/pools/default/buckets/' + bucket_name, method='GET', **forward_args(kwargs, *options) ).value) def get_all_buckets(self, # type: BucketManager *options, # type: GetAllBucketOptions **kwargs # type: Any ): # type: (...) -> Iterable[BucketSettings] """ Gets all bucket settings. Note, # type: the ram quota returned is in bytes not mb so requires x / 1024 twice. :param GetAllBucketOptions options: options for getting all buckets. :param Any kwargs: override corresponding value in options. :returns: An iterable of settings for each bucket. :rtype: Iterable[BucketSettings] """ return list( map(lambda x: BucketSettings(**x), self._admin_bucket.http_request( path='/pools/default/buckets', method='GET', **forward_args(kwargs, *options) ).value)) def flush_bucket(self, # type: BucketManager bucket_name, # type: str *options, # type: FlushBucketOptions **kwargs # type: Any ): # using the ns_server REST interface """ Flushes a bucket (uses the ns_server REST interface). :param str bucket_name: the name of the bucket. :param FlushBucketOptions options: options for flushing the bucket. :param Any kwargs: override corresponding value in options. 
:raises: BucketNotFoundException :raises: InvalidArgumentsException :raises: FlushDisabledException """ self._admin_bucket.http_request( path="/pools/default/buckets/{bucket_name}/controller/doFlush".format(bucket_name=bucket_name), method='POST', **forward_args(kwargs, *options)) class EvictionPolicyType(enum.Enum): NOT_RECENTLY_USED = "nruEviction" NO_EVICTION = "noEviction" FULL = "fullEviction" VALUE_ONLY = "valueOnly" class EjectionMethod(enum.Enum): FULL_EVICTION = "fullEviction" VALUE_ONLY = "valueOnly" class BucketType(enum.Enum): COUCHBASE = "membase" MEMCACHED = "memcached" EPHEMERAL = "ephemeral" class CompressionMode(enum.Enum): OFF = "off" PASSIVE = "passive" ACTIVE = "active" class ConflictResolutionType(enum.Enum): TIMESTAMP = "lww" SEQUENCE_NUMBER = "seqno" class BucketSettings(dict): mapping = BijectiveMapping({'flushEnabled': {'flush_enabled': Bijection(int.__bool__, bool.__int__)}, 'numReplicas': {'num_replicas': Identity(int)}, 'ramQuotaMB': {'ram_quota_mb': Identity(int)}, 'replicaNumber': {'num_replicas': Identity(int)}, 'replicaIndex': {'replica_index': Identity(bool)}, 'bucketType': {'bucket_type': -StringEnumLoose(BucketType)}, 'maxTTL': {'max_ttl': -Timedelta(int)}, 'compressionMode': {'compression_mode': -StringEnum(CompressionMode)}, 'conflictResolutionType': { 'conflict_resolution_type': -StringEnumLoose(ConflictResolutionType)}, 'evictionPolicy': {'eviction_policy': -StringEnumLoose(EvictionPolicyType)}, 'ejectionMethod': {'ejection_method': -StringEnumLoose(EjectionMethod)}, 'name': {'name': Identity(str)}, 'durabilityMinLevel': {'minimum_durability_level': Identity(str)}}) @overload def __init__(self, name=None, # type: str flush_enabled=False, # type: bool ram_quota_mb=None, # type: int num_replicas=None, # type: int replica_index=None, # type: bool bucket_type=None, # type: BucketType eviction_policy=None, # type: EvictionPolicyType max_ttl=None, # type: Union[datetime.timedelta,float,int] compression_mode=None # type: CompressionMode ): # type: (...) -> None pass def __init__(self, **kwargs): """BucketSettings provides a means of mapping bucket settings into an object. """ if kwargs.get('bucket_type',None) == "couchbase": kwargs['bucket_type'] = BucketType.COUCHBASE """ PYCBC-956 Bucket min durability setting is represented as string on the wire. See Durability enum for string representations """ durability = kwargs.pop('minimum_durability_level', None) if durability: if isinstance(durability, Durability): kwargs['minimum_durability_level'] = durability.to_server_str() else: kwargs['minimum_durability_level'] = Durability.from_server_str(durability) super(BucketSettings, self).__init__(**self.mapping.sanitize_src(kwargs)) def as_dict(self, *options, **kwargs): final_opts = dict(**Admin.bc_defaults) final_opts.update(**forward_args(kwargs,*options)) params=self.mapping.to_src(self) params.update({ 'authType': 'sasl', 'saslPassword': final_opts['bucket_password'] }) return params @classmethod def from_raw(cls, raw_info # type: Mapping[str, Any] ): # type: (...) -> BucketSettings result = cls(**cls.mapping.to_dest(raw_info)) quota = raw_info.get('quota', {}) # convert rawRAM to MB if 'rawRAM' in quota: result['ram_quota_mb'] = quota.get('rawRAM') / 1024 / 1024 else: result['ram_quota_mb'] = None controllers = raw_info.get('controllers', {}) result['flush_enabled'] = ('flush' in controllers) return result @property def name(self): # type: (...) 
-> str """Name (string) - The name of the bucket.""" return self.get('name') @property def flush_enabled(self): # type: (...) -> bool """Whether or not flush should be enabled on the bucket. Default to false.""" return self.get('flush_enabled', False) @property def ram_quota_mb(self): # type: (...) -> int """Ram Quota in mb for the bucket. (rawRAM in the server payload)""" return self.get('ram_quota_mb') @property def num_replicas(self): # type: (...) -> int """NumReplicas (int) - The number of replicas for documents.""" return self.get('replica_number') @property def replica_index(self): # type: (...) -> bool """ Whether replica indexes should be enabled for the bucket.""" return self.get('replica_index') @property def bucket_type(self): # type: (...) -> BucketType """BucketType {couchbase (sent on wire as membase), memcached, ephemeral} The type of the bucket. Default to couchbase.""" return self.get('bucketType') @property def eviction_policy(self): # type: (...) -> EvictionPolicyType """{fullEviction | valueOnly}. The eviction policy to use.""" return self.get('eviction_policy') @property def max_ttl(self): # type: (...) -> datetime.timedelta """Value for the maxTTL of new documents created without a ttl.""" return self.get('max_ttl') @property def compression_mode(self): # type: (...) -> CompressionMode """{off | passive | active} - The compression mode to use.""" return self.get('compression_mode') class CreateBucketSettings(BucketSettings): @overload def __init__(self, name=None, # type: str flush_enabled=False, # type: bool ram_quota_mb=None, # type: int num_replicas=None, # type: int replica_index=None, # type: bool bucket_type=None, # type: BucketType eviction_policy=None, # type: EvictionPolicyType max_ttl=None, # type: Union[datetime.timedelta,float,int] compression_mode=None, # type: CompressionMode conflict_resolution_type=None, # type: ConflictResolutionType bucket_password=None, # type: str ejection_method=None # type: EjectionMethod ): """ Bucket creation settings. :param name: name of the bucket :param flush_enabled: whether flush is enabled :param ram_quota_mb: raw quota in megabytes :param num_replicas: number of replicas :param replica_index: whether this is a replica index :param bucket_type: type of bucket :param eviction_policy: policy for eviction :param max_ttl: max time to live for bucket :param compression_mode: compression mode :param ejection_method: ejection method (deprecated, please use eviction_policy instead) """ def __init__(self, **kwargs): BucketSettings.__init__(self, **kwargs) @property def conflict_resolution_type(self): # type: (...) -> ConflictResolutionType return self.get('conflict_resolution_type') class CreateBucketOptions(OptionBlockTimeOut): pass class UpdateBucketOptions(OptionBlockTimeOut): pass class DropBucketOptions(OptionBlockTimeOut): pass class GetAllBucketOptions(OptionBlockTimeOut): pass class GetBucketOptions(OptionBlockTimeOut): pass class FlushBucketOptions(OptionBlockTimeOut): pass
37.157005
213
0.595528
from couchbase.management.admin import Admin from couchbase_core.mapper import BijectiveMapping, \ StringEnum, Identity, Timedelta, Bijection, StringEnumLoose from ..options import OptionBlockTimeOut, forward_args from couchbase.management.generic import GenericManager from typing import * from couchbase_core import abstractmethod, mk_formstr from couchbase_core.durability import Durability from couchbase.exceptions import HTTPException, ErrorMapper, BucketAlreadyExistsException, BucketDoesNotExistException import enum import datetime class BucketManagerErrorHandler(ErrorMapper): @staticmethod def mapping(): return {HTTPException: {'Bucket with given name (already|still) exists': BucketAlreadyExistsException, 'Requested resource not found': BucketDoesNotExistException}} @BucketManagerErrorHandler.wrap class BucketManager(GenericManager): def __init__(self, admin_bucket ): super(BucketManager, self).__init__(admin_bucket) def create_bucket(self, settings, *options, **kwargs ): params= settings.as_dict() params['flushEnabled'] = int(params.get('flushEnabled', 0)) return self._admin_bucket.http_request( path='/pools/default/buckets', method='POST', content=mk_formstr(params), content_type='application/x-www-form-urlencoded', **forward_args(kwargs, *options)) def update_bucket(self, settings, *options, **kwargs ): params = settings.as_dict () params['flushEnabled'] = int(params.get('flushEnabled', 0)) return self._admin_bucket.http_request( path='/pools/default/buckets/' + settings.name, method='POST', content_type='application/x-www-form-urlencoded', content=mk_formstr(params), **forward_args(kwargs, *options)) def drop_bucket(self, bucket_name, *options, **kwargs ): return self._admin_bucket.http_request( path='/pools/default/buckets/' + bucket_name, method='DELETE', **forward_args(kwargs, *options)) def get_bucket(self, bucket_name, *options, **kwargs ): return BucketSettings.from_raw( self._admin_bucket.http_request( path='/pools/default/buckets/' + bucket_name, method='GET', **forward_args(kwargs, *options) ).value) def get_all_buckets(self, *options, **kwargs ): return list( map(lambda x: BucketSettings(**x), self._admin_bucket.http_request( path='/pools/default/buckets', method='GET', **forward_args(kwargs, *options) ).value)) def flush_bucket(self, bucket_name, *options, **kwargs ): self._admin_bucket.http_request( path="/pools/default/buckets/{bucket_name}/controller/doFlush".format(bucket_name=bucket_name), method='POST', **forward_args(kwargs, *options)) class EvictionPolicyType(enum.Enum): NOT_RECENTLY_USED = "nruEviction" NO_EVICTION = "noEviction" FULL = "fullEviction" VALUE_ONLY = "valueOnly" class EjectionMethod(enum.Enum): FULL_EVICTION = "fullEviction" VALUE_ONLY = "valueOnly" class BucketType(enum.Enum): COUCHBASE = "membase" MEMCACHED = "memcached" EPHEMERAL = "ephemeral" class CompressionMode(enum.Enum): OFF = "off" PASSIVE = "passive" ACTIVE = "active" class ConflictResolutionType(enum.Enum): TIMESTAMP = "lww" SEQUENCE_NUMBER = "seqno" class BucketSettings(dict): mapping = BijectiveMapping({'flushEnabled': {'flush_enabled': Bijection(int.__bool__, bool.__int__)}, 'numReplicas': {'num_replicas': Identity(int)}, 'ramQuotaMB': {'ram_quota_mb': Identity(int)}, 'replicaNumber': {'num_replicas': Identity(int)}, 'replicaIndex': {'replica_index': Identity(bool)}, 'bucketType': {'bucket_type': -StringEnumLoose(BucketType)}, 'maxTTL': {'max_ttl': -Timedelta(int)}, 'compressionMode': {'compression_mode': -StringEnum(CompressionMode)}, 'conflictResolutionType': { 
'conflict_resolution_type': -StringEnumLoose(ConflictResolutionType)}, 'evictionPolicy': {'eviction_policy': -StringEnumLoose(EvictionPolicyType)}, 'ejectionMethod': {'ejection_method': -StringEnumLoose(EjectionMethod)}, 'name': {'name': Identity(str)}, 'durabilityMinLevel': {'minimum_durability_level': Identity(str)}}) @overload def __init__(self, name=None, flush_enabled=False, ram_quota_mb=None, num_replicas=None, replica_index=None, bucket_type=None, eviction_policy=None, max_ttl=None, compression_mode=None ): pass def __init__(self, **kwargs): if kwargs.get('bucket_type',None) == "couchbase": kwargs['bucket_type'] = BucketType.COUCHBASE durability = kwargs.pop('minimum_durability_level', None) if durability: if isinstance(durability, Durability): kwargs['minimum_durability_level'] = durability.to_server_str() else: kwargs['minimum_durability_level'] = Durability.from_server_str(durability) super(BucketSettings, self).__init__(**self.mapping.sanitize_src(kwargs)) def as_dict(self, *options, **kwargs): final_opts = dict(**Admin.bc_defaults) final_opts.update(**forward_args(kwargs,*options)) params=self.mapping.to_src(self) params.update({ 'authType': 'sasl', 'saslPassword': final_opts['bucket_password'] }) return params @classmethod def from_raw(cls, raw_info ): result = cls(**cls.mapping.to_dest(raw_info)) quota = raw_info.get('quota', {}) if 'rawRAM' in quota: result['ram_quota_mb'] = quota.get('rawRAM') / 1024 / 1024 else: result['ram_quota_mb'] = None controllers = raw_info.get('controllers', {}) result['flush_enabled'] = ('flush' in controllers) return result @property def name(self): return self.get('name') @property def flush_enabled(self): return self.get('flush_enabled', False) @property def ram_quota_mb(self): return self.get('ram_quota_mb') @property def num_replicas(self): return self.get('replica_number') @property def replica_index(self): return self.get('replica_index') @property def bucket_type(self): return self.get('bucketType') @property def eviction_policy(self): return self.get('eviction_policy') @property def max_ttl(self): return self.get('max_ttl') @property def compression_mode(self): return self.get('compression_mode') class CreateBucketSettings(BucketSettings): @overload def __init__(self, name=None, flush_enabled=False, ram_quota_mb=None, num_replicas=None, replica_index=None, bucket_type=None, eviction_policy=None, max_ttl=None, compression_mode=None, conflict_resolution_type=None, bucket_password=None, ejection_method=None ): def __init__(self, **kwargs): BucketSettings.__init__(self, **kwargs) @property def conflict_resolution_type(self): return self.get('conflict_resolution_type') class CreateBucketOptions(OptionBlockTimeOut): pass class UpdateBucketOptions(OptionBlockTimeOut): pass class DropBucketOptions(OptionBlockTimeOut): pass class GetAllBucketOptions(OptionBlockTimeOut): pass class GetBucketOptions(OptionBlockTimeOut): pass class FlushBucketOptions(OptionBlockTimeOut): pass
true
true
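BucketSettings.from_raw in the record above derives two fields that the server does not return verbatim: the RAM quota (rawRAM is reported in bytes) and the flush flag (present only as a 'flush' controller endpoint). A plain-dict sketch of those two conversions; the payload values are invented for illustration:

raw_info = {"quota": {"rawRAM": 104857600}, "controllers": {"flush": "/pools/default/buckets/x/controller/doFlush"}}
ram_quota_mb = raw_info["quota"]["rawRAM"] / 1024 / 1024        # 100.0
flush_enabled = "flush" in raw_info.get("controllers", {})      # True
print(ram_quota_mb, flush_enabled)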
f70279cf9a6103ce7baf015e8ef13fb314e9824c
5,629
py
Python
core/eolearn/core/eotask.py
dreampedia20/eo-learn
f4994a1d6e910ba5d6ad877726ac2367048a44a1
[ "MIT" ]
1
2019-04-08T02:26:40.000Z
2019-04-08T02:26:40.000Z
core/eolearn/core/eotask.py
dreampedia20/eo-learn
f4994a1d6e910ba5d6ad877726ac2367048a44a1
[ "MIT" ]
1
2019-11-27T09:44:40.000Z
2019-11-27T09:44:40.000Z
core/eolearn/core/eotask.py
dreampedia20/eo-learn
f4994a1d6e910ba5d6ad877726ac2367048a44a1
[ "MIT" ]
1
2019-01-30T09:08:23.000Z
2019-01-30T09:08:23.000Z
""" This module implements the core class hierarchy for implementing EO tasks. An EO task is any class the inherits from the abstract EOTask class. Each EO task has to implement the execute method; invoking __call__ on a EO task instance invokes the execute method. EO tasks are meant primarily to operate on EO patches (i.e. instances of EOPatch). EO task classes are generally lightweight (i.e. not too complicated), short, and do one thing well. For example, an EO task might take as input an EOPatch containing cloud mask and return as a result the cloud coverage for that mask. Credits: Copyright (c) 2017-2019 Matej Aleksandrov, Matej Batič, Andrej Burja, Eva Erzin (Sinergise) Copyright (c) 2017-2019 Grega Milčinski, Matic Lubej, Devis Peresutti, Jernej Puc, Tomislav Slijepčević (Sinergise) Copyright (c) 2017-2019 Blaž Sovdat, Jovan Višnjić, Anže Zupanc, Lojze Žust (Sinergise) This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree. """ import sys import logging import datetime import inspect from collections import OrderedDict from abc import ABC, abstractmethod import attr from .utilities import FeatureParser LOGGER = logging.getLogger(__name__) class EOTask(ABC): """Base class for EOTask.""" def __new__(cls, *args, **kwargs): """Stores initialization parameters and the order to the instance attribute `init_args`.""" self = super().__new__(cls) init_args = OrderedDict() for arg, value in zip(inspect.getfullargspec(self.__init__).args[1: len(args) + 1], args): init_args[arg] = repr(value) for arg in inspect.getfullargspec(self.__init__).args[len(args) + 1:]: if arg in kwargs: init_args[arg] = repr(kwargs[arg]) self.private_task_config = _PrivateTaskConfig(init_args=init_args) return self def __mul__(self, other): """Creates a composite task of this and passed task.""" return CompositeTask(other, self) def __call__(self, *eopatches, monitor=False, **kwargs): """Executes the task.""" # if monitor: # return self.execute_and_monitor(*eopatches, **kwargs) return self._execute_handling(*eopatches, **kwargs) def execute_and_monitor(self, *eopatches, **kwargs): """ In the current version nothing additional happens in this method """ return self._execute_handling(*eopatches, **kwargs) def _execute_handling(self, *eopatches, **kwargs): """ Handles measuring execution time and error propagation """ self.private_task_config.start_time = datetime.datetime.now() try: return_value = self.execute(*eopatches, **kwargs) self.private_task_config.end_time = datetime.datetime.now() return return_value except BaseException as exception: traceback = sys.exc_info()[2] # Some special exceptions don't accept an error message as a parameter and raise a TypeError in such case. try: errmsg = 'During execution of task {}: {}'.format(self.__class__.__name__, exception) extended_exception = type(exception)(errmsg) except TypeError: extended_exception = exception raise extended_exception.with_traceback(traceback) @abstractmethod def execute(self, *eopatches, **kwargs): """ Implement execute function """ raise NotImplementedError @staticmethod def _parse_features(features, new_names=False, rename_function=None, default_feature_type=None, allowed_feature_types=None): """ See eolearn.core.utilities.FeatureParser class. 
""" return FeatureParser(features, new_names=new_names, rename_function=rename_function, default_feature_type=default_feature_type, allowed_feature_types=allowed_feature_types) @attr.s(cmp=False) class _PrivateTaskConfig: """ A container for general EOTask parameters required during EOWorkflow and EOExecution :param init_args: A dictionary of parameters and values used for EOTask initialization :type init_args: OrderedDict :param uuid: An unique hexadecimal identifier string a task gets in EOWorkflow :type uuid: str or None :param start_time: Time when task execution started :type start_time: datetime.datetime or None :param end_time: Time when task execution ended :type end_time: datetime.datetime or None """ init_args = attr.ib() uuid = attr.ib(default=None) start_time = attr.ib(default=None) end_time = attr.ib(default=None) def __add__(self, other): return _PrivateTaskConfig(init_args=OrderedDict(list(self.init_args.items()) + list(other.init_args.items()))) class CompositeTask(EOTask): """Creates a task that is composite of two tasks. Note: Instead of directly using this task it might be more convenient to use `'*'` operation between tasks. Example: `composite_task = task1 * task2` :param eotask1: Task which will be executed first :type eotask1: EOTask :param eotask2: Task which will be executed on results of first task :type eotask2: EOTask """ def __init__(self, eotask1, eotask2): self.eotask1 = eotask1 self.eotask2 = eotask2 self.private_task_config = eotask1.private_task_config + eotask2.private_task_config def execute(self, *eopatches, **kwargs): return self.eotask2.execute(self.eotask1.execute(*eopatches, **kwargs))
39.363636
119
0.696927
import sys import logging import datetime import inspect from collections import OrderedDict from abc import ABC, abstractmethod import attr from .utilities import FeatureParser LOGGER = logging.getLogger(__name__) class EOTask(ABC): def __new__(cls, *args, **kwargs): self = super().__new__(cls) init_args = OrderedDict() for arg, value in zip(inspect.getfullargspec(self.__init__).args[1: len(args) + 1], args): init_args[arg] = repr(value) for arg in inspect.getfullargspec(self.__init__).args[len(args) + 1:]: if arg in kwargs: init_args[arg] = repr(kwargs[arg]) self.private_task_config = _PrivateTaskConfig(init_args=init_args) return self def __mul__(self, other): return CompositeTask(other, self) def __call__(self, *eopatches, monitor=False, **kwargs): return self._execute_handling(*eopatches, **kwargs) def execute_and_monitor(self, *eopatches, **kwargs): return self._execute_handling(*eopatches, **kwargs) def _execute_handling(self, *eopatches, **kwargs): self.private_task_config.start_time = datetime.datetime.now() try: return_value = self.execute(*eopatches, **kwargs) self.private_task_config.end_time = datetime.datetime.now() return return_value except BaseException as exception: traceback = sys.exc_info()[2] try: errmsg = 'During execution of task {}: {}'.format(self.__class__.__name__, exception) extended_exception = type(exception)(errmsg) except TypeError: extended_exception = exception raise extended_exception.with_traceback(traceback) @abstractmethod def execute(self, *eopatches, **kwargs): raise NotImplementedError @staticmethod def _parse_features(features, new_names=False, rename_function=None, default_feature_type=None, allowed_feature_types=None): return FeatureParser(features, new_names=new_names, rename_function=rename_function, default_feature_type=default_feature_type, allowed_feature_types=allowed_feature_types) @attr.s(cmp=False) class _PrivateTaskConfig: init_args = attr.ib() uuid = attr.ib(default=None) start_time = attr.ib(default=None) end_time = attr.ib(default=None) def __add__(self, other): return _PrivateTaskConfig(init_args=OrderedDict(list(self.init_args.items()) + list(other.init_args.items()))) class CompositeTask(EOTask): def __init__(self, eotask1, eotask2): self.eotask1 = eotask1 self.eotask2 = eotask2 self.private_task_config = eotask1.private_task_config + eotask2.private_task_config def execute(self, *eopatches, **kwargs): return self.eotask2.execute(self.eotask1.execute(*eopatches, **kwargs))
true
true
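A minimal usage sketch for the EOTask and CompositeTask classes in the eolearn record above. The AddOneTask subclass and the integer inputs are invented for illustration, and it is assumed the eolearn package providing EOTask is installed; only the behaviour visible in that record's code (__call__ forwarding to execute, '*' building a CompositeTask) is relied on.

from eolearn.core import EOTask

class AddOneTask(EOTask):
    """Hypothetical task: adds a constant to whatever execute receives."""
    def __init__(self, amount=1):
        self.amount = amount          # recorded in private_task_config.init_args by EOTask.__new__

    def execute(self, data):
        return data + self.amount

task = AddOneTask(amount=2)
print(task(10))                       # __call__ -> _execute_handling -> execute, prints 12

composite = AddOneTask(1) * AddOneTask(5)   # CompositeTask; the right-hand task runs first
print(composite(0))                   # 0 + 5 + 1 = 6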
f7027ac26fa896bc6deaf3d010bd7334ab987560
1,875
py
Python
src/kol/request/RespondToTradeRequest.py
danheath/temppykol
7f9621b44df9f9d2d9fc0a5b2a06db116b9ccfab
[ "BSD-3-Clause" ]
19
2015-02-16T08:30:49.000Z
2020-05-01T06:06:33.000Z
src/kol/request/RespondToTradeRequest.py
danheath/temppykol
7f9621b44df9f9d2d9fc0a5b2a06db116b9ccfab
[ "BSD-3-Clause" ]
5
2015-01-13T23:01:54.000Z
2016-11-30T15:23:43.000Z
src/kol/request/RespondToTradeRequest.py
danheath/temppykol
7f9621b44df9f9d2d9fc0a5b2a06db116b9ccfab
[ "BSD-3-Clause" ]
19
2015-05-28T09:36:19.000Z
2022-03-15T23:19:29.000Z
from kol.request.GenericRequest import GenericRequest
from kol.manager import PatternManager
import kol.Error as Error
from kol.util import Report

class RespondToTradeRequest(GenericRequest):
    def __init__(self, session, tradeid, items=None, meat=0, message=""):
        super(RespondToTradeRequest, self).__init__(session)
        self.url = session.serverURL + "makeoffer.php"
        self.requestData['action'] = 'counter'
        self.requestData['pwd'] = session.pwd
        self.requestData['whichoffer'] = tradeid
        self.requestData['offermeat'] = meat
        self.requestData['memo2'] = message
        ctr = 1
        for item in (items or []):
            self.requestData['whichitem' + str(ctr)] = item['itemID']
            self.requestData['howmany' + str(ctr)] = item['quantity']
            ctr += 1

    def parseResponse(self):
        noMeatPattern = PatternManager.getOrCompilePattern('traderHasNotEnoughMeat')
        if noMeatPattern.search(self.responseText):
            raise Error.Error("You don't have as much meat as you're promising.", Error.NOT_ENOUGH_MEAT)

        noItemsPattern = PatternManager.getOrCompilePattern('traderHasNotEnoughItems')
        if noItemsPattern.search(self.responseText):
            raise Error.Error("You don't have as many items as you're promising.", Error.NOT_ENOUGH_ITEMS)

        #Not testing for an offer being cancelled due to a bug in KoL - space reserved

        successPattern = PatternManager.getOrCompilePattern('tradeResponseSentSuccessfully')
        if successPattern.search(self.responseText):
            Report.trace("request", "Response to trade " + str(self.requestData['whichoffer']) + ' sent successfully.')
        else:
            raise Error.Error("Unknown error sending response to trade " + str(self.requestData['whichoffer']), Error.REQUEST_GENERIC)
50.675676
134
0.679467
from kol.request.GenericRequest import GenericRequest
from kol.manager import PatternManager
import kol.Error as Error
from kol.util import Report

class RespondToTradeRequest(GenericRequest):
    def __init__(self, session, tradeid, items=None, meat=0, message=""):
        super(RespondToTradeRequest, self).__init__(session)
        self.url = session.serverURL + "makeoffer.php"
        self.requestData['action'] = 'counter'
        self.requestData['pwd'] = session.pwd
        self.requestData['whichoffer'] = tradeid
        self.requestData['offermeat'] = meat
        self.requestData['memo2'] = message
        ctr = 1
        for item in (items or []):
            self.requestData['whichitem' + str(ctr)] = item['itemID']
            self.requestData['howmany' + str(ctr)] = item['quantity']
            ctr += 1

    def parseResponse(self):
        noMeatPattern = PatternManager.getOrCompilePattern('traderHasNotEnoughMeat')
        if noMeatPattern.search(self.responseText):
            raise Error.Error("You don't have as much meat as you're promising.", Error.NOT_ENOUGH_MEAT)

        noItemsPattern = PatternManager.getOrCompilePattern('traderHasNotEnoughItems')
        if noItemsPattern.search(self.responseText):
            raise Error.Error("You don't have as many items as you're promising.", Error.NOT_ENOUGH_ITEMS)

        successPattern = PatternManager.getOrCompilePattern('tradeResponseSentSuccessfully')
        if successPattern.search(self.responseText):
            Report.trace("request", "Response to trade " + str(self.requestData['whichoffer']) + ' sent successfully.')
        else:
            raise Error.Error("Unknown error sending response to trade " + str(self.requestData['whichoffer']), Error.REQUEST_GENERIC)
true
true
f7027b3f06db28c8a66a60541055054a89ae5668
6,821
py
Python
env/lib/python3.8/site-packages/plotly/graph_objs/volume/_caps.py
acrucetta/Chicago_COVI_WebApp
a37c9f492a20dcd625f8647067394617988de913
[ "MIT", "Unlicense" ]
11,750
2015-10-12T07:03:39.000Z
2022-03-31T20:43:15.000Z
env/lib/python3.8/site-packages/plotly/graph_objs/volume/_caps.py
acrucetta/Chicago_COVI_WebApp
a37c9f492a20dcd625f8647067394617988de913
[ "MIT", "Unlicense" ]
2,951
2015-10-12T00:41:25.000Z
2022-03-31T22:19:26.000Z
env/lib/python3.8/site-packages/plotly/graph_objs/volume/_caps.py
acrucetta/Chicago_COVI_WebApp
a37c9f492a20dcd625f8647067394617988de913
[ "MIT", "Unlicense" ]
2,623
2015-10-15T14:40:27.000Z
2022-03-28T16:05:50.000Z
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType import copy as _copy class Caps(_BaseTraceHierarchyType): # class properties # -------------------- _parent_path_str = "volume" _path_str = "volume.caps" _valid_props = {"x", "y", "z"} # x # - @property def x(self): """ The 'x' property is an instance of X that may be specified as: - An instance of :class:`plotly.graph_objs.volume.caps.X` - A dict of string/value properties that will be passed to the X constructor Supported dict properties: fill Sets the fill ratio of the `caps`. The default fill value of the `caps` is 1 meaning that they are entirely shaded. On the other hand Applying a `fill` ratio less than one would allow the creation of openings parallel to the edges. show Sets the fill ratio of the `slices`. The default fill value of the x `slices` is 1 meaning that they are entirely shaded. On the other hand Applying a `fill` ratio less than one would allow the creation of openings parallel to the edges. Returns ------- plotly.graph_objs.volume.caps.X """ return self["x"] @x.setter def x(self, val): self["x"] = val # y # - @property def y(self): """ The 'y' property is an instance of Y that may be specified as: - An instance of :class:`plotly.graph_objs.volume.caps.Y` - A dict of string/value properties that will be passed to the Y constructor Supported dict properties: fill Sets the fill ratio of the `caps`. The default fill value of the `caps` is 1 meaning that they are entirely shaded. On the other hand Applying a `fill` ratio less than one would allow the creation of openings parallel to the edges. show Sets the fill ratio of the `slices`. The default fill value of the y `slices` is 1 meaning that they are entirely shaded. On the other hand Applying a `fill` ratio less than one would allow the creation of openings parallel to the edges. Returns ------- plotly.graph_objs.volume.caps.Y """ return self["y"] @y.setter def y(self, val): self["y"] = val # z # - @property def z(self): """ The 'z' property is an instance of Z that may be specified as: - An instance of :class:`plotly.graph_objs.volume.caps.Z` - A dict of string/value properties that will be passed to the Z constructor Supported dict properties: fill Sets the fill ratio of the `caps`. The default fill value of the `caps` is 1 meaning that they are entirely shaded. On the other hand Applying a `fill` ratio less than one would allow the creation of openings parallel to the edges. show Sets the fill ratio of the `slices`. The default fill value of the z `slices` is 1 meaning that they are entirely shaded. On the other hand Applying a `fill` ratio less than one would allow the creation of openings parallel to the edges. 
Returns ------- plotly.graph_objs.volume.caps.Z """ return self["z"] @z.setter def z(self, val): self["z"] = val # Self properties description # --------------------------- @property def _prop_descriptions(self): return """\ x :class:`plotly.graph_objects.volume.caps.X` instance or dict with compatible properties y :class:`plotly.graph_objects.volume.caps.Y` instance or dict with compatible properties z :class:`plotly.graph_objects.volume.caps.Z` instance or dict with compatible properties """ def __init__(self, arg=None, x=None, y=None, z=None, **kwargs): """ Construct a new Caps object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.volume.Caps` x :class:`plotly.graph_objects.volume.caps.X` instance or dict with compatible properties y :class:`plotly.graph_objects.volume.caps.Y` instance or dict with compatible properties z :class:`plotly.graph_objects.volume.caps.Z` instance or dict with compatible properties Returns ------- Caps """ super(Caps, self).__init__("caps") if "_parent" in kwargs: self._parent = kwargs["_parent"] return # Validate arg # ------------ if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError( """\ The first argument to the plotly.graph_objs.volume.Caps constructor must be a dict or an instance of :class:`plotly.graph_objs.volume.Caps`""" ) # Handle skip_invalid # ------------------- self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) # Populate data dict with properties # ---------------------------------- _v = arg.pop("x", None) _v = x if x is not None else _v if _v is not None: self["x"] = _v _v = arg.pop("y", None) _v = y if y is not None else _v if _v is not None: self["y"] = _v _v = arg.pop("z", None) _v = z if z is not None else _v if _v is not None: self["z"] = _v # Process unknown kwargs # ---------------------- self._process_kwargs(**dict(arg, **kwargs)) # Reset skip_invalid # ------------------ self._skip_invalid = False
32.174528
82
0.511215
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType import copy as _copy class Caps(_BaseTraceHierarchyType): _parent_path_str = "volume" _path_str = "volume.caps" _valid_props = {"x", "y", "z"} @property def x(self): return self["x"] @x.setter def x(self, val): self["x"] = val @property def y(self): return self["y"] @y.setter def y(self, val): self["y"] = val @property def z(self): return self["z"] @z.setter def z(self, val): self["z"] = val @property def _prop_descriptions(self): return """\ x :class:`plotly.graph_objects.volume.caps.X` instance or dict with compatible properties y :class:`plotly.graph_objects.volume.caps.Y` instance or dict with compatible properties z :class:`plotly.graph_objects.volume.caps.Z` instance or dict with compatible properties """ def __init__(self, arg=None, x=None, y=None, z=None, **kwargs): super(Caps, self).__init__("caps") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError( """\ The first argument to the plotly.graph_objs.volume.Caps constructor must be a dict or an instance of :class:`plotly.graph_objs.volume.Caps`""" ) self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) _v = arg.pop("x", None) _v = x if x is not None else _v if _v is not None: self["x"] = _v _v = arg.pop("y", None) _v = y if y is not None else _v if _v is not None: self["y"] = _v _v = arg.pop("z", None) _v = z if z is not None else _v if _v is not None: self["z"] = _v self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
true
true
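An illustrative construction of the Caps object documented in the plotly record above. The fill/show values are arbitrary, and a working plotly installation is assumed; the x/y/z dict form follows the property docstrings in that record.

import plotly.graph_objects as go

# Plain dicts are accepted for x, y and z, as the property docstrings describe
caps = go.volume.Caps(
    x=dict(show=True, fill=0.7),   # shade the x caps at 70% fill
    y=dict(show=True, fill=0.7),
    z=dict(show=False),            # leave the z faces open
)
print(caps.to_plotly_json())       # {'x': {'fill': 0.7, 'show': True}, ...}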
f7027bdbfcc7842b76bfb89ba52eca7263568712
1,768
py
Python
aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20180510/DescribeCdnDeletedDomainsRequest.py
yndu13/aliyun-openapi-python-sdk
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
[ "Apache-2.0" ]
1,001
2015-07-24T01:32:41.000Z
2022-03-25T01:28:18.000Z
aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20180510/DescribeCdnDeletedDomainsRequest.py
yndu13/aliyun-openapi-python-sdk
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
[ "Apache-2.0" ]
363
2015-10-20T03:15:00.000Z
2022-03-08T12:26:19.000Z
aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20180510/DescribeCdnDeletedDomainsRequest.py
yndu13/aliyun-openapi-python-sdk
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
[ "Apache-2.0" ]
682
2015-09-22T07:19:02.000Z
2022-03-22T09:51:46.000Z
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from aliyunsdkcore.request import RpcRequest from aliyunsdkcdn.endpoint import endpoint_data class DescribeCdnDeletedDomainsRequest(RpcRequest): def __init__(self): RpcRequest.__init__(self, 'Cdn', '2018-05-10', 'DescribeCdnDeletedDomains') self.set_method('POST') if hasattr(self, "endpoint_map"): setattr(self, "endpoint_map", endpoint_data.getEndpointMap()) if hasattr(self, "endpoint_regional"): setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional()) def get_PageNumber(self): return self.get_query_params().get('PageNumber') def set_PageNumber(self,PageNumber): self.add_query_param('PageNumber',PageNumber) def get_PageSize(self): return self.get_query_params().get('PageSize') def set_PageSize(self,PageSize): self.add_query_param('PageSize',PageSize) def get_OwnerId(self): return self.get_query_params().get('OwnerId') def set_OwnerId(self,OwnerId): self.add_query_param('OwnerId',OwnerId)
35.36
78
0.765271
from aliyunsdkcore.request import RpcRequest from aliyunsdkcdn.endpoint import endpoint_data class DescribeCdnDeletedDomainsRequest(RpcRequest): def __init__(self): RpcRequest.__init__(self, 'Cdn', '2018-05-10', 'DescribeCdnDeletedDomains') self.set_method('POST') if hasattr(self, "endpoint_map"): setattr(self, "endpoint_map", endpoint_data.getEndpointMap()) if hasattr(self, "endpoint_regional"): setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional()) def get_PageNumber(self): return self.get_query_params().get('PageNumber') def set_PageNumber(self,PageNumber): self.add_query_param('PageNumber',PageNumber) def get_PageSize(self): return self.get_query_params().get('PageSize') def set_PageSize(self,PageSize): self.add_query_param('PageSize',PageSize) def get_OwnerId(self): return self.get_query_params().get('OwnerId') def set_OwnerId(self,OwnerId): self.add_query_param('OwnerId',OwnerId)
true
true
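A hedged sketch of how the request class above is typically driven through the core SDK client. The credentials and region are placeholders, and the AcsClient/do_action_with_exception pattern is the usual aliyun-python-sdk-core usage rather than anything stated in the record itself.

from aliyunsdkcore.client import AcsClient
from aliyunsdkcdn.request.v20180510.DescribeCdnDeletedDomainsRequest import DescribeCdnDeletedDomainsRequest

client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')

request = DescribeCdnDeletedDomainsRequest()
request.set_PageNumber(1)   # query parameters exposed by the setters above
request.set_PageSize(20)

response = client.do_action_with_exception(request)   # raw JSON bytes from the CDN API
print(response)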
f7027cc40744bece58196657f7f41d0dc93f9bce
346
py
Python
week1/1.05 classes/step09 buffer.py
TheNovel/stepik-python-fundamentals-and-application
4bf6838cfdb2323da2d8d52cfe393d61a4bb70cc
[ "MIT" ]
null
null
null
week1/1.05 classes/step09 buffer.py
TheNovel/stepik-python-fundamentals-and-application
4bf6838cfdb2323da2d8d52cfe393d61a4bb70cc
[ "MIT" ]
null
null
null
week1/1.05 classes/step09 buffer.py
TheNovel/stepik-python-fundamentals-and-application
4bf6838cfdb2323da2d8d52cfe393d61a4bb70cc
[ "MIT" ]
null
null
null
class Buffer: def __init__(self): self.lst = list() def add(self, *a): for value in a: self.lst.append(value) while len(self.lst) >= 5: s = 0 for i in range(5): s += self.lst.pop(0) print(s) def get_current_part(self): return self.lst
20.352941
36
0.465318
class Buffer: def __init__(self): self.lst = list() def add(self, *a): for value in a: self.lst.append(value) while len(self.lst) >= 5: s = 0 for i in range(5): s += self.lst.pop(0) print(s) def get_current_part(self): return self.lst
true
true
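Tracing the Buffer class above with a few arbitrary values shows its five-element flushing behaviour:

buf = Buffer()
buf.add(1, 2, 3)                  # fewer than five values buffered: nothing printed yet
print(buf.get_current_part())     # [1, 2, 3]

buf.add(4, 5, 6, 7)               # fifth value arrives -> prints 1+2+3+4+5 = 15
print(buf.get_current_part())     # [6, 7]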
f7027d1166fe593295d878cd3bf04e354cc16d40
2,262
py
Python
release/stubs.min/Autodesk/Revit/DB/__init___parts/GeometryObject.py
YKato521/ironpython-stubs
b1f7c580de48528490b3ee5791b04898be95a9ae
[ "MIT" ]
null
null
null
release/stubs.min/Autodesk/Revit/DB/__init___parts/GeometryObject.py
YKato521/ironpython-stubs
b1f7c580de48528490b3ee5791b04898be95a9ae
[ "MIT" ]
null
null
null
release/stubs.min/Autodesk/Revit/DB/__init___parts/GeometryObject.py
YKato521/ironpython-stubs
b1f7c580de48528490b3ee5791b04898be95a9ae
[ "MIT" ]
null
null
null
class GeometryObject(APIObject, IDisposable): """ The common base class for all geometric primitives. """ def Dispose(self): """ Dispose(self: APIObject,A_0: bool) """ pass def Equals(self, obj): """ Equals(self: GeometryObject,obj: object) -> bool Determines whether the specified System.Object is equal to the current System.Object. obj: Another object. """ pass def GetHashCode(self): """ GetHashCode(self: GeometryObject) -> int Gets the integer value of the geometry object as hash code """ pass def ReleaseManagedResources(self, *args): """ ReleaseManagedResources(self: APIObject) """ pass def ReleaseUnmanagedResources(self, *args): """ ReleaseUnmanagedResources(self: GeometryObject) """ pass def __enter__(self, *args): """ __enter__(self: IDisposable) -> object """ pass def __eq__(self, *args): """ x.__eq__(y) <==> x==y """ pass def __exit__(self, *args): """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """ pass def __init__(self, *args): """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __ne__(self, *args): pass GraphicsStyleId = property( lambda self: object(), lambda self, v: None, lambda self: None ) """The ElementId of the GeometryObject's GraphicsStyle Get: GraphicsStyleId(self: GeometryObject) -> ElementId """ IsElementGeometry = property( lambda self: object(), lambda self, v: None, lambda self: None ) """Indicates whether this geometry is obtained directly from an Element. Get: IsElementGeometry(self: GeometryObject) -> bool """ Visibility = property( lambda self: object(), lambda self, v: None, lambda self: None ) """The visibility. Get: Visibility(self: GeometryObject) -> Visibility """
22.39604
221
0.598585
class GeometryObject(APIObject, IDisposable): def Dispose(self): pass def Equals(self, obj): pass def GetHashCode(self): pass def ReleaseManagedResources(self, *args): pass def ReleaseUnmanagedResources(self, *args): pass def __enter__(self, *args): pass def __eq__(self, *args): pass def __exit__(self, *args): pass def __init__(self, *args): pass def __ne__(self, *args): pass GraphicsStyleId = property( lambda self: object(), lambda self, v: None, lambda self: None ) IsElementGeometry = property( lambda self: object(), lambda self, v: None, lambda self: None ) Visibility = property( lambda self: object(), lambda self, v: None, lambda self: None )
true
true
f7027dcd4d2737c0d58aa41af45dd91d95328e07
1,607
py
Python
handson2_q_qi.py
blacktanktop/techcircle_openai_handson
54ae9de6c488cbbff9bda1be11fd0ecd1e681049
[ "MIT" ]
1
2021-09-15T09:49:49.000Z
2021-09-15T09:49:49.000Z
handson2_q_qi.py
blacktanktop/techcircle_openai_handson
54ae9de6c488cbbff9bda1be11fd0ecd1e681049
[ "MIT" ]
null
null
null
handson2_q_qi.py
blacktanktop/techcircle_openai_handson
54ae9de6c488cbbff9bda1be11fd0ecd1e681049
[ "MIT" ]
null
null
null
import os import math import argparse import gym from agents.q_agent import Q, Agent, Trainer RECORD_PATH = os.path.join(os.path.dirname(__file__), "./upload") def main(episodes, render, monitor): env = gym.make("CartPole-v0") q = Q( env.action_space.n, env.observation_space, bin_size=[7, 7, 7, 7], low_bound=[-5, -0.5, -5, -0.5], high_bound=[5, 0.5, 5, 0.5] ) agent = Agent(q, epsilon=0.05) learning_decay = lambda lr, t: 1 / (t + 1) ** 0.5 epsilon_decay = lambda eps, t: 1 / (t + 1) ** 0.5 trainer = Trainer( agent, gamma=0.95, learning_rate=0.1, learning_rate_decay=learning_decay, epsilon=1.0, epsilon_decay=epsilon_decay, max_step=250) if monitor: env.monitor.start(RECORD_PATH) trainer.train(env, episode_count=episodes, render=render) if monitor: env.monitor.close() if __name__ == "__main__": parser = argparse.ArgumentParser(description="train & run cartpole ") parser.add_argument("--episode", type=int, default=1000, help="episode to train") parser.add_argument("--render", action="store_true", help="render the screen") parser.add_argument("--monitor", action="store_true", help="monitor") parser.add_argument("--upload", type=str, default="", help="upload key to openai gym (training is not executed)") args = parser.parse_args() if args.upload: if os.path.isdir(RECORD_PATH): gym.upload(RECORD_PATH, api_key=args.upload) else: main(args.episode, args.render, args.monitor)
29.218182
117
0.634723
import os import math import argparse import gym from agents.q_agent import Q, Agent, Trainer RECORD_PATH = os.path.join(os.path.dirname(__file__), "./upload") def main(episodes, render, monitor): env = gym.make("CartPole-v0") q = Q( env.action_space.n, env.observation_space, bin_size=[7, 7, 7, 7], low_bound=[-5, -0.5, -5, -0.5], high_bound=[5, 0.5, 5, 0.5] ) agent = Agent(q, epsilon=0.05) learning_decay = lambda lr, t: 1 / (t + 1) ** 0.5 epsilon_decay = lambda eps, t: 1 / (t + 1) ** 0.5 trainer = Trainer( agent, gamma=0.95, learning_rate=0.1, learning_rate_decay=learning_decay, epsilon=1.0, epsilon_decay=epsilon_decay, max_step=250) if monitor: env.monitor.start(RECORD_PATH) trainer.train(env, episode_count=episodes, render=render) if monitor: env.monitor.close() if __name__ == "__main__": parser = argparse.ArgumentParser(description="train & run cartpole ") parser.add_argument("--episode", type=int, default=1000, help="episode to train") parser.add_argument("--render", action="store_true", help="render the screen") parser.add_argument("--monitor", action="store_true", help="monitor") parser.add_argument("--upload", type=str, default="", help="upload key to openai gym (training is not executed)") args = parser.parse_args() if args.upload: if os.path.isdir(RECORD_PATH): gym.upload(RECORD_PATH, api_key=args.upload) else: main(args.episode, args.render, args.monitor)
true
true
f7027dff8780b6911154978d2292dd01768881d2
6,554
py
Python
data_related/data_augmentation/signal_augment.py
dertilo/speech-recognition
32dfd0a05480ecb3a4ea3eb9e28628da976e7065
[ "MIT" ]
1
2020-06-30T20:48:04.000Z
2020-06-30T20:48:04.000Z
data_related/data_augmentation/signal_augment.py
dertilo/speech-recognition
32dfd0a05480ecb3a4ea3eb9e28628da976e7065
[ "MIT" ]
null
null
null
data_related/data_augmentation/signal_augment.py
dertilo/speech-recognition
32dfd0a05480ecb3a4ea3eb9e28628da976e7065
[ "MIT" ]
null
null
null
import os import subprocess import numpy as np from tqdm import tqdm from typing import Dict MAX_FREQ = 7999 def to_str(v): if isinstance(v, tuple): s = " ".join(str(x) for x in v) elif isinstance(v, float) or isinstance(v, int): s = str(v) else: assert False return s def build_sox_distortions(audio_file, params): param_str = " ".join([k + " " + to_str(v) for k, v in params.items()]) sox_params = "sox {} -p {} ".format(audio_file, param_str) return sox_params def build_sox_noise( audio_file, amod_lowpass_cutoff=0.1, lowpass_cutoff=MAX_FREQ, highpass_cutoff=1, noise_gain=-4, ): """ play original.wav synth whitenoise lowpass 0.1 synth whitenoise amod gain -n 0 lowpass 100 highpass 1 """ sox_params = "sox {audio_file} -p synth whitenoise lowpass {amod_lowpass_cutoff} synth whitenoise amod gain -n {noise_gain} lowpass {lowpass_cutoff} highpass {highpass_cutoff}".format( audio_file=audio_file, amod_lowpass_cutoff=amod_lowpass_cutoff, lowpass_cutoff=lowpass_cutoff, highpass_cutoff=highpass_cutoff, noise_gain=noise_gain, ) return sox_params def build_varying_amplitude_factor(audio_file, lowpass_cutoff=1, ac_gain=-9): ac = "sox {} -p synth whitenoise lowpass {} gain -n {}".format( audio_file, lowpass_cutoff, ac_gain ) dc = "sox {} -p gain -90 dcshift 0.5".format(audio_file) return "sox -m <({}) <({}) -p".format(ac, dc) def multiply_signals(signal_a, signal_b): return ("sox -T <({signal_a}) <({signal_b}) -p").format( signal_a=signal_a, signal_b=signal_b, ) def build_sox_interference( interfere_file, interfere_signal, lowpass_cutoff=1, ac_gain=-6 ): factor = build_varying_amplitude_factor(interfere_file, lowpass_cutoff, ac_gain) return multiply_signals(factor, interfere_signal) def add_signals_trim_to_len(original, signals, augmented): signals_to_add = " ".join(["<(%s)" % s for s in signals]) sox_cmd = "sox -m {signals} -b 16 {augmented} trim 0 $(soxi -D {original})".format( signals=signals_to_add, original=original, augmented=augmented ) return sox_cmd def build_random_bandpass(min_low=50, min_band_width=100, max_high=1000) -> Dict: d = {} max_high_cutoff = MAX_FREQ if np.random.choice([True, False], p=[0.5, 0.5]): lowpass = int(round(np.random.uniform(low=min_low, high=MAX_FREQ))) d["lowpass"] = lowpass max_high_cutoff = lowpass - min_band_width if np.random.choice([True, False], p=[0.5, 0.5]): highpass = int( round(np.random.uniform(low=1, high=min(max_high, max_high_cutoff))) ) d["highpass"] = highpass return d def augment_with_sox(original_file, audio_files, augmented_file): interfere_file = np.random.choice(audio_files) min_SNR = 20 # normal:20, less:30, evenless:40 min_SIR = 5 # normal:10, less:20, evenless:30 signal_gain = round(np.random.uniform(low=-10, high=0), 2) signal_params = { "tempo": round(np.random.triangular(left=0.7, mode=1.0, right=1.3), 2), "pitch": int( round(np.random.triangular(left=-200, mode=0, right=200)) ), # normal 100, less: 50, evenless: 30 "reverb": (int(round(np.random.uniform(low=0, high=50))), 50, 100, 100, 0, 0,), "gain -n": signal_gain, } signal_params.update(build_random_bandpass(1000, 1000, 100)) interfere_params = { "tempo": round(np.random.uniform(low=0.6, high=1.4), 2), "pitch": int(round(np.random.uniform(low=-500, high=500))), "reverb": (int(round(np.random.uniform(low=0, high=100))), 50, 100, 100, 0, 0), "gain -n": round(np.random.uniform(low=-50, high=signal_gain - min_SIR), 2), } interfere_params.update(build_random_bandpass(50, 100, 1000)) # params = {'signal_params':signal_params,'interfere_params':interfere_params,'noise_power':noise_power} # 
pprint(params) signal = build_sox_distortions(original_file, signal_params) interfere_signal = build_sox_distortions(interfere_file, interfere_params) noise_power = round(np.random.uniform(-60, signal_gain - min_SNR), 2) lowpass = int(round(np.random.uniform(low=100, high=MAX_FREQ))) highpass = int(round(np.random.uniform(low=1, high=lowpass))) noise = build_sox_noise( original_file, np.random.uniform(0.1, 2), lowpass, highpass, noise_power ) interf = build_sox_interference( interfere_file, interfere_signal, lowpass_cutoff=np.random.uniform(0.5, 2), ac_gain=int(round(np.random.uniform(-9, -3))), ) sox_cmd = add_signals_trim_to_len( original_file, [signal, noise, interf], augmented_file ) FNULL = open(os.devnull, "w") subprocess.call(["bash", "-c", sox_cmd], stdout=FNULL, stderr=subprocess.STDOUT) # subprocess.call(["bash", "-c", sox_cmd]) # output = subprocess.check_output(["bash", "-c", sox_cmd]) # if len(output)>0 and 'FAIL' in output: # print(output) # return 1 if len(output)>0 else 0 def augment_with_specific_params(): signal_gain = 0 signal_params = dict(tempo=1.0, pitch=0, reverb=0) signal_params["gain -n"] = 0 signal = build_sox_distortions(original, signal_params) interfere_signal = build_sox_distortions( interfering, dict(gain=signal_gain - 10, tempo=0.8, pitch=100, reverb=50) ) noise = build_sox_noise( original, noise_gain=signal_gain - 20, lowpass_cutoff=6000, highpass_cutoff=10 ) interf = build_sox_interference(interfering, interfere_signal) sox_cmd = add_signals_trim_to_len(original, [signal, noise, interf], augmented) subprocess.call(["bash", "-c", sox_cmd]) if __name__ == "__main__": import librosa original = "../../original.wav" augmented = "/tmp/augmented.wav" interfering = "../../interference.wav" # augment_with_specific_params() for k in range(9): augment_with_sox(original, [interfering], "/tmp/augmented_%d.wav" % k) # assert False # path = os.environ['HOME']+"/data/asr_data/SPANISH" # audio_files = librosa.util.find_files(path) # # with open('spanish_train_manifest.csv') as f: # audio_text_files = f.readlines() # audio_files = [x.strip().split(",")[0] for x in audio_text_files] # # for k in tqdm(range(100000)): # original = np.random.choice(audio_files) # random_augmentation(original, audio_files, augmented)
34.677249
188
0.663412
import os import subprocess import numpy as np from tqdm import tqdm from typing import Dict MAX_FREQ = 7999 def to_str(v): if isinstance(v, tuple): s = " ".join(str(x) for x in v) elif isinstance(v, float) or isinstance(v, int): s = str(v) else: assert False return s def build_sox_distortions(audio_file, params): param_str = " ".join([k + " " + to_str(v) for k, v in params.items()]) sox_params = "sox {} -p {} ".format(audio_file, param_str) return sox_params def build_sox_noise( audio_file, amod_lowpass_cutoff=0.1, lowpass_cutoff=MAX_FREQ, highpass_cutoff=1, noise_gain=-4, ): sox_params = "sox {audio_file} -p synth whitenoise lowpass {amod_lowpass_cutoff} synth whitenoise amod gain -n {noise_gain} lowpass {lowpass_cutoff} highpass {highpass_cutoff}".format( audio_file=audio_file, amod_lowpass_cutoff=amod_lowpass_cutoff, lowpass_cutoff=lowpass_cutoff, highpass_cutoff=highpass_cutoff, noise_gain=noise_gain, ) return sox_params def build_varying_amplitude_factor(audio_file, lowpass_cutoff=1, ac_gain=-9): ac = "sox {} -p synth whitenoise lowpass {} gain -n {}".format( audio_file, lowpass_cutoff, ac_gain ) dc = "sox {} -p gain -90 dcshift 0.5".format(audio_file) return "sox -m <({}) <({}) -p".format(ac, dc) def multiply_signals(signal_a, signal_b): return ("sox -T <({signal_a}) <({signal_b}) -p").format( signal_a=signal_a, signal_b=signal_b, ) def build_sox_interference( interfere_file, interfere_signal, lowpass_cutoff=1, ac_gain=-6 ): factor = build_varying_amplitude_factor(interfere_file, lowpass_cutoff, ac_gain) return multiply_signals(factor, interfere_signal) def add_signals_trim_to_len(original, signals, augmented): signals_to_add = " ".join(["<(%s)" % s for s in signals]) sox_cmd = "sox -m {signals} -b 16 {augmented} trim 0 $(soxi -D {original})".format( signals=signals_to_add, original=original, augmented=augmented ) return sox_cmd def build_random_bandpass(min_low=50, min_band_width=100, max_high=1000) -> Dict: d = {} max_high_cutoff = MAX_FREQ if np.random.choice([True, False], p=[0.5, 0.5]): lowpass = int(round(np.random.uniform(low=min_low, high=MAX_FREQ))) d["lowpass"] = lowpass max_high_cutoff = lowpass - min_band_width if np.random.choice([True, False], p=[0.5, 0.5]): highpass = int( round(np.random.uniform(low=1, high=min(max_high, max_high_cutoff))) ) d["highpass"] = highpass return d def augment_with_sox(original_file, audio_files, augmented_file): interfere_file = np.random.choice(audio_files) min_SNR = 20 min_SIR = 5 signal_gain = round(np.random.uniform(low=-10, high=0), 2) signal_params = { "tempo": round(np.random.triangular(left=0.7, mode=1.0, right=1.3), 2), "pitch": int( round(np.random.triangular(left=-200, mode=0, right=200)) ), "reverb": (int(round(np.random.uniform(low=0, high=50))), 50, 100, 100, 0, 0,), "gain -n": signal_gain, } signal_params.update(build_random_bandpass(1000, 1000, 100)) interfere_params = { "tempo": round(np.random.uniform(low=0.6, high=1.4), 2), "pitch": int(round(np.random.uniform(low=-500, high=500))), "reverb": (int(round(np.random.uniform(low=0, high=100))), 50, 100, 100, 0, 0), "gain -n": round(np.random.uniform(low=-50, high=signal_gain - min_SIR), 2), } interfere_params.update(build_random_bandpass(50, 100, 1000)) signal = build_sox_distortions(original_file, signal_params) interfere_signal = build_sox_distortions(interfere_file, interfere_params) noise_power = round(np.random.uniform(-60, signal_gain - min_SNR), 2) lowpass = int(round(np.random.uniform(low=100, high=MAX_FREQ))) highpass = int(round(np.random.uniform(low=1, 
high=lowpass))) noise = build_sox_noise( original_file, np.random.uniform(0.1, 2), lowpass, highpass, noise_power ) interf = build_sox_interference( interfere_file, interfere_signal, lowpass_cutoff=np.random.uniform(0.5, 2), ac_gain=int(round(np.random.uniform(-9, -3))), ) sox_cmd = add_signals_trim_to_len( original_file, [signal, noise, interf], augmented_file ) FNULL = open(os.devnull, "w") subprocess.call(["bash", "-c", sox_cmd], stdout=FNULL, stderr=subprocess.STDOUT) def augment_with_specific_params(): signal_gain = 0 signal_params = dict(tempo=1.0, pitch=0, reverb=0) signal_params["gain -n"] = 0 signal = build_sox_distortions(original, signal_params) interfere_signal = build_sox_distortions( interfering, dict(gain=signal_gain - 10, tempo=0.8, pitch=100, reverb=50) ) noise = build_sox_noise( original, noise_gain=signal_gain - 20, lowpass_cutoff=6000, highpass_cutoff=10 ) interf = build_sox_interference(interfering, interfere_signal) sox_cmd = add_signals_trim_to_len(original, [signal, noise, interf], augmented) subprocess.call(["bash", "-c", sox_cmd]) if __name__ == "__main__": import librosa original = "../../original.wav" augmented = "/tmp/augmented.wav" interfering = "../../interference.wav" for k in range(9): augment_with_sox(original, [interfering], "/tmp/augmented_%d.wav" % k)
true
true
f702808448a1116245a85dccb72d2041a69ba522
1,552
py
Python
appengine/swarming/swarming_bot/logging_utils_test.py
pombreda/swarming
c70f311f3db8f25752c793a0d7b36cf537d95580
[ "Apache-2.0" ]
null
null
null
appengine/swarming/swarming_bot/logging_utils_test.py
pombreda/swarming
c70f311f3db8f25752c793a0d7b36cf537d95580
[ "Apache-2.0" ]
null
null
null
appengine/swarming/swarming_bot/logging_utils_test.py
pombreda/swarming
c70f311f3db8f25752c793a0d7b36cf537d95580
[ "Apache-2.0" ]
1
2021-12-06T03:37:36.000Z
2021-12-06T03:37:36.000Z
#!/usr/bin/env python # Copyright 2014 The Swarming Authors. All rights reserved. # Use of this source code is governed by the Apache v2.0 license that can be # found in the LICENSE file. import logging import os import sys import tempfile import shutil import unittest import re # Import this first before manipulating sys.path to ensure it can load fine. import logging_utils ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, ROOT_DIR) import test_env test_env.setup_test_env() from depot_tools import auto_stub _LOG_HEADER = r'^%s \d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d: ' % os.getpid() class TestLoggingUtils(auto_stub.TestCase): def test_Capture(self): root = logging.RootLogger(logging.DEBUG) with logging_utils.CaptureLogs('foo', root) as log: root.debug('foo') result = log.read() self.assertTrue(re.match(_LOG_HEADER + 'DEBUG foo\n$', result), result) def test_prepare_logging(self): root = logging.RootLogger(logging.DEBUG) tmp_dir = tempfile.mkdtemp(prefix='logging_utils_test') try: filepath = os.path.join(tmp_dir, 'test.log') logging_utils.prepare_logging(filepath, root) root.debug('foo') with open(filepath, 'rb') as f: result = f.read() finally: shutil.rmtree(tmp_dir) # It'd be nice to figure out a way to ensure it's properly in UTC but it's # tricky to do reliably. self.assertTrue(re.match(_LOG_HEADER + 'DEBUG foo\n$', result), result) if __name__ == '__main__': unittest.main()
28.218182
78
0.706186
import logging import os import sys import tempfile import shutil import unittest import re import logging_utils ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, ROOT_DIR) import test_env test_env.setup_test_env() from depot_tools import auto_stub _LOG_HEADER = r'^%s \d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d: ' % os.getpid() class TestLoggingUtils(auto_stub.TestCase): def test_Capture(self): root = logging.RootLogger(logging.DEBUG) with logging_utils.CaptureLogs('foo', root) as log: root.debug('foo') result = log.read() self.assertTrue(re.match(_LOG_HEADER + 'DEBUG foo\n$', result), result) def test_prepare_logging(self): root = logging.RootLogger(logging.DEBUG) tmp_dir = tempfile.mkdtemp(prefix='logging_utils_test') try: filepath = os.path.join(tmp_dir, 'test.log') logging_utils.prepare_logging(filepath, root) root.debug('foo') with open(filepath, 'rb') as f: result = f.read() finally: shutil.rmtree(tmp_dir) # tricky to do reliably. self.assertTrue(re.match(_LOG_HEADER + 'DEBUG foo\n$', result), result) if __name__ == '__main__': unittest.main()
true
true
f70281b724d555ee8d8ebda46c531d9ccbec388d
615
py
Python
frontends/pytorch/test/acap_regression/test_jit_add2.py
marbre/mlir-npcomp
30adf9e6b0c1e94db38050a9e143f20a5a461d17
[ "Apache-2.0" ]
null
null
null
frontends/pytorch/test/acap_regression/test_jit_add2.py
marbre/mlir-npcomp
30adf9e6b0c1e94db38050a9e143f20a5a461d17
[ "Apache-2.0" ]
null
null
null
frontends/pytorch/test/acap_regression/test_jit_add2.py
marbre/mlir-npcomp
30adf9e6b0c1e94db38050a9e143f20a5a461d17
[ "Apache-2.0" ]
null
null
null
# -*- Python -*- # This file is licensed under a pytorch-style license # See frontends/pytorch/LICENSE for license information. import torch import npcomp.frontends.pytorch as torch_mlir import npcomp.frontends.pytorch.test as test # RUN: %PYTHON %s | FileCheck %s dev = torch_mlir.mlir_device() t0 = torch.randn((4,4), device=dev) t1 = torch.randn((4,4), device=dev) t2 = t0 + t1 # # Check the result tensor against the CPU # t0_cpu = t0.to('cpu') t1_cpu = t1.to('cpu') t2_cpu = t2.to('cpu') print (t0_cpu, " +\n", t1_cpu, " =\n", t2_cpu) # CHECK: PASS! add2 check test.compare(t2, t0_cpu + t1_cpu, "add2")
21.964286
56
0.691057
import torch import npcomp.frontends.pytorch as torch_mlir import npcomp.frontends.pytorch.test as test dev = torch_mlir.mlir_device() t0 = torch.randn((4,4), device=dev) t1 = torch.randn((4,4), device=dev) t2 = t0 + t1 t0_cpu = t0.to('cpu') t1_cpu = t1.to('cpu') t2_cpu = t2.to('cpu') print (t0_cpu, " +\n", t1_cpu, " =\n", t2_cpu) test.compare(t2, t0_cpu + t1_cpu, "add2")
true
true
f7028206768d52c2f42babdea8234119adef0637
2,800
py
Python
tests/st/ops/gpu/test_tanh_grad_grad_op.py
GuoSuiming/mindspore
48afc4cfa53d970c0b20eedfb46e039db2a133d5
[ "Apache-2.0" ]
3,200
2020-02-17T12:45:41.000Z
2022-03-31T20:21:16.000Z
tests/st/ops/gpu/test_tanh_grad_grad_op.py
zimo-geek/mindspore
665ec683d4af85c71b2a1f0d6829356f2bc0e1ff
[ "Apache-2.0" ]
176
2020-02-12T02:52:11.000Z
2022-03-28T22:15:55.000Z
tests/st/ops/gpu/test_tanh_grad_grad_op.py
zimo-geek/mindspore
665ec683d4af85c71b2a1f0d6829356f2bc0e1ff
[ "Apache-2.0" ]
621
2020-03-09T01:31:41.000Z
2022-03-30T03:43:19.000Z
# Copyright 2020 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ import numpy as np import pytest import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor from mindspore.ops.operations import _grad_ops as G from mindspore.ops import composite as C context.set_context(mode=context.GRAPH_MODE, device_target="GPU") class NetTanhGrad(nn.Cell): def __init__(self): super(NetTanhGrad, self).__init__() self.tanh_grad = G.TanhGrad() def construct(self, y, grad): return self.tanh_grad(y, grad) class NetTanhGradGrad(nn.Cell): def __init__(self, forward_net): super(NetTanhGradGrad, self).__init__() self.forward_net = forward_net self.gradOps = C.GradOperation(get_all=True, sens_param=True) def construct(self, y, grad, dout): backward_net = self.gradOps(self.forward_net) return backward_net(y, grad, dout) @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def tanh_grad_grad_base(dtype, loss): np.random.seed(1) shape = (4, 2) y_np = (np.random.rand(*shape) * 2 - 1).astype(dtype) grad_np = (np.random.rand(*shape) * 20 - 10).astype(dtype) dout_np = (np.random.rand(*shape) * 20 - 10).astype(dtype) y_np_32 = y_np.astype(np.float32) grad_np_32 = grad_np.astype(np.float32) dout_np_32 = dout_np.astype(np.float32) dy_np = (dout_np_32 * grad_np_32 * (-2.0) * y_np_32).astype(dtype) dgrad_np = (dout_np_32 * (1 - y_np_32 * y_np_32)).astype(dtype) y_ms = Tensor(y_np) grad_ms = Tensor(grad_np) dout_ms = Tensor(dout_np) forward_net = NetTanhGrad() net = NetTanhGradGrad(forward_net) dy_ms, dgrad_ms = net(y_ms, grad_ms, dout_ms) assert np.allclose(dy_ms.asnumpy(), dy_np, loss, loss) assert np.allclose(dgrad_ms.asnumpy(), dgrad_np, loss, loss) @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_tanh_grad_grad_float16(): tanh_grad_grad_base(np.float16, 1e-3) @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_tanh_grad_grad_float32(): tanh_grad_grad_base(np.float32, 1e-4)
32.55814
78
0.706429
import numpy as np import pytest import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor from mindspore.ops.operations import _grad_ops as G from mindspore.ops import composite as C context.set_context(mode=context.GRAPH_MODE, device_target="GPU") class NetTanhGrad(nn.Cell): def __init__(self): super(NetTanhGrad, self).__init__() self.tanh_grad = G.TanhGrad() def construct(self, y, grad): return self.tanh_grad(y, grad) class NetTanhGradGrad(nn.Cell): def __init__(self, forward_net): super(NetTanhGradGrad, self).__init__() self.forward_net = forward_net self.gradOps = C.GradOperation(get_all=True, sens_param=True) def construct(self, y, grad, dout): backward_net = self.gradOps(self.forward_net) return backward_net(y, grad, dout) @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def tanh_grad_grad_base(dtype, loss): np.random.seed(1) shape = (4, 2) y_np = (np.random.rand(*shape) * 2 - 1).astype(dtype) grad_np = (np.random.rand(*shape) * 20 - 10).astype(dtype) dout_np = (np.random.rand(*shape) * 20 - 10).astype(dtype) y_np_32 = y_np.astype(np.float32) grad_np_32 = grad_np.astype(np.float32) dout_np_32 = dout_np.astype(np.float32) dy_np = (dout_np_32 * grad_np_32 * (-2.0) * y_np_32).astype(dtype) dgrad_np = (dout_np_32 * (1 - y_np_32 * y_np_32)).astype(dtype) y_ms = Tensor(y_np) grad_ms = Tensor(grad_np) dout_ms = Tensor(dout_np) forward_net = NetTanhGrad() net = NetTanhGradGrad(forward_net) dy_ms, dgrad_ms = net(y_ms, grad_ms, dout_ms) assert np.allclose(dy_ms.asnumpy(), dy_np, loss, loss) assert np.allclose(dgrad_ms.asnumpy(), dgrad_np, loss, loss) @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_tanh_grad_grad_float16(): tanh_grad_grad_base(np.float16, 1e-3) @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_tanh_grad_grad_float32(): tanh_grad_grad_base(np.float32, 1e-4)
true
true
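The expected arrays dy_np and dgrad_np in the test above come from differentiating TanhGrad itself; here is a small numpy restatement of that reasoning, using the same formulas as the test (the sample values are arbitrary):

import numpy as np

y = np.array([0.2, -0.5])      # tanh output
grad = np.array([1.5, 2.0])    # incoming gradient fed to TanhGrad
dout = np.array([0.3, -1.0])   # sensitivity w.r.t. TanhGrad's output

# TanhGrad(y, grad) = grad * (1 - y**2), hence:
dy = dout * grad * (-2.0) * y  # d/dy    -> -2 * y * grad
dgrad = dout * (1 - y * y)     # d/dgrad -> 1 - y**2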
f702829a908f8235e9093bc97a9d0edce1997f1f
379
py
Python
pokemon/permissions.py
pessman/pokemon_utils
cbe06ebe323cb38a35846274d812bdbe8d0ae8ca
[ "MIT" ]
1
2019-03-11T04:12:50.000Z
2019-03-11T04:12:50.000Z
pokemon/permissions.py
pessman/pokemon_utils
cbe06ebe323cb38a35846274d812bdbe8d0ae8ca
[ "MIT" ]
null
null
null
pokemon/permissions.py
pessman/pokemon_utils
cbe06ebe323cb38a35846274d812bdbe8d0ae8ca
[ "MIT" ]
2
2019-03-13T03:17:29.000Z
2019-04-04T20:06:50.000Z
from rest_framework.permissions import SAFE_METHODS, BasePermission class IsAdminOrReadOnly(BasePermission): """ The request is authenticated as an Admin user or is Read Only """ def has_permission(self, request, view): return bool( request.method in SAFE_METHODS or request.user and request.user.is_staff )
25.266667
67
0.662269
from rest_framework.permissions import SAFE_METHODS, BasePermission class IsAdminOrReadOnly(BasePermission): def has_permission(self, request, view): return bool( request.method in SAFE_METHODS or request.user and request.user.is_staff )
true
true
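A sketch of how a permission class like IsAdminOrReadOnly above is normally attached to a Django REST Framework view. The PokemonDetail view and its payloads are made up; only standard DRF usage is assumed.

from rest_framework.views import APIView
from rest_framework.response import Response
from pokemon.permissions import IsAdminOrReadOnly

class PokemonDetail(APIView):
    permission_classes = [IsAdminOrReadOnly]     # GET/HEAD/OPTIONS for everyone, writes for staff only

    def get(self, request, format=None):
        return Response({"name": "bulbasaur"})   # placeholder payload

    def post(self, request, format=None):
        return Response({"created": True})       # only reachable when request.user.is_staff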
f702830899c6877f2ce2b468190877e2a231b279
1,153
py
Python
stubborn.py
CrossRef/unreliable
874b58c0a0b4519bd679b8ad44d029427571653e
[ "MIT" ]
null
null
null
stubborn.py
CrossRef/unreliable
874b58c0a0b4519bd679b8ad44d029427571653e
[ "MIT" ]
null
null
null
stubborn.py
CrossRef/unreliable
874b58c0a0b4519bd679b8ad44d029427571653e
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 import argparse import io import sys from urllib.request import urlopen import urllib.error import time import datetime from retrying import retry URL = "http://unreliable.labs.crossref.org/error" ONE_SECOND=1000 ONE_HOUR=((ONE_SECOND*60)*60) ONE_DAY=(ONE_HOUR*24) @retry(wait_exponential_multiplier=1000,wait_exponential_max=(ONE_HOUR * 6)) def fetch(url): global s d = time.time() - s print("time: " + str(d)) s = time.time() try: with urlopen(url) as response: result = response.read().decode('utf8') print("Done fetching...") return result except urllib.error.URLError as e: print("Error: " + str(e)) raise e def main(): print("Starting...") print(fetch(ARGS.url)) print("Done") s = time.time() if __name__ == '__main__': parser = argparse.ArgumentParser( description="Stubbornly, but intelligently keep retrying to GET the same URL") parser.add_argument("-u", "--url", help="the URL to be stubborn about", type=str, default=URL) ARGS = parser.parse_args() main()
23.06
86
0.635733
import argparse import io import sys from urllib.request import urlopen import urllib.error import time import datetime from retrying import retry URL = "http://unreliable.labs.crossref.org/error" ONE_SECOND=1000 ONE_HOUR=((ONE_SECOND*60)*60) ONE_DAY=(ONE_HOUR*24) @retry(wait_exponential_multiplier=1000,wait_exponential_max=(ONE_HOUR * 6)) def fetch(url): global s d = time.time() - s print("time: " + str(d)) s = time.time() try: with urlopen(url) as response: result = response.read().decode('utf8') print("Done fetching...") return result except urllib.error.URLError as e: print("Error: " + str(e)) raise e def main(): print("Starting...") print(fetch(ARGS.url)) print("Done") s = time.time() if __name__ == '__main__': parser = argparse.ArgumentParser( description="Stubbornly, but intelligently keep retrying to GET the same URL") parser.add_argument("-u", "--url", help="the URL to be stubborn about", type=str, default=URL) ARGS = parser.parse_args() main()
true
true
f702841d0fb9dd541a4a2ee7271135da879a7cdc
988
py
Python
isi_sdk_8_2_1/test/test_auth_id_ntoken_privilege_item.py
mohitjain97/isilon_sdk_python
a371f438f542568edb8cda35e929e6b300b1177c
[ "Unlicense" ]
24
2018-06-22T14:13:23.000Z
2022-03-23T01:21:26.000Z
isi_sdk_8_2_1/test/test_auth_id_ntoken_privilege_item.py
mohitjain97/isilon_sdk_python
a371f438f542568edb8cda35e929e6b300b1177c
[ "Unlicense" ]
46
2018-04-30T13:28:22.000Z
2022-03-21T21:11:07.000Z
isi_sdk_8_2_1/test/test_auth_id_ntoken_privilege_item.py
mohitjain97/isilon_sdk_python
a371f438f542568edb8cda35e929e6b300b1177c
[ "Unlicense" ]
29
2018-06-19T00:14:04.000Z
2022-02-08T17:51:19.000Z
# coding: utf-8 """ Isilon SDK Isilon SDK - Language bindings for the OneFS API # noqa: E501 OpenAPI spec version: 8 Contact: sdk@isilon.com Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import unittest import isi_sdk_8_2_1 from isi_sdk_8_2_1.models.auth_id_ntoken_privilege_item import AuthIdNtokenPrivilegeItem # noqa: E501 from isi_sdk_8_2_1.rest import ApiException class TestAuthIdNtokenPrivilegeItem(unittest.TestCase): """AuthIdNtokenPrivilegeItem unit test stubs""" def setUp(self): pass def tearDown(self): pass def testAuthIdNtokenPrivilegeItem(self): """Test AuthIdNtokenPrivilegeItem""" # FIXME: construct object with mandatory attributes with example values # model = isi_sdk_8_2_1.models.auth_id_ntoken_privilege_item.AuthIdNtokenPrivilegeItem() # noqa: E501 pass if __name__ == '__main__': unittest.main()
24.097561
110
0.730769
from __future__ import absolute_import import unittest import isi_sdk_8_2_1 from isi_sdk_8_2_1.models.auth_id_ntoken_privilege_item import AuthIdNtokenPrivilegeItem from isi_sdk_8_2_1.rest import ApiException class TestAuthIdNtokenPrivilegeItem(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def testAuthIdNtokenPrivilegeItem(self): s if __name__ == '__main__': unittest.main()
true
true
f702853d97d6b0edd707b18d0e9385339233e089
97,990
py
Python
databricks/koalas/frame.py
abishekganesh72/koalas
40c2e209384d078ee75d08c7681d2e6a276ab834
[ "Apache-2.0" ]
null
null
null
databricks/koalas/frame.py
abishekganesh72/koalas
40c2e209384d078ee75d08c7681d2e6a276ab834
[ "Apache-2.0" ]
null
null
null
databricks/koalas/frame.py
abishekganesh72/koalas
40c2e209384d078ee75d08c7681d2e6a276ab834
[ "Apache-2.0" ]
null
null
null
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ A wrapper class for Spark DataFrame to behave similar to pandas DataFrame. """ import re import warnings from functools import partial, reduce from typing import Any, Optional, List, Tuple, Union import numpy as np import pandas as pd from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype, is_list_like, \ is_dict_like from pyspark import sql as spark from pyspark.sql import functions as F, Column from pyspark.sql.types import (BooleanType, ByteType, DecimalType, DoubleType, FloatType, IntegerType, LongType, ShortType, StructField, StructType, to_arrow_type) from pyspark.sql.utils import AnalysisException from databricks import koalas as ks # For running doctests and reference resolution in PyCharm. from databricks.koalas.utils import default_session, validate_arguments_and_invoke_function from databricks.koalas.generic import _Frame, max_display_count from databricks.koalas.metadata import Metadata from databricks.koalas.missing.frame import _MissingPandasLikeDataFrame from databricks.koalas.ml import corr from databricks.koalas.typedef import infer_pd_series_spark_type # These regular expression patterns are complied and defined here to avoid to compile the same # pattern every time it is used in _repr_ and _repr_html_ in DataFrame. # Two patterns basically seek the footer string from Pandas' REPR_PATTERN = re.compile(r"\n\n\[(?P<rows>[0-9]+) rows x (?P<columns>[0-9]+) columns\]$") REPR_HTML_PATTERN = re.compile( r"\n\<p\>(?P<rows>[0-9]+) rows × (?P<columns>[0-9]+) columns\<\/p\>\n\<\/div\>$") class DataFrame(_Frame): """ Koala DataFrame that corresponds to Pandas DataFrame logically. This holds Spark DataFrame internally. :ivar _sdf: Spark Column instance :ivar _metadata: Metadata related to column names and index information. Parameters ---------- data : numpy ndarray (structured or homogeneous), dict, Pandas DataFrame or Spark DataFrame Dict can contain Series, arrays, constants, or list-like objects If data is a dict, argument order is maintained for Python 3.6 and later. Note that if `data` is a Pandas DataFrame, other arguments should not be used. If `data` is a Spark DataFrame, all other arguments except `index` should not be used. index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided If `data` is a Spark DataFrame, `index` is expected to be `Metadata`. columns : Index or array-like Column labels to use for resulting frame. Will default to RangeIndex (0, 1, 2, ..., n) if no column labels are provided dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer copy : boolean, default False Copy data from inputs. Only affects DataFrame / 2d ndarray input Examples -------- Constructing DataFrame from a dictionary. 
>>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = ks.DataFrame(data=d, columns=['col1', 'col2']) >>> df col1 col2 0 1 3 1 2 4 Constructing DataFrame from Pandas DataFrame >>> df = ks.DataFrame(pd.DataFrame(data=d, columns=['col1', 'col2'])) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. >>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = ks.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from numpy ndarray: >>> df2 = ks.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)), ... columns=['a', 'b', 'c', 'd', 'e']) >>> df2 # doctest: +SKIP a b c d e 0 3 1 4 9 8 1 4 8 4 8 4 2 7 6 5 6 7 3 8 7 9 1 0 4 2 5 4 3 9 """ def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False): if isinstance(data, pd.DataFrame): assert index is None assert columns is None assert dtype is None assert not copy self._init_from_pandas(data) elif isinstance(data, spark.DataFrame): assert columns is None assert dtype is None assert not copy self._init_from_spark(data, index) else: pdf = pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy) self._init_from_pandas(pdf) def _init_from_pandas(self, pdf): metadata = Metadata.from_pandas(pdf) reset_index = pdf.reset_index() reset_index.columns = metadata.columns schema = StructType([StructField(name, infer_pd_series_spark_type(col), nullable=bool(col.isnull().any())) for name, col in reset_index.iteritems()]) for name, col in reset_index.iteritems(): dt = col.dtype if is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt): continue reset_index[name] = col.replace({np.nan: None}) self._init_from_spark(default_session().createDataFrame(reset_index, schema=schema), metadata) def _init_from_spark(self, sdf, metadata=None): self._sdf = sdf if metadata is None: self._metadata = Metadata(data_columns=self._sdf.schema.fieldNames()) else: self._metadata = metadata @property def _index_columns(self): return [self._sdf.__getitem__(field) for field in self._metadata.index_columns] def _reduce_for_stat_function(self, sfun): """ Applies sfun to each column and returns a pd.Series where the number of rows equal the number of columns. :param sfun: either an 1-arg function that takes a Column and returns a Column, or a 2-arg function that takes a Column and its DataType and returns a Column. """ from inspect import signature exprs = [] num_args = len(signature(sfun).parameters) for col in self.columns: col_sdf = self._sdf[col] col_type = self._sdf.schema[col].dataType if isinstance(col_type, BooleanType) and sfun.__name__ not in ('min', 'max'): # Stat functions cannot be used with boolean values by default # Thus, cast to integer (true to 1 and false to 0) # Exclude the min and max methods though since those work with booleans col_sdf = col_sdf.cast('integer') if num_args == 1: # Only pass in the column if sfun accepts only one arg col_sdf = sfun(col_sdf) else: # must be 2 assert num_args == 2 # Pass in both the column and its data type if sfun accepts two args col_sdf = sfun(col_sdf, col_type) exprs.append(col_sdf.alias(col)) sdf = self._sdf.select(*exprs) pdf = sdf.toPandas() assert len(pdf) == 1, (sdf, pdf) row = pdf.iloc[0] row.name = None return row # Return first row as a Series def corr(self, method='pearson'): """ Compute pairwise correlation of columns, excluding NA/null values. 
Parameters ---------- method : {'pearson', 'spearman'} * pearson : standard correlation coefficient * spearman : Spearman rank correlation Returns ------- y : pandas.DataFrame See Also -------- Series.corr Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... columns=['dogs', 'cats']) >>> df.corr('pearson') dogs cats dogs 1.000000 -0.851064 cats -0.851064 1.000000 >>> df.corr('spearman') dogs cats dogs 1.000000 -0.948683 cats -0.948683 1.000000 Notes ----- There are behavior differences between Koalas and pandas. * the `method` argument only accepts 'pearson', 'spearman' * the data should not contain NaNs. Koalas will return an error. * Koalas doesn't support the following argument(s). * `min_periods` argument is not supported """ return corr(self, method) def iteritems(self): """ Iterator over (column name, Series) pairs. Iterates over the DataFrame columns, returning a tuple with the column name and the content as a Series. Returns ------- label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. Examples -------- >>> df = ks.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala'], ... columns=['species', 'population']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.iteritems(): ... print('label:', label) ... print('content:', content.to_string()) ... label: species content: panda bear polar bear koala marsupial label: population content: panda 1864 polar 22000 koala 80000 """ cols = list(self.columns) return list((col_name, self[col_name]) for col_name in cols) def to_clipboard(self, excel=True, sep=None, **kwargs): """ Copy object to the system clipboard. Write a text representation of object to the system clipboard. This can be pasted into Excel, for example. .. note:: This method should only be used if the resulting DataFrame is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- excel : bool, default True - True, use the provided separator, writing in a csv format for allowing easy pasting into excel. - False, write a string representation of the object to the clipboard. sep : str, default ``'\\t'`` Field delimiter. **kwargs These parameters will be passed to DataFrame.to_csv. Notes ----- Requirements for your platform. - Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules) - Windows : none - OS X : none Examples -------- Copy the contents of a DataFrame to the clipboard. >>> df = ks.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) # doctest: +SKIP >>> df.to_clipboard(sep=',') # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # ,A,B,C ... # 0,1,2,3 ... # 1,4,5,6 We can omit the the index by passing the keyword `index` and setting it to false. >>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # A,B,C ... # 1,2,3 ... # 4,5,6 This function also works for Series: >>> df = ks.Series([1, 2, 3, 4, 5, 6, 7], name='x') # doctest: +SKIP >>> df.to_clipboard(sep=',') # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # 0, 1 ... # 1, 2 ... # 2, 3 ... # 3, 4 ... # 4, 5 ... # 5, 6 ... 
# 6, 7 """ args = locals() kdf = self return validate_arguments_and_invoke_function( kdf.to_pandas(), self.to_clipboard, pd.DataFrame.to_clipboard, args) def to_html(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.', bold_rows=True, classes=None, escape=True, notebook=False, border=None, table_id=None, render_links=False): """ Render a DataFrame as an HTML table. .. note:: This method should only be used if the resulting Pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, set max_rows parameter. Parameters ---------- buf : StringIO-like, optional Buffer to write to. columns : sequence, optional, default None The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool, optional Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names index : bool, optional, default True Whether to print index (row) labels. na_rep : str, optional, default 'NaN' String representation of NAN to use. formatters : list or dict of one-param. functions, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function, optional, default None Formatter function to apply to columns' elements if they are floats. The result of this function must be a unicode string. sparsify : bool, optional, default True Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. index_names : bool, optional, default True Prints the names of the indexes. justify : str, default None How to justify the column labels. If None uses the option from the print configuration (controlled by set_option), 'right' out of the box. Valid values are * left * right * center * justify * justify-all * start * end * inherit * match-parent * initial * unset. max_rows : int, optional Maximum number of rows to display in the console. max_cols : int, optional Maximum number of columns to display in the console. show_dimensions : bool, default False Display DataFrame dimensions (number of rows by number of columns). decimal : str, default '.' Character recognized as decimal separator, e.g. ',' in Europe. bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.html.border``. table_id : str, optional A css id is included in the opening `<table>` tag if specified. render_links : bool, default False Convert URLs to HTML links (only works with Pandas 0.24+). Returns ------- str (or unicode, depending on data and options) String representation of the dataframe. See Also -------- to_string : Convert DataFrame to a string. """ # Make sure locals() call is at the top of the function so we don't capture local variables. 
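        # The to_* conversion methods in this class share one delegation pattern:
        # capture the caller's arguments with locals(), materialize the (optionally
        # truncated) Koalas DataFrame as pandas, and forward the arguments to the
        # matching pandas method through validate_arguments_and_invoke_function.
        # Roughly (a hypothetical sketch of the flow, not the exact code path):
        #     pdf = (self.head(max_rows) if max_rows is not None else self).to_pandas()
        #     return pdf.to_html(...)   # with the captured keyword arguments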
args = locals() if max_rows is not None: kdf = self.head(max_rows) else: kdf = self return validate_arguments_and_invoke_function( kdf.to_pandas(), self.to_html, pd.DataFrame.to_html, args) def to_string(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.', line_width=None): """ Render a DataFrame to a console-friendly tabular output. .. note:: This method should only be used if the resulting Pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, set max_rows parameter. Parameters ---------- buf : StringIO-like, optional Buffer to write to. columns : sequence, optional, default None The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool, optional Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names index : bool, optional, default True Whether to print index (row) labels. na_rep : str, optional, default 'NaN' String representation of NAN to use. formatters : list or dict of one-param. functions, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function, optional, default None Formatter function to apply to columns' elements if they are floats. The result of this function must be a unicode string. sparsify : bool, optional, default True Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. index_names : bool, optional, default True Prints the names of the indexes. justify : str, default None How to justify the column labels. If None uses the option from the print configuration (controlled by set_option), 'right' out of the box. Valid values are * left * right * center * justify * justify-all * start * end * inherit * match-parent * initial * unset. max_rows : int, optional Maximum number of rows to display in the console. max_cols : int, optional Maximum number of columns to display in the console. show_dimensions : bool, default False Display DataFrame dimensions (number of rows by number of columns). decimal : str, default '.' Character recognized as decimal separator, e.g. ',' in Europe. line_width : int, optional Width to wrap a line in characters. Returns ------- str (or unicode, depending on data and options) String representation of the dataframe. See Also -------- to_html : Convert DataFrame to HTML. Examples -------- >>> df = ks.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, columns=['col1', 'col2']) >>> print(df.to_string()) col1 col2 0 1 4 1 2 5 2 3 6 >>> print(df.to_string(max_rows=2)) col1 col2 0 1 4 1 2 5 """ # Make sure locals() call is at the top of the function so we don't capture local variables. args = locals() if max_rows is not None: kdf = self.head(max_rows) else: kdf = self return validate_arguments_and_invoke_function( kdf.to_pandas(), self.to_string, pd.DataFrame.to_string, args) def to_dict(self, orient='dict', into=dict): """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). .. 
note:: This method should only be used if the resulting Pandas DataFrame is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} Abbreviations are allowed. `s` indicates `series` and `sp` indicates `split`. into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. Examples -------- >>> df = ks.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2'], ... columns=['col1', 'col2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df_dict = df.to_dict() >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()]) [('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])] You can specify the return orientation. >>> df_dict = df.to_dict('series') >>> sorted(df_dict.items()) [('col1', row1 1 row2 2 Name: col1, dtype: int64), ('col2', row1 0.50 row2 0.75 Name: col2, dtype: float64)] >>> df_dict = df.to_dict('split') >>> sorted(df_dict.items()) # doctest: +ELLIPSIS [('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])] >>> df_dict = df.to_dict('records') >>> [sorted(values.items()) for values in df_dict] # doctest: +ELLIPSIS [[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]] >>> df_dict = df.to_dict('index') >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()]) [('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])] You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), \ ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) # doctest: +ELLIPSIS [defaultdict(<class 'list'>, {'col..., 'col...}), \ defaultdict(<class 'list'>, {'col..., 'col...})] """ # Make sure locals() call is at the top of the function so we don't capture local variables. args = locals() kdf = self return validate_arguments_and_invoke_function( kdf.to_pandas(), self.to_dict, pd.DataFrame.to_dict, args) def to_latex(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, bold_rows=False, column_format=None, longtable=None, escape=None, encoding=None, decimal='.', multicolumn=None, multicolumn_format=None, multirow=None): r""" Render an object to a LaTeX tabular environment table. Render an object to a tabular environment table. You can splice this into a LaTeX document. Requires usepackage{booktabs}. .. 
note:: This method should only be used if the resulting Pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, consider alternative formats. Parameters ---------- buf : file descriptor or None Buffer to write to. If None, the output is returned as a string. columns : list of label, optional The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool or list of str, default True Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names. index : bool, default True Write row names (index). na_rep : str, default ‘NaN’ Missing data representation. formatters : list of functions or dict of {str: function}, optional Formatter functions to apply to columns’ elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : str, optional Format string for floating point numbers. sparsify : bool, optional Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. By default, the value will be read from the config module. index_names : bool, default True Prints the names of the indexes. bold_rows : bool, default False Make the row labels bold in the output. column_format : str, optional The columns format as specified in LaTeX table format e.g. ‘rcl’ for 3 columns. By default, ‘l’ will be used for all columns except columns of numbers, which default to ‘r’. longtable : bool, optional By default, the value will be read from the pandas config module. Use a longtable environment instead of tabular. Requires adding a usepackage{longtable} to your LaTeX preamble. escape : bool, optional By default, the value will be read from the pandas config module. When set to False prevents from escaping latex special characters in column names. encoding : str, optional A string representing the encoding to use in the output file, defaults to ‘ascii’ on Python 2 and ‘utf-8’ on Python 3. decimal : str, default ‘.’ Character recognized as decimal separator, e.g. ‘,’ in Europe. multicolumn : bool, default True Use multicolumn to enhance MultiIndex columns. The default will be read from the config module. multicolumn_format : str, default ‘l’ The alignment for multicolumns, similar to column_format The default will be read from the config module. multirow : bool, default False Use multirow to enhance MultiIndex rows. Requires adding a usepackage{multirow} to your LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained rows, separating groups via clines. The default will be read from the pandas config module. Returns ------- str or None If buf is None, returns the resulting LateX format as a string. Otherwise returns None. See Also -------- DataFrame.to_string : Render a DataFrame to a console-friendly tabular output. DataFrame.to_html : Render a DataFrame as an HTML table. Examples -------- >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'], ... 'mask': ['red', 'purple'], ... 'weapon': ['sai', 'bo staff']}, ... 
columns=['name', 'mask', 'weapon']) >>> df.to_latex(index=False) # doctest: +NORMALIZE_WHITESPACE '\\begin{tabular}{lll}\n\\toprule\n name & mask & weapon \\\\\n\\midrule\n Raphael & red & sai \\\\\n Donatello & purple & bo staff \\\\\n\\bottomrule\n\\end{tabular}\n' """ args = locals() kdf = self return validate_arguments_and_invoke_function( kdf.to_pandas(), self.to_latex, pd.DataFrame.to_latex, args) @property def index(self): """The index (row labels) Column of the DataFrame. Currently supported only when the DataFrame has a single index. """ from databricks.koalas.series import Series if len(self._metadata.index_map) != 1: raise KeyError('Currently supported only when the DataFrame has a single index.') return Series(self._index_columns[0], anchor=self, index=[]) def set_index(self, keys, drop=True, append=False, inplace=False): """Set the DataFrame index (row labels) using one or more existing columns. Set the DataFrame index (row labels) using one or more existing columns or arrays (of the correct length). The index can replace the existing index or expand on it. Parameters ---------- keys : label or array-like or list of labels/arrays This parameter can be either a single column key, a single array of the same length as the calling DataFrame, or a list containing an arbitrary combination of column keys and arrays. Here, "array" encompasses :class:`Series`, :class:`Index` and ``np.ndarray``. drop : bool, default True Delete columns to be used as the new index. append : bool, default False Whether to append columns to existing index. inplace : bool, default False Modify the DataFrame in place (do not create a new object). Returns ------- DataFrame Changed row labels. See Also -------- DataFrame.reset_index : Opposite of set_index. Examples -------- >>> df = ks.DataFrame({'month': [1, 4, 7, 10], ... 'year': [2012, 2014, 2013, 2014], ... 'sale': [55, 40, 84, 31]}, ... columns=['month', 'year', 'sale']) >>> df month year sale 0 1 2012 55 1 4 2014 40 2 7 2013 84 3 10 2014 31 Set the index to become the 'month' column: >>> df.set_index('month') # doctest: +NORMALIZE_WHITESPACE year sale month 1 2012 55 4 2014 40 7 2013 84 10 2014 31 Create a MultiIndex using columns 'year' and 'month': >>> df.set_index(['year', 'month']) # doctest: +NORMALIZE_WHITESPACE sale year month 2012 1 55 2014 4 40 2013 7 84 2014 10 31 """ if isinstance(keys, str): keys = [keys] else: keys = list(keys) for key in keys: if key not in self.columns: raise KeyError(key) if drop: data_columns = [column for column in self._metadata.data_columns if column not in keys] else: data_columns = self._metadata.data_columns if append: index_map = self._metadata.index_map + [(column, column) for column in keys] else: index_map = [(column, column) for column in keys] metadata = self._metadata.copy(data_columns=data_columns, index_map=index_map) # Sync Spark's columns as well. sdf = self._sdf.select(['`{}`'.format(name) for name in metadata.columns]) if inplace: self._metadata = metadata self._sdf = sdf else: kdf = self.copy() kdf._metadata = metadata kdf._sdf = sdf return kdf def reset_index(self, level=None, drop=False, inplace=False): """Reset the index, or a level of it. For DataFrame with multi-level index, return new DataFrame with labeling information in the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None. For a standard index, the index name will be used (if set), otherwise a default 'index' or 'level_0' (if 'index' is already taken) will be used. 
Parameters ---------- level : int, str, tuple, or list, default None Only remove the given levels from the index. Removes all levels by default. drop : bool, default False Do not try to insert index into dataframe columns. This resets the index to the default integer index. inplace : bool, default False Modify the DataFrame in place (do not create a new object). Returns ------- DataFrame DataFrame with the new index. See Also -------- DataFrame.set_index : Opposite of reset_index. Examples -------- >>> df = ks.DataFrame([('bird', 389.0), ... ('bird', 24.0), ... ('mammal', 80.5), ... ('mammal', np.nan)], ... index=['falcon', 'parrot', 'lion', 'monkey'], ... columns=('class', 'max_speed')) >>> df class max_speed falcon bird 389.0 parrot bird 24.0 lion mammal 80.5 monkey mammal NaN When we reset the index, the old index is added as a column. Unlike pandas, Koalas does not automatically add a sequential index. The following 0, 1, 2, 3 are only there when we display the DataFrame. >>> df.reset_index() index class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN We can use the `drop` parameter to avoid the old index being added as a column: >>> df.reset_index(drop=True) class max_speed 0 bird 389.0 1 bird 24.0 2 mammal 80.5 3 mammal NaN """ # TODO: add example of MultiIndex back. See https://github.com/databricks/koalas/issues/301 if len(self._metadata.index_map) == 0: raise NotImplementedError('Can\'t reset index because there is no index.') multi_index = len(self._metadata.index_map) > 1 def rename(index): if multi_index: return 'level_{}'.format(index) else: if 'index' not in self._metadata.data_columns: return 'index' else: return 'level_{}'.format(index) if level is None: new_index_map = [(column, name if name is not None else rename(i)) for i, (column, name) in enumerate(self._metadata.index_map)] index_map = [] else: if isinstance(level, (int, str)): level = [level] level = list(level) if all(isinstance(l, int) for l in level): for lev in level: if lev >= len(self._metadata.index_map): raise IndexError('Too many levels: Index has only {} level, not {}' .format(len(self._metadata.index_map), lev + 1)) idx = level elif all(isinstance(lev, str) for lev in level): idx = [] for l in level: try: i = self._metadata.index_columns.index(l) idx.append(i) except ValueError: if multi_index: raise KeyError('Level unknown not found') else: raise KeyError('Level unknown must be same as name ({})' .format(self._metadata.index_columns[0])) else: raise ValueError('Level should be all int or all string.') idx.sort() new_index_map = [] index_map = self._metadata.index_map.copy() for i in idx: info = self._metadata.index_map[i] index_column, index_name = info new_index_map.append( (index_column, index_name if index_name is not None else rename(index_name))) index_map.remove(info) if drop: new_index_map = [] metadata = self._metadata.copy( data_columns=[column for column, _ in new_index_map] + self._metadata.data_columns, index_map=index_map) columns = [name for _, name in new_index_map] + self._metadata.data_columns if inplace: self._metadata = metadata self.columns = columns else: kdf = self.copy() kdf._metadata = metadata kdf.columns = columns return kdf def isnull(self): """ Detects missing values for items in the current Dataframe. Return a boolean same-sized Dataframe indicating if the values are NA. NA values, such as None or numpy.NaN, gets mapped to True values. Everything else gets mapped to False values. 
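        Returns
        -------
        DataFrame
            Mask of boolean values for each element in the DataFrame that
            indicates whether an element is an NA value.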
See Also -------- Dataframe.notnull Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)]) >>> df.isnull() 0 1 0 False False 1 False True 2 False True 3 False False >>> df = ks.DataFrame([[None, 'bee', None], ['dog', None, 'fly']]) >>> df.isnull() 0 1 2 0 True False True 1 False True False """ kdf = self.copy() for name, ks in kdf.iteritems(): kdf[name] = ks.isnull() return kdf isna = isnull def notnull(self): """ Detects non-missing values for items in the current Dataframe. This function takes a dataframe and indicates whether it's values are valid (not missing, which is ``NaN`` in numeric datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike). See Also -------- Dataframe.isnull Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)]) >>> df.notnull() 0 1 0 True True 1 True False 2 True False 3 True True >>> df = ks.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) >>> df.notnull() 0 1 2 0 True True True 1 True False True """ kdf = self.copy() for name, ks in kdf.iteritems(): kdf[name] = ks.notnull() return kdf notna = notnull def to_koalas(self): """ Converts the existing DataFrame into a Koalas DataFrame. This method is monkey-patched into Spark's DataFrame and can be used to convert a Spark DataFrame into a Koalas DataFrame. If running on an existing Koalas DataFrame, the method returns itself. If a Koalas DataFrame is converted to a Spark DataFrame and then back to Koalas, it will lose the index information and the original index will be turned into a normal column. See Also -------- DataFrame.to_spark Examples -------- >>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}, columns=['col1', 'col2']) >>> df col1 col2 0 1 3 1 2 4 >>> spark_df = df.to_spark() >>> spark_df DataFrame[__index_level_0__: bigint, col1: bigint, col2: bigint] >>> kdf = spark_df.to_koalas() >>> kdf __index_level_0__ col1 col2 0 0 1 3 1 1 2 4 Calling to_koalas on a Koalas DataFrame simply returns itself. >>> df.to_koalas() col1 col2 0 1 3 1 2 4 """ if isinstance(self, DataFrame): return self else: return DataFrame(self) def to_spark(self): """ Return the current DataFrame as a Spark DataFrame. See Also -------- DataFrame.to_koalas """ return self._sdf def to_pandas(self): """ Return a Pandas DataFrame. .. note:: This method should only be used if the resulting Pandas DataFrame is expected to be small, as all the data is loaded into the driver's memory. Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... columns=['dogs', 'cats']) >>> df.to_pandas() dogs cats 0 0.2 0.3 1 0.0 0.6 2 0.6 0.0 3 0.2 0.1 """ sdf = self._sdf.select(['`{}`'.format(name) for name in self._metadata.columns]) pdf = sdf.toPandas() if len(pdf) == 0 and len(sdf.schema) > 0: # TODO: push to OSS pdf = pdf.astype({field.name: to_arrow_type(field.dataType).to_pandas_dtype() for field in sdf.schema}) index_columns = self._metadata.index_columns if len(index_columns) > 0: append = False for index_field in index_columns: drop = index_field not in self._metadata.data_columns pdf = pdf.set_index(index_field, drop=drop, append=append) append = True pdf = pdf[self._metadata.data_columns] index_names = self._metadata.index_names if len(index_names) > 0: if isinstance(pdf.index, pd.MultiIndex): pdf.index.names = index_names else: pdf.index.name = index_names[0] return pdf # Alias to maintain backward compatibility with Spark toPandas = to_pandas def assign(self, **kwargs): """ Assign new columns to a DataFrame. 
Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though Koalas doesn't check it). If the values are not callable, (e.g. a Series or a literal), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Examples -------- >>> df = ks.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence and you can also create multiple columns within the same assign. >>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32, ... temp_k=df['temp_c'] + 273.15) >>> assigned[['temp_c', 'temp_f', 'temp_k']] temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 Notes ----- Assigning multiple columns within the same ``assign`` is possible but you cannot refer to newly created or modified columns. This feature is supported in pandas for Python 3.6 and later but not in Koalas. In Koalas, all items are computed first, and then assigned. """ from databricks.koalas.series import Series for k, v in kwargs.items(): if not (isinstance(v, (Series, spark.Column)) or callable(v) or pd.api.types.is_scalar(v)): raise TypeError("Column assignment doesn't support type " "{0}".format(type(v).__name__)) if callable(v): kwargs[k] = v(self) pairs = list(kwargs.items()) sdf = self._sdf for (name, c) in pairs: if isinstance(c, Series): sdf = sdf.withColumn(name, c._scol) elif isinstance(c, Column): sdf = sdf.withColumn(name, c) else: sdf = sdf.withColumn(name, F.lit(c)) data_columns = self._metadata.data_columns metadata = self._metadata.copy( data_columns=(data_columns + [name for name, _ in pairs if name not in data_columns])) return DataFrame(sdf, metadata) def to_records(self, index=True, convert_datetime64=None, column_dtypes=None, index_dtypes=None): """ Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. .. note:: This method should only be used if the resulting NumPy ndarray is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. convert_datetime64 : bool, default None Whether to convert the index to datetime.datetime if it is a DatetimeIndex. column_dtypes : str, type, dict, default None If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. 
See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = ks.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() # doctest: +SKIP rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) # doctest: +SKIP rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Specification of dtype for columns is new in Pandas 0.24.0. Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) # doctest: +SKIP rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i4'), ('B', '<f8')]) Specification of dtype for index is new in Pandas 0.24.0. Data types can also be specified for the index: >>> df.to_records(index_dtypes="<S2") # doctest: +SKIP rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('index', 'S2'), ('A', '<i8'), ('B', '<f8')]) """ args = locals() kdf = self return validate_arguments_and_invoke_function( kdf.to_pandas(), self.to_records, pd.DataFrame.to_records, args) def copy(self) -> 'DataFrame': """ Make a copy of this object's indices and data. Returns ------- copy : DataFrame """ return DataFrame(self._sdf, self._metadata.copy()) def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False): """ Remove missing values. Parameters ---------- axis : {0 or 'index'}, default 0 Determine if rows or columns which contain missing values are removed. * 0, or 'index' : Drop rows which contain missing values. how : {'any', 'all'}, default 'any' Determine if row or column is removed from DataFrame, when we have at least one NA or all NA. * 'any' : If any NA values are present, drop that row or column. * 'all' : If all values are NA, drop that row or column. thresh : int, optional Require that many non-NA values. subset : array-like, optional Labels along other axis to consider, e.g. if you are dropping rows these would be a list of columns to include. inplace : bool, default False If True, do operation inplace and return None. Returns ------- DataFrame DataFrame with NA entries dropped from it. See Also -------- DataFrame.drop : Drop specified labels from columns. DataFrame.isnull: Indicate missing values. DataFrame.notnull : Indicate existing (non-missing) values. Examples -------- >>> df = ks.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'], ... "toy": [None, 'Batmobile', 'Bullwhip'], ... "born": [None, "1940-04-25", None]}, ... columns=['name', 'toy', 'born']) >>> df name toy born 0 Alfred None None 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip None Drop the rows where at least one element is missing. >>> df.dropna() name toy born 1 Batman Batmobile 1940-04-25 Drop the rows where all elements are missing. >>> df.dropna(how='all') name toy born 0 Alfred None None 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip None Keep only the rows with at least 2 non-NA values. >>> df.dropna(thresh=2) name toy born 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip None Define in which columns to look for missing values. >>> df.dropna(subset=['name', 'born']) name toy born 1 Batman Batmobile 1940-04-25 Keep the DataFrame with valid entries in the same variable. 
>>> df.dropna(inplace=True) >>> df name toy born 1 Batman Batmobile 1940-04-25 """ if axis == 0 or axis == 'index': if subset is not None: if isinstance(subset, str): columns = [subset] else: columns = list(subset) invalids = [column for column in columns if column not in self._metadata.data_columns] if len(invalids) > 0: raise KeyError(invalids) else: columns = list(self.columns) cnt = reduce(lambda x, y: x + y, [F.when(self[column].notna()._scol, 1).otherwise(0) for column in columns], F.lit(0)) if thresh is not None: pred = cnt >= F.lit(int(thresh)) elif how == 'any': pred = cnt == F.lit(len(columns)) elif how == 'all': pred = cnt > F.lit(0) else: if how is not None: raise ValueError('invalid how option: {h}'.format(h=how)) else: raise TypeError('must specify how or thresh') sdf = self._sdf.filter(pred) if inplace: self._sdf = sdf else: return DataFrame(sdf, self._metadata.copy()) else: raise NotImplementedError("dropna currently only works for axis=0 or axis='index'") def fillna(self, value=None, axis=None, inplace=False): """Fill NA/NaN values. Parameters ---------- value : scalar, dict, Series Value to use to fill holes. alternately a dict/Series of values specifying which value to use for each column. DataFrame is not supported. axis : {0 or `index`} 1 and `columns` are not supported. inplace : boolean, default False Fill in place (do not create a new object) Returns ------- DataFrame DataFrame with NA entries filled. Examples -------- >>> df = ks.DataFrame({ ... 'A': [None, 3, None, None], ... 'B': [2, 4, None, 3], ... 'C': [None, None, None, 1], ... 'D': [0, 1, 5, 4] ... }, ... columns=['A', 'B', 'C', 'D']) >>> df A B C D 0 NaN 2.0 NaN 0 1 3.0 4.0 NaN 1 2 NaN NaN NaN 5 3 NaN 3.0 1.0 4 Replace all NaN elements with 0s. >>> df.fillna(0) A B C D 0 0.0 2.0 0.0 0 1 3.0 4.0 0.0 1 2 0.0 0.0 0.0 5 3 0.0 3.0 1.0 4 Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1, 2, and 3 respectively. >>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3} >>> df.fillna(value=values) A B C D 0 0.0 2.0 2.0 0 1 3.0 4.0 2.0 1 2 0.0 1.0 2.0 5 3 0.0 3.0 1.0 4 """ if axis is None: axis = 0 if not (axis == 0 or axis == "index"): raise NotImplementedError("fillna currently only works for axis=0 or axis='index'") if value is None: raise ValueError('Currently must specify value') if not isinstance(value, (float, int, str, bool, dict, pd.Series)): raise TypeError("Unsupported type %s" % type(value)) if isinstance(value, pd.Series): value = value.to_dict() if isinstance(value, dict): for v in value.values(): if not isinstance(v, (float, int, str, bool)): raise TypeError("Unsupported type %s" % type(v)) sdf = self._sdf.fillna(value) if inplace: self._sdf = sdf else: return DataFrame(sdf, self._metadata.copy()) def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) \ -> 'DataFrame': """ Trim values at input threshold(s). Assigns values outside boundary to boundary values. Parameters ---------- lower : float or int, default None Minimum threshold value. All values below this threshold will be set to it. upper : float or int, default None Maximum threshold value. All values above this threshold will be set to it. Returns ------- DataFrame DataFrame with the values outside the clip boundaries replaced. 
Examples -------- >>> ks.DataFrame({'A': [0, 2, 4]}).clip(1, 3) A 0 1 1 2 2 3 Notes ----- One difference between this implementation and pandas is that running pd.DataFrame({'A': ['a', 'b']}).clip(0, 1) will crash with "TypeError: '<=' not supported between instances of 'str' and 'int'" while ks.DataFrame({'A': ['a', 'b']}).clip(0, 1) will output the original DataFrame, simply ignoring the incompatible types. """ if is_list_like(lower) or is_list_like(upper): raise ValueError("List-like value are not supported for 'lower' and 'upper' at the " + "moment") if lower is None and upper is None: return self sdf = self._sdf numeric_types = (DecimalType, DoubleType, FloatType, ByteType, IntegerType, LongType, ShortType) numeric_columns = [c for c in self.columns if isinstance(sdf.schema[c].dataType, numeric_types)] nonnumeric_columns = [c for c in self.columns if not isinstance(sdf.schema[c].dataType, numeric_types)] if lower is not None: sdf = sdf.select(*[F.when(F.col(c) < lower, lower).otherwise(F.col(c)).alias(c) for c in numeric_columns] + nonnumeric_columns) if upper is not None: sdf = sdf.select(*[F.when(F.col(c) > upper, upper).otherwise(F.col(c)).alias(c) for c in numeric_columns] + nonnumeric_columns) # Restore initial column order sdf = sdf.select(list(self.columns)) return ks.DataFrame(sdf) def head(self, n=5): """ Return the first `n` rows. This function returns the first `n` rows for the object based on position. It is useful for quickly testing if your object has the right type of data in it. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- obj_head : same type as caller The first `n` rows of the caller object. Examples -------- >>> df = ks.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the first 5 lines >>> df.head() animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey Viewing the first `n` lines (three in this case) >>> df.head(3) animal 0 alligator 1 bee 2 falcon """ return DataFrame(self._sdf.limit(n), self._metadata.copy()) @property def columns(self): """The column labels of the DataFrame.""" return pd.Index(self._metadata.data_columns) @columns.setter def columns(self, names): old_names = self._metadata.data_columns if len(old_names) != len(names): raise ValueError( "Length mismatch: Expected axis has %d elements, new values have %d elements" % (len(old_names), len(names))) sdf = self._sdf.select(self._metadata.index_columns + [self[old_name]._scol.alias(new_name) for (old_name, new_name) in zip(old_names, names)]) self._sdf = sdf self._metadata = self._metadata.copy(data_columns=names) @property def dtypes(self): """Return the dtypes in the DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the object dtype. Returns ------- pd.Series The data type of each column. Examples -------- >>> df = ks.DataFrame({'a': list('abc'), ... 'b': list(range(1, 4)), ... 'c': np.arange(3, 6).astype('i1'), ... 'd': np.arange(4.0, 7.0, dtype='float64'), ... 'e': [True, False, True], ... 'f': pd.date_range('20130101', periods=3)}, ... 
columns=['a', 'b', 'c', 'd', 'e', 'f']) >>> df.dtypes a object b int64 c int8 d float64 e bool f datetime64[ns] dtype: object """ return pd.Series([self[col].dtype for col in self._metadata.data_columns], index=self._metadata.data_columns) def count(self): """ Count non-NA cells for each column. The values `None`, `NaN` are considered NA. Returns ------- pandas.Series See Also -------- Series.count: Number of non-NA elements in a Series. DataFrame.shape: Number of DataFrame rows and columns (including NA elements). DataFrame.isna: Boolean same-sized DataFrame showing places of NA elements. Examples -------- Constructing DataFrame from a dictionary: >>> df = ks.DataFrame({"Person": ... ["John", "Myla", "Lewis", "John", "Myla"], ... "Age": [24., np.nan, 21., 33, 26], ... "Single": [False, True, True, True, False]}, ... columns=["Person", "Age", "Single"]) >>> df Person Age Single 0 John 24.0 False 1 Myla NaN True 2 Lewis 21.0 True 3 John 33.0 True 4 Myla 26.0 False Notice the uncounted NA values: >>> df.count() Person 5 Age 4 Single 5 dtype: int64 """ return self._reduce_for_stat_function(_Frame._count_expr) def drop(self, labels=None, axis=1, columns: Union[str, List[str]] = None): """ Drop specified labels from columns. Remove columns by specifying label names and axis=1 or columns. When specifying both labels and columns, only labels will be dropped. Removing rows is yet to be implemented. Parameters ---------- labels : single label or list-like Column labels to drop. axis : {1 or 'columns'}, default 1 .. dropna currently only works for axis=1 'columns' axis=0 is yet to be implemented. columns : single label or list-like Alternative to specifying axis (``labels, axis=1`` is equivalent to ``columns=labels``). Returns ------- dropped : DataFrame See Also -------- Series.dropna Examples -------- >>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]}, ... columns=['x', 'y', 'z', 'w']) >>> df x y z w 0 1 3 5 7 1 2 4 6 8 >>> df.drop('x', axis=1) y z w 0 3 5 7 1 4 6 8 >>> df.drop(['y', 'z'], axis=1) x w 0 1 7 1 2 8 >>> df.drop(columns=['y', 'z']) x w 0 1 7 1 2 8 Notes ----- Currently only axis = 1 is supported in this function, axis = 0 is yet to be implemented. """ if labels is not None: axis = self._validate_axis(axis) if axis == 1: return self.drop(columns=labels) raise NotImplementedError("Drop currently only works for axis=1") elif columns is not None: if isinstance(columns, str): columns = [columns] sdf = self._sdf.drop(*columns) metadata = self._metadata.copy( data_columns=[column for column in self.columns if column not in columns] ) return DataFrame(sdf, metadata) else: raise ValueError("Need to specify at least one of 'labels' or 'columns'") def get(self, key, default=None): """ Get item from object for given key (DataFrame column, Panel slice, etc.). Returns default value if not found. Parameters ---------- key : object Returns ------- value : same type as items contained in object Examples -------- >>> df = ks.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']}, ... columns=['x', 'y', 'z']) >>> df x y z 0 0 a a 1 1 b b 2 2 b b >>> df.get('x') 0 0 1 1 2 2 Name: x, dtype: int64 >>> df.get(['x', 'y']) x y 0 0 a 1 1 b 2 2 b """ try: return self._pd_getitem(key) except (KeyError, ValueError, IndexError): return default def sort_values(self, by, ascending=True, inplace=False, na_position='last'): """ Sort by the values along either axis. Parameters ---------- by : str or list of str ascending : bool or list of bool, default True Sort ascending vs. descending. 
Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. inplace : bool, default False if True, perform operation in-place na_position : {'first', 'last'}, default 'last' `first` puts NaNs at the beginning, `last` puts NaNs at the end Returns ------- sorted_obj : DataFrame Examples -------- >>> df = ks.DataFrame({ ... 'col1': ['A', 'B', None, 'D', 'C'], ... 'col2': [2, 9, 8, 7, 4], ... 'col3': [0, 9, 4, 2, 3], ... }, ... columns=['col1', 'col2', 'col3']) >>> df col1 col2 col3 0 A 2 0 1 B 9 9 2 None 8 4 3 D 7 2 4 C 4 3 Sort by col1 >>> df.sort_values(by=['col1']) col1 col2 col3 0 A 2 0 1 B 9 9 4 C 4 3 3 D 7 2 2 None 8 4 Sort Descending >>> df.sort_values(by='col1', ascending=False) col1 col2 col3 3 D 7 2 4 C 4 3 1 B 9 9 0 A 2 0 2 None 8 4 Sort by multiple columns >>> df = ks.DataFrame({ ... 'col1': ['A', 'A', 'B', None, 'D', 'C'], ... 'col2': [2, 1, 9, 8, 7, 4], ... 'col3': [0, 1, 9, 4, 2, 3], ... }, ... columns=['col1', 'col2', 'col3']) >>> df.sort_values(by=['col1', 'col2']) col1 col2 col3 1 A 1 1 0 A 2 0 2 B 9 9 5 C 4 3 4 D 7 2 3 None 8 4 """ if isinstance(by, str): by = [by] if isinstance(ascending, bool): ascending = [ascending] * len(by) if len(ascending) != len(by): raise ValueError('Length of ascending ({}) != length of by ({})' .format(len(ascending), len(by))) if na_position not in ('first', 'last'): raise ValueError("invalid na_position: '{}'".format(na_position)) # Mapper: Get a spark column function for (ascending, na_position) combination # Note that 'asc_nulls_first' and friends were added as of Spark 2.4, see SPARK-23847. mapper = { (True, 'first'): lambda x: Column(getattr(x._jc, "asc_nulls_first")()), (True, 'last'): lambda x: Column(getattr(x._jc, "asc_nulls_last")()), (False, 'first'): lambda x: Column(getattr(x._jc, "desc_nulls_first")()), (False, 'last'): lambda x: Column(getattr(x._jc, "desc_nulls_last")()), } by = [mapper[(asc, na_position)](self[colname]._scol) for colname, asc in zip(by, ascending)] kdf = DataFrame(self._sdf.sort(*by), self._metadata.copy()) if inplace: self._sdf = kdf._sdf self._metadata = kdf._metadata else: return kdf # TODO: add keep = First def nlargest(self, n: int, columns: 'Any') -> 'DataFrame': """ Return the first `n` rows ordered by `columns` in descending order. Return the first `n` rows with the largest values in `columns`, in descending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=False).head(n)``, but more performant in Pandas. In Koalas, thanks to Spark's lazy execution and query optimizer, the two would have same performance. Parameters ---------- n : int Number of rows to return. columns : label or list of labels Column label(s) to order by. Returns ------- DataFrame The first `n` rows ordered by the given columns in descending order. See Also -------- DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in ascending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Notes ----- This function cannot be used with all column types. For example, when specifying columns with `object` or `category` dtypes, ``TypeError`` is raised. Examples -------- >>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan], ... 
'Y': [6, 7, 8, 9, 10, 11, 12]}) >>> df X Y 0 1.0 6 1 2.0 7 2 3.0 8 3 5.0 9 4 6.0 10 5 7.0 11 6 NaN 12 In the following example, we will use ``nlargest`` to select the three rows having the largest values in column "population". >>> df.nlargest(n=3, columns='X') X Y 5 7.0 11 4 6.0 10 3 5.0 9 >>> df.nlargest(n=3, columns=['Y', 'X']) X Y 6 NaN 12 5 7.0 11 4 6.0 10 """ return self.sort_values(by=columns, ascending=False).head(n=n) # TODO: add keep = First def nsmallest(self, n: int, columns: 'Any') -> 'DataFrame': """ Return the first `n` rows ordered by `columns` in ascending order. Return the first `n` rows with the smallest values in `columns`, in ascending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``, but more performant. In Koalas, thanks to Spark's lazy execution and query optimizer, the two would have same performance. Parameters ---------- n : int Number of items to retrieve. columns : list or str Column name or names to order by. Returns ------- DataFrame See Also -------- DataFrame.nlargest : Return the first `n` rows ordered by `columns` in descending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Examples -------- >>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan], ... 'Y': [6, 7, 8, 9, 10, 11, 12]}) >>> df X Y 0 1.0 6 1 2.0 7 2 3.0 8 3 5.0 9 4 6.0 10 5 7.0 11 6 NaN 12 In the following example, we will use ``nsmallest`` to select the three rows having the smallest values in column "a". >>> df.nsmallest(n=3, columns='X') # doctest: +NORMALIZE_WHITESPACE X Y 0 1.0 6 1 2.0 7 2 3.0 8 To order by the largest values in column "a" and then "c", we can specify multiple columns like in the next example. >>> df.nsmallest(n=3, columns=['Y', 'X']) # doctest: +NORMALIZE_WHITESPACE X Y 0 1.0 6 1 2.0 7 2 3.0 8 """ return self.sort_values(by=columns, ascending=True).head(n=n) def isin(self, values): """ Whether each element in the DataFrame is contained in values. Parameters ---------- values : iterable or dict The sequence of values to test. If values is a dict, the keys must be the column names, which must match. Series and DataFrame are not supported. Returns ------- DataFrame DataFrame of booleans showing whether each element in the DataFrame is contained in values. Examples -------- >>> df = ks.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]}, ... index=['falcon', 'dog'], ... 
columns=['num_legs', 'num_wings']) >>> df num_legs num_wings falcon 2 2 dog 4 0 When ``values`` is a list check whether every value in the DataFrame is present in the list (which animals have 0 or 2 legs or wings) >>> df.isin([0, 2]) num_legs num_wings falcon True True dog False True When ``values`` is a dict, we can pass values to check for each column separately: >>> df.isin({'num_wings': [0, 3]}) num_legs num_wings falcon False False dog False True """ if isinstance(values, (pd.DataFrame, pd.Series)): raise NotImplementedError("DataFrame and Series are not supported") if isinstance(values, dict) and not set(values.keys()).issubset(self.columns): raise AttributeError( "'DataFrame' object has no attribute %s" % (set(values.keys()).difference(self.columns))) _select_columns = self._metadata.index_columns if isinstance(values, dict): for col in self.columns: if col in values: _select_columns.append(self[col]._scol.isin(values[col]).alias(col)) else: _select_columns.append(F.lit(False).alias(col)) elif is_list_like(values): _select_columns += [ self[col]._scol.isin(list(values)).alias(col) for col in self.columns] else: raise TypeError('Values should be iterable, Series, DataFrame or dict.') return DataFrame(self._sdf.select(_select_columns), self._metadata.copy()) def pipe(self, func, *args, **kwargs): r""" Apply func(self, \*args, \*\*kwargs). Parameters ---------- func : function function to apply to the DataFrame. ``args``, and ``kwargs`` are passed into ``func``. Alternatively a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of ``callable`` that expects the DataFrames. args : iterable, optional positional arguments passed into ``func``. kwargs : mapping, optional a dictionary of keyword arguments passed into ``func``. Returns ------- object : the return type of ``func``. Notes ----- Use ``.pipe`` when chaining together functions that expect Series, DataFrames or GroupBy objects. For example, given >>> df = ks.DataFrame({'category': ['A', 'A', 'B'], ... 'col1': [1, 2, 3], ... 'col2': [4, 5, 6]}, ... columns=['category', 'col1', 'col2']) >>> def keep_category_a(df): ... return df[df['category'] == 'A'] >>> def add_one(df, column): ... return df.assign(col3=df[column] + 1) >>> def multiply(df, column1, column2): ... return df.assign(col4=df[column1] * df[column2]) instead of writing >>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3") category col1 col2 col3 col4 0 A 1 4 2 8 1 A 2 5 3 15 You can write >>> (df.pipe(keep_category_a) ... .pipe(add_one, column="col1") ... .pipe(multiply, column1="col2", column2="col3") ... ) category col1 col2 col3 col4 0 A 1 4 2 8 1 A 2 5 3 15 If you have a function that takes the data as (say) the second argument, pass a tuple indicating which keyword expects the data. For example, suppose ``f`` takes its data as ``df``: >>> def multiply_2(column1, df, column2): ... return df.assign(col4=df[column1] * df[column2]) Then you can write >>> (df.pipe(keep_category_a) ... .pipe(add_one, column="col1") ... .pipe((multiply_2, 'df'), column1="col2", column2="col3") ... ) category col1 col2 col3 col4 0 A 1 4 2 8 1 A 2 5 3 15 """ if isinstance(func, tuple): func, target = func if target in kwargs: raise ValueError('%s is both the pipe target and a keyword ' 'argument' % target) kwargs[target] = self return func(*args, **kwargs) else: return func(self, *args, **kwargs) @property def shape(self): """ Return a tuple representing the dimensionality of the DataFrame. 
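        Returns
        -------
        tuple
            Number of rows and number of columns of the DataFrame.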
Examples -------- >>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 'col3': [5, 6]}) >>> df.shape (2, 3) """ return len(self), len(self.columns) def merge(self, right: 'DataFrame', how: str = 'inner', on: str = None, left_index: bool = False, right_index: bool = False, suffixes: Tuple[str, str] = ('_x', '_y')) -> 'DataFrame': """ Merge DataFrame objects with a database-style join. Parameters ---------- right: Object to merge with. how: Type of merge to be performed. {‘left’, ‘right’, ‘outer’, ‘inner’}, default ‘inner’ left: use only keys from left frame, similar to a SQL left outer join; preserve key order. right: use only keys from right frame, similar to a SQL right outer join; preserve key order. outer: use union of keys from both frames, similar to a SQL full outer join; sort keys lexicographically. inner: use intersection of keys from both frames, similar to a SQL inner join; preserve the order of the left keys. on: Column or index level names to join on. These must be found in both DataFrames. If on is None and not merging on indexes then this defaults to the intersection of the columns in both DataFrames. left_index: Use the index from the left DataFrame as the join key(s). If it is a MultiIndex, the number of keys in the other DataFrame (either the index or a number of columns) must match the number of levels. right_index: Use the index from the right DataFrame as the join key. Same caveats as left_index. suffixes: Suffix to apply to overlapping column names in the left and right side, respectively. Returns ------- DataFrame A DataFrame of the two merged objects. Examples -------- >>> left_kdf = ks.DataFrame({'A': [1, 2]}) >>> right_kdf = ks.DataFrame({'B': ['x', 'y']}, index=[1, 2]) >>> left_kdf.merge(right_kdf, left_index=True, right_index=True) A B 0 2 x >>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='left') A B 0 1 None 1 2 x >>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='right') A B 0 2.0 x 1 NaN y >>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='outer') A B 0 1.0 None 1 2.0 x 2 NaN y Notes ----- As described in #263, joining string columns currently returns None for missing values instead of NaN. """ if on is None and not left_index and not right_index: raise ValueError("At least 'on' or 'left_index' and 'right_index' have to be set") if on is not None and (left_index or right_index): raise ValueError("Only 'on' or 'left_index' and 'right_index' can be set") if how == 'full': warnings.warn("Warning: While Koalas will accept 'full', you should use 'outer' " + "instead to be compatible with the pandas merge API", UserWarning) if how == 'outer': # 'outer' in pandas equals 'full' in Spark how = 'full' if how not in ('inner', 'left', 'right', 'full'): raise ValueError("The 'how' parameter has to be amongst the following values: ", "['inner', 'left', 'right', 'outer']") if on is None: # FIXME Move index string to constant? 
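            # With no explicit 'on', the merge is index-based (left_index/right_index
            # were validated above), so the join key defaults to the auto-generated
            # index column that Koalas keeps alongside the data columns.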
on = '__index_level_0__' left_table = self._sdf.alias('left_table') right_table = right._sdf.alias('right_table') # Unpack suffixes tuple for convenience left_suffix = suffixes[0] right_suffix = suffixes[1] # Append suffixes to columns with the same name to avoid conflicts later duplicate_columns = list(self.columns & right.columns) if duplicate_columns: for duplicate_column_name in duplicate_columns: left_table = left_table.withColumnRenamed(duplicate_column_name, duplicate_column_name + left_suffix) right_table = right_table.withColumnRenamed(duplicate_column_name, duplicate_column_name + right_suffix) join_condition = (left_table[on] == right_table[on] if on not in duplicate_columns else left_table[on + left_suffix] == right_table[on + right_suffix]) joined_table = left_table.join(right_table, join_condition, how=how) if on in duplicate_columns: # Merge duplicate key columns joined_table = joined_table.withColumnRenamed(on + left_suffix, on) joined_table = joined_table.drop(on + right_suffix) # Remove auxiliary index # FIXME Move index string to constant? joined_table = joined_table.drop('__index_level_0__') kdf = DataFrame(joined_table) return kdf def sample(self, n: Optional[int] = None, frac: Optional[float] = None, replace: bool = False, random_state: Optional[int] = None) -> 'DataFrame': """ Return a random sample of items from an axis of object. Please call this function using named argument by specifing the ``frac`` argument. You can use `random_state` for reproducibility. However, note that different from pandas, specifying a seed in Koalas/Spark does not guarantee the sampled rows will be fixed. The result set depends on not only the seed, but also how the data is distributed across machines and to some extent network randomness when shuffle operations are involved. Even in the simplest case, the result set will depend on the system's CPU core count. Parameters ---------- n : int, optional Number of items to return. This is currently NOT supported. Use frac instead. frac : float, optional Fraction of axis items to return. replace : bool, default False Sample with or without replacement. random_state : int, optional Seed for the random number generator (if int). Returns ------- Series or DataFrame A new object of same type as caller containing the sampled items. Examples -------- >>> df = ks.DataFrame({'num_legs': [2, 4, 8, 0], ... 'num_wings': [2, 0, 0, 0], ... 'num_specimen_seen': [10, 2, 1, 8]}, ... index=['falcon', 'dog', 'spider', 'fish'], ... columns=['num_legs', 'num_wings', 'num_specimen_seen']) >>> df # doctest: +SKIP num_legs num_wings num_specimen_seen falcon 2 2 10 dog 4 0 2 spider 8 0 1 fish 0 0 8 A random 25% sample of the ``DataFrame``. Note that we use `random_state` to ensure the reproducibility of the examples. >>> df.sample(frac=0.25, random_state=1) # doctest: +SKIP num_legs num_wings num_specimen_seen falcon 2 2 10 fish 0 0 8 Extract 25% random elements from the ``Series`` ``df['num_legs']``, with replacement, so the same items could appear more than once. >>> df['num_legs'].sample(frac=0.4, replace=True, random_state=1) # doctest: +SKIP falcon 2 spider 8 spider 8 Name: num_legs, dtype: int64 Specifying the exact number of items to return is not supported at the moment. >>> df.sample(n=5) # doctest: +ELLIPSIS Traceback (most recent call last): ... NotImplementedError: Function sample currently does not support specifying ... """ # Note: we don't run any of the doctests because the result can change depending on the # system's core count. 
if n is not None: raise NotImplementedError("Function sample currently does not support specifying " "exact number of items to return. Use frac instead.") if frac is None: raise ValueError("frac must be specified.") sdf = self._sdf.sample(withReplacement=replace, fraction=frac, seed=random_state) return DataFrame(sdf, self._metadata.copy()) def astype(self, dtype) -> 'DataFrame': """ Cast a pandas object to a specified dtype ``dtype``. Parameters ---------- dtype : data type, or dict of column name -> data type Use a numpy.dtype or Python type to cast entire pandas object to the same type. Alternatively, use {col: dtype, ...}, where col is a column label and dtype is a numpy.dtype or Python type to cast one or more of the DataFrame's columns to column-specific types. Returns ------- casted : same type as caller See Also -------- to_datetime : Convert argument to datetime. Examples -------- >>> df = ks.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, dtype='int64') >>> df a b 0 1 1 1 2 2 2 3 3 Convert to float type: >>> df.astype('float') a b 0 1.0 1.0 1 2.0 2.0 2 3.0 3.0 Convert to int64 type back: >>> df.astype('int64') a b 0 1 1 1 2 2 2 3 3 Convert column a to float type: >>> df.astype({'a': float}) a b 0 1.0 1 1 2.0 2 2 3.0 3 """ results = [] if is_dict_like(dtype): for col_name in dtype.keys(): if col_name not in self.columns: raise KeyError('Only a column name can be used for the ' 'key in a dtype mappings argument.') for col_name, col in self.iteritems(): if col_name in dtype: results.append(col.astype(dtype=dtype[col_name])) else: results.append(col) else: for col_name, col in self.iteritems(): results.append(col.astype(dtype=dtype)) sdf = self._sdf.select( self._metadata.index_columns + list(map(lambda ser: ser._scol, results))) return DataFrame(sdf, self._metadata.copy()) def _pd_getitem(self, key): from databricks.koalas.series import Series if key is None: raise KeyError("none key") if isinstance(key, str): try: return Series(self._sdf.__getitem__(key), anchor=self, index=self._metadata.index_map) except AnalysisException: raise KeyError(key) if np.isscalar(key) or isinstance(key, (tuple, str)): raise NotImplementedError(key) elif isinstance(key, slice): return self.loc[key] if isinstance(key, (pd.Series, np.ndarray, pd.Index)): raise NotImplementedError(key) if isinstance(key, list): return self.loc[:, key] if isinstance(key, DataFrame): # TODO Should not implement alignment, too dangerous? return Series(self._sdf.__getitem__(key), anchor=self, index=self._metadata.index_map) if isinstance(key, Series): # TODO Should not implement alignment, too dangerous? # It is assumed to be only a filter, otherwise .loc should be used. 
bcol = key._scol.cast("boolean") return DataFrame(self._sdf.filter(bcol), self._metadata.copy()) raise NotImplementedError(key) def __repr__(self): pdf = self.head(max_display_count + 1).to_pandas() pdf_length = len(pdf) repr_string = repr(pdf.iloc[:max_display_count]) if pdf_length > max_display_count: match = REPR_PATTERN.search(repr_string) if match is not None: nrows = match.group("rows") ncols = match.group("columns") footer = ("\n\n[Showing only the first {nrows} rows x {ncols} columns]" .format(nrows=nrows, ncols=ncols)) return REPR_PATTERN.sub(footer, repr_string) return repr_string def _repr_html_(self): pdf = self.head(max_display_count + 1).to_pandas() pdf_length = len(pdf) repr_html = pdf[:max_display_count]._repr_html_() if pdf_length > max_display_count: match = REPR_HTML_PATTERN.search(repr_html) if match is not None: nrows = match.group("rows") ncols = match.group("columns") by = chr(215) footer = ('\n<p>Showing only the first {rows} rows {by} {cols} columns</p>\n</div>' .format(rows=nrows, by=by, cols=ncols)) return REPR_HTML_PATTERN.sub(footer, repr_html) return repr_html def __getitem__(self, key): return self._pd_getitem(key) def __setitem__(self, key, value): from databricks.koalas.series import Series # For now, we don't support realignment against different dataframes. # This is too expensive in Spark. # Are we assigning against a column? if isinstance(value, Series): assert value._kdf is self, \ "Cannot combine column argument because it comes from a different dataframe" if isinstance(key, (tuple, list)): assert isinstance(value.schema, StructType) field_names = value.schema.fieldNames() kdf = self.assign(**{k: value[c] for k, c in zip(key, field_names)}) else: kdf = self.assign(**{key: value}) self._sdf = kdf._sdf self._metadata = kdf._metadata def __getattr__(self, key: str) -> Any: from databricks.koalas.series import Series if key.startswith("__") or key.startswith("_pandas_") or key.startswith("_spark_"): raise AttributeError(key) if hasattr(_MissingPandasLikeDataFrame, key): property_or_func = getattr(_MissingPandasLikeDataFrame, key) if isinstance(property_or_func, property): return property_or_func.fget(self) # type: ignore else: return partial(property_or_func, self) return Series(self._sdf.__getattr__(key), anchor=self, index=self._metadata.index_map) def __len__(self): return self._sdf.count() def __dir__(self): fields = [f for f in self._sdf.schema.fieldNames() if ' ' not in f] return super(DataFrame, self).__dir__() + fields @classmethod def _validate_axis(cls, axis=0): if axis not in (0, 1, 'index', 'columns', None): raise ValueError('No axis named {0}'.format(axis)) # convert to numeric axis return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis) def _reduce_spark_multi(sdf, aggs): """ Performs a reduction on a dataframe, the functions being known sql aggregate functions. """ assert isinstance(sdf, spark.DataFrame) sdf0 = sdf.agg(*aggs) l = sdf0.head(2) assert len(l) == 1, (sdf, l) row = l[0] l2 = list(row) assert len(l2) == len(aggs), (row, l2) return l2
36.824502
100
0.535493
import re import warnings from functools import partial, reduce from typing import Any, Optional, List, Tuple, Union import numpy as np import pandas as pd from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype, is_list_like, \ is_dict_like from pyspark import sql as spark from pyspark.sql import functions as F, Column from pyspark.sql.types import (BooleanType, ByteType, DecimalType, DoubleType, FloatType, IntegerType, LongType, ShortType, StructField, StructType, to_arrow_type) from pyspark.sql.utils import AnalysisException from databricks import koalas as ks from databricks.koalas.utils import default_session, validate_arguments_and_invoke_function from databricks.koalas.generic import _Frame, max_display_count from databricks.koalas.metadata import Metadata from databricks.koalas.missing.frame import _MissingPandasLikeDataFrame from databricks.koalas.ml import corr from databricks.koalas.typedef import infer_pd_series_spark_type REPR_PATTERN = re.compile(r"\n\n\[(?P<rows>[0-9]+) rows x (?P<columns>[0-9]+) columns\]$") REPR_HTML_PATTERN = re.compile( r"\n\<p\>(?P<rows>[0-9]+) rows × (?P<columns>[0-9]+) columns\<\/p\>\n\<\/div\>$") class DataFrame(_Frame): def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False): if isinstance(data, pd.DataFrame): assert index is None assert columns is None assert dtype is None assert not copy self._init_from_pandas(data) elif isinstance(data, spark.DataFrame): assert columns is None assert dtype is None assert not copy self._init_from_spark(data, index) else: pdf = pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy) self._init_from_pandas(pdf) def _init_from_pandas(self, pdf): metadata = Metadata.from_pandas(pdf) reset_index = pdf.reset_index() reset_index.columns = metadata.columns schema = StructType([StructField(name, infer_pd_series_spark_type(col), nullable=bool(col.isnull().any())) for name, col in reset_index.iteritems()]) for name, col in reset_index.iteritems(): dt = col.dtype if is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt): continue reset_index[name] = col.replace({np.nan: None}) self._init_from_spark(default_session().createDataFrame(reset_index, schema=schema), metadata) def _init_from_spark(self, sdf, metadata=None): self._sdf = sdf if metadata is None: self._metadata = Metadata(data_columns=self._sdf.schema.fieldNames()) else: self._metadata = metadata @property def _index_columns(self): return [self._sdf.__getitem__(field) for field in self._metadata.index_columns] def _reduce_for_stat_function(self, sfun): from inspect import signature exprs = [] num_args = len(signature(sfun).parameters) for col in self.columns: col_sdf = self._sdf[col] col_type = self._sdf.schema[col].dataType if isinstance(col_type, BooleanType) and sfun.__name__ not in ('min', 'max'): # Stat functions cannot be used with boolean values by default # Thus, cast to integer (true to 1 and false to 0) # Exclude the min and max methods though since those work with booleans col_sdf = col_sdf.cast('integer') if num_args == 1: # Only pass in the column if sfun accepts only one arg col_sdf = sfun(col_sdf) else: # must be 2 assert num_args == 2 # Pass in both the column and its data type if sfun accepts two args col_sdf = sfun(col_sdf, col_type) exprs.append(col_sdf.alias(col)) sdf = self._sdf.select(*exprs) pdf = sdf.toPandas() assert len(pdf) == 1, (sdf, pdf) row = pdf.iloc[0] row.name = None return row # Return first row as a Series def corr(self, method='pearson'): return corr(self, method) def 
iteritems(self): cols = list(self.columns) return list((col_name, self[col_name]) for col_name in cols) def to_clipboard(self, excel=True, sep=None, **kwargs): args = locals() kdf = self return validate_arguments_and_invoke_function( kdf.to_pandas(), self.to_clipboard, pd.DataFrame.to_clipboard, args) def to_html(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.', bold_rows=True, classes=None, escape=True, notebook=False, border=None, table_id=None, render_links=False): # Make sure locals() call is at the top of the function so we don't capture local variables. args = locals() if max_rows is not None: kdf = self.head(max_rows) else: kdf = self return validate_arguments_and_invoke_function( kdf.to_pandas(), self.to_html, pd.DataFrame.to_html, args) def to_string(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.', line_width=None): args = locals() if max_rows is not None: kdf = self.head(max_rows) else: kdf = self return validate_arguments_and_invoke_function( kdf.to_pandas(), self.to_string, pd.DataFrame.to_string, args) def to_dict(self, orient='dict', into=dict): # Make sure locals() call is at the top of the function so we don't capture local variables. args = locals() kdf = self return validate_arguments_and_invoke_function( kdf.to_pandas(), self.to_dict, pd.DataFrame.to_dict, args) def to_latex(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, bold_rows=False, column_format=None, longtable=None, escape=None, encoding=None, decimal='.', multicolumn=None, multicolumn_format=None, multirow=None): args = locals() kdf = self return validate_arguments_and_invoke_function( kdf.to_pandas(), self.to_latex, pd.DataFrame.to_latex, args) @property def index(self): from databricks.koalas.series import Series if len(self._metadata.index_map) != 1: raise KeyError('Currently supported only when the DataFrame has a single index.') return Series(self._index_columns[0], anchor=self, index=[]) def set_index(self, keys, drop=True, append=False, inplace=False): if isinstance(keys, str): keys = [keys] else: keys = list(keys) for key in keys: if key not in self.columns: raise KeyError(key) if drop: data_columns = [column for column in self._metadata.data_columns if column not in keys] else: data_columns = self._metadata.data_columns if append: index_map = self._metadata.index_map + [(column, column) for column in keys] else: index_map = [(column, column) for column in keys] metadata = self._metadata.copy(data_columns=data_columns, index_map=index_map) sdf = self._sdf.select(['`{}`'.format(name) for name in metadata.columns]) if inplace: self._metadata = metadata self._sdf = sdf else: kdf = self.copy() kdf._metadata = metadata kdf._sdf = sdf return kdf def reset_index(self, level=None, drop=False, inplace=False): # TODO: add example of MultiIndex back. 
See https://github.com/databricks/koalas/issues/301 if len(self._metadata.index_map) == 0: raise NotImplementedError('Can\'t reset index because there is no index.') multi_index = len(self._metadata.index_map) > 1 def rename(index): if multi_index: return 'level_{}'.format(index) else: if 'index' not in self._metadata.data_columns: return 'index' else: return 'level_{}'.format(index) if level is None: new_index_map = [(column, name if name is not None else rename(i)) for i, (column, name) in enumerate(self._metadata.index_map)] index_map = [] else: if isinstance(level, (int, str)): level = [level] level = list(level) if all(isinstance(l, int) for l in level): for lev in level: if lev >= len(self._metadata.index_map): raise IndexError('Too many levels: Index has only {} level, not {}' .format(len(self._metadata.index_map), lev + 1)) idx = level elif all(isinstance(lev, str) for lev in level): idx = [] for l in level: try: i = self._metadata.index_columns.index(l) idx.append(i) except ValueError: if multi_index: raise KeyError('Level unknown not found') else: raise KeyError('Level unknown must be same as name ({})' .format(self._metadata.index_columns[0])) else: raise ValueError('Level should be all int or all string.') idx.sort() new_index_map = [] index_map = self._metadata.index_map.copy() for i in idx: info = self._metadata.index_map[i] index_column, index_name = info new_index_map.append( (index_column, index_name if index_name is not None else rename(index_name))) index_map.remove(info) if drop: new_index_map = [] metadata = self._metadata.copy( data_columns=[column for column, _ in new_index_map] + self._metadata.data_columns, index_map=index_map) columns = [name for _, name in new_index_map] + self._metadata.data_columns if inplace: self._metadata = metadata self.columns = columns else: kdf = self.copy() kdf._metadata = metadata kdf.columns = columns return kdf def isnull(self): kdf = self.copy() for name, ks in kdf.iteritems(): kdf[name] = ks.isnull() return kdf isna = isnull def notnull(self): kdf = self.copy() for name, ks in kdf.iteritems(): kdf[name] = ks.notnull() return kdf notna = notnull def to_koalas(self): if isinstance(self, DataFrame): return self else: return DataFrame(self) def to_spark(self): return self._sdf def to_pandas(self): sdf = self._sdf.select(['`{}`'.format(name) for name in self._metadata.columns]) pdf = sdf.toPandas() if len(pdf) == 0 and len(sdf.schema) > 0: pdf = pdf.astype({field.name: to_arrow_type(field.dataType).to_pandas_dtype() for field in sdf.schema}) index_columns = self._metadata.index_columns if len(index_columns) > 0: append = False for index_field in index_columns: drop = index_field not in self._metadata.data_columns pdf = pdf.set_index(index_field, drop=drop, append=append) append = True pdf = pdf[self._metadata.data_columns] index_names = self._metadata.index_names if len(index_names) > 0: if isinstance(pdf.index, pd.MultiIndex): pdf.index.names = index_names else: pdf.index.name = index_names[0] return pdf toPandas = to_pandas def assign(self, **kwargs): from databricks.koalas.series import Series for k, v in kwargs.items(): if not (isinstance(v, (Series, spark.Column)) or callable(v) or pd.api.types.is_scalar(v)): raise TypeError("Column assignment doesn't support type " "{0}".format(type(v).__name__)) if callable(v): kwargs[k] = v(self) pairs = list(kwargs.items()) sdf = self._sdf for (name, c) in pairs: if isinstance(c, Series): sdf = sdf.withColumn(name, c._scol) elif isinstance(c, Column): sdf = sdf.withColumn(name, c) else: sdf = 
sdf.withColumn(name, F.lit(c)) data_columns = self._metadata.data_columns metadata = self._metadata.copy( data_columns=(data_columns + [name for name, _ in pairs if name not in data_columns])) return DataFrame(sdf, metadata) def to_records(self, index=True, convert_datetime64=None, column_dtypes=None, index_dtypes=None): args = locals() kdf = self return validate_arguments_and_invoke_function( kdf.to_pandas(), self.to_records, pd.DataFrame.to_records, args) def copy(self) -> 'DataFrame': return DataFrame(self._sdf, self._metadata.copy()) def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False): if axis == 0 or axis == 'index': if subset is not None: if isinstance(subset, str): columns = [subset] else: columns = list(subset) invalids = [column for column in columns if column not in self._metadata.data_columns] if len(invalids) > 0: raise KeyError(invalids) else: columns = list(self.columns) cnt = reduce(lambda x, y: x + y, [F.when(self[column].notna()._scol, 1).otherwise(0) for column in columns], F.lit(0)) if thresh is not None: pred = cnt >= F.lit(int(thresh)) elif how == 'any': pred = cnt == F.lit(len(columns)) elif how == 'all': pred = cnt > F.lit(0) else: if how is not None: raise ValueError('invalid how option: {h}'.format(h=how)) else: raise TypeError('must specify how or thresh') sdf = self._sdf.filter(pred) if inplace: self._sdf = sdf else: return DataFrame(sdf, self._metadata.copy()) else: raise NotImplementedError("dropna currently only works for axis=0 or axis='index'") def fillna(self, value=None, axis=None, inplace=False): if axis is None: axis = 0 if not (axis == 0 or axis == "index"): raise NotImplementedError("fillna currently only works for axis=0 or axis='index'") if value is None: raise ValueError('Currently must specify value') if not isinstance(value, (float, int, str, bool, dict, pd.Series)): raise TypeError("Unsupported type %s" % type(value)) if isinstance(value, pd.Series): value = value.to_dict() if isinstance(value, dict): for v in value.values(): if not isinstance(v, (float, int, str, bool)): raise TypeError("Unsupported type %s" % type(v)) sdf = self._sdf.fillna(value) if inplace: self._sdf = sdf else: return DataFrame(sdf, self._metadata.copy()) def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) \ -> 'DataFrame': if is_list_like(lower) or is_list_like(upper): raise ValueError("List-like value are not supported for 'lower' and 'upper' at the " + "moment") if lower is None and upper is None: return self sdf = self._sdf numeric_types = (DecimalType, DoubleType, FloatType, ByteType, IntegerType, LongType, ShortType) numeric_columns = [c for c in self.columns if isinstance(sdf.schema[c].dataType, numeric_types)] nonnumeric_columns = [c for c in self.columns if not isinstance(sdf.schema[c].dataType, numeric_types)] if lower is not None: sdf = sdf.select(*[F.when(F.col(c) < lower, lower).otherwise(F.col(c)).alias(c) for c in numeric_columns] + nonnumeric_columns) if upper is not None: sdf = sdf.select(*[F.when(F.col(c) > upper, upper).otherwise(F.col(c)).alias(c) for c in numeric_columns] + nonnumeric_columns) # Restore initial column order sdf = sdf.select(list(self.columns)) return ks.DataFrame(sdf) def head(self, n=5): return DataFrame(self._sdf.limit(n), self._metadata.copy()) @property def columns(self): return pd.Index(self._metadata.data_columns) @columns.setter def columns(self, names): old_names = self._metadata.data_columns if len(old_names) != len(names): raise ValueError( "Length mismatch: Expected axis 
has %d elements, new values have %d elements" % (len(old_names), len(names))) sdf = self._sdf.select(self._metadata.index_columns + [self[old_name]._scol.alias(new_name) for (old_name, new_name) in zip(old_names, names)]) self._sdf = sdf self._metadata = self._metadata.copy(data_columns=names) @property def dtypes(self): return pd.Series([self[col].dtype for col in self._metadata.data_columns], index=self._metadata.data_columns) def count(self): return self._reduce_for_stat_function(_Frame._count_expr) def drop(self, labels=None, axis=1, columns: Union[str, List[str]] = None): if labels is not None: axis = self._validate_axis(axis) if axis == 1: return self.drop(columns=labels) raise NotImplementedError("Drop currently only works for axis=1") elif columns is not None: if isinstance(columns, str): columns = [columns] sdf = self._sdf.drop(*columns) metadata = self._metadata.copy( data_columns=[column for column in self.columns if column not in columns] ) return DataFrame(sdf, metadata) else: raise ValueError("Need to specify at least one of 'labels' or 'columns'") def get(self, key, default=None): try: return self._pd_getitem(key) except (KeyError, ValueError, IndexError): return default def sort_values(self, by, ascending=True, inplace=False, na_position='last'): if isinstance(by, str): by = [by] if isinstance(ascending, bool): ascending = [ascending] * len(by) if len(ascending) != len(by): raise ValueError('Length of ascending ({}) != length of by ({})' .format(len(ascending), len(by))) if na_position not in ('first', 'last'): raise ValueError("invalid na_position: '{}'".format(na_position)) # Mapper: Get a spark column function for (ascending, na_position) combination # Note that 'asc_nulls_first' and friends were added as of Spark 2.4, see SPARK-23847. 
mapper = { (True, 'first'): lambda x: Column(getattr(x._jc, "asc_nulls_first")()), (True, 'last'): lambda x: Column(getattr(x._jc, "asc_nulls_last")()), (False, 'first'): lambda x: Column(getattr(x._jc, "desc_nulls_first")()), (False, 'last'): lambda x: Column(getattr(x._jc, "desc_nulls_last")()), } by = [mapper[(asc, na_position)](self[colname]._scol) for colname, asc in zip(by, ascending)] kdf = DataFrame(self._sdf.sort(*by), self._metadata.copy()) if inplace: self._sdf = kdf._sdf self._metadata = kdf._metadata else: return kdf # TODO: add keep = First def nlargest(self, n: int, columns: 'Any') -> 'DataFrame': return self.sort_values(by=columns, ascending=False).head(n=n) # TODO: add keep = First def nsmallest(self, n: int, columns: 'Any') -> 'DataFrame': return self.sort_values(by=columns, ascending=True).head(n=n) def isin(self, values): if isinstance(values, (pd.DataFrame, pd.Series)): raise NotImplementedError("DataFrame and Series are not supported") if isinstance(values, dict) and not set(values.keys()).issubset(self.columns): raise AttributeError( "'DataFrame' object has no attribute %s" % (set(values.keys()).difference(self.columns))) _select_columns = self._metadata.index_columns if isinstance(values, dict): for col in self.columns: if col in values: _select_columns.append(self[col]._scol.isin(values[col]).alias(col)) else: _select_columns.append(F.lit(False).alias(col)) elif is_list_like(values): _select_columns += [ self[col]._scol.isin(list(values)).alias(col) for col in self.columns] else: raise TypeError('Values should be iterable, Series, DataFrame or dict.') return DataFrame(self._sdf.select(_select_columns), self._metadata.copy()) def pipe(self, func, *args, **kwargs): if isinstance(func, tuple): func, target = func if target in kwargs: raise ValueError('%s is both the pipe target and a keyword ' 'argument' % target) kwargs[target] = self return func(*args, **kwargs) else: return func(self, *args, **kwargs) @property def shape(self): return len(self), len(self.columns) def merge(self, right: 'DataFrame', how: str = 'inner', on: str = None, left_index: bool = False, right_index: bool = False, suffixes: Tuple[str, str] = ('_x', '_y')) -> 'DataFrame': if on is None and not left_index and not right_index: raise ValueError("At least 'on' or 'left_index' and 'right_index' have to be set") if on is not None and (left_index or right_index): raise ValueError("Only 'on' or 'left_index' and 'right_index' can be set") if how == 'full': warnings.warn("Warning: While Koalas will accept 'full', you should use 'outer' " + "instead to be compatible with the pandas merge API", UserWarning) if how == 'outer': # 'outer' in pandas equals 'full' in Spark how = 'full' if how not in ('inner', 'left', 'right', 'full'): raise ValueError("The 'how' parameter has to be amongst the following values: ", "['inner', 'left', 'right', 'outer']") if on is None: # FIXME Move index string to constant? 
on = '__index_level_0__' left_table = self._sdf.alias('left_table') right_table = right._sdf.alias('right_table') # Unpack suffixes tuple for convenience left_suffix = suffixes[0] right_suffix = suffixes[1] # Append suffixes to columns with the same name to avoid conflicts later duplicate_columns = list(self.columns & right.columns) if duplicate_columns: for duplicate_column_name in duplicate_columns: left_table = left_table.withColumnRenamed(duplicate_column_name, duplicate_column_name + left_suffix) right_table = right_table.withColumnRenamed(duplicate_column_name, duplicate_column_name + right_suffix) join_condition = (left_table[on] == right_table[on] if on not in duplicate_columns else left_table[on + left_suffix] == right_table[on + right_suffix]) joined_table = left_table.join(right_table, join_condition, how=how) if on in duplicate_columns: # Merge duplicate key columns joined_table = joined_table.withColumnRenamed(on + left_suffix, on) joined_table = joined_table.drop(on + right_suffix) # Remove auxiliary index # FIXME Move index string to constant? joined_table = joined_table.drop('__index_level_0__') kdf = DataFrame(joined_table) return kdf def sample(self, n: Optional[int] = None, frac: Optional[float] = None, replace: bool = False, random_state: Optional[int] = None) -> 'DataFrame': # Note: we don't run any of the doctests because the result can change depending on the if n is not None: raise NotImplementedError("Function sample currently does not support specifying " "exact number of items to return. Use frac instead.") if frac is None: raise ValueError("frac must be specified.") sdf = self._sdf.sample(withReplacement=replace, fraction=frac, seed=random_state) return DataFrame(sdf, self._metadata.copy()) def astype(self, dtype) -> 'DataFrame': results = [] if is_dict_like(dtype): for col_name in dtype.keys(): if col_name not in self.columns: raise KeyError('Only a column name can be used for the ' 'key in a dtype mappings argument.') for col_name, col in self.iteritems(): if col_name in dtype: results.append(col.astype(dtype=dtype[col_name])) else: results.append(col) else: for col_name, col in self.iteritems(): results.append(col.astype(dtype=dtype)) sdf = self._sdf.select( self._metadata.index_columns + list(map(lambda ser: ser._scol, results))) return DataFrame(sdf, self._metadata.copy()) def _pd_getitem(self, key): from databricks.koalas.series import Series if key is None: raise KeyError("none key") if isinstance(key, str): try: return Series(self._sdf.__getitem__(key), anchor=self, index=self._metadata.index_map) except AnalysisException: raise KeyError(key) if np.isscalar(key) or isinstance(key, (tuple, str)): raise NotImplementedError(key) elif isinstance(key, slice): return self.loc[key] if isinstance(key, (pd.Series, np.ndarray, pd.Index)): raise NotImplementedError(key) if isinstance(key, list): return self.loc[:, key] if isinstance(key, DataFrame): # TODO Should not implement alignment, too dangerous? return Series(self._sdf.__getitem__(key), anchor=self, index=self._metadata.index_map) if isinstance(key, Series): # TODO Should not implement alignment, too dangerous? # It is assumed to be only a filter, otherwise .loc should be used. 
bcol = key._scol.cast("boolean") return DataFrame(self._sdf.filter(bcol), self._metadata.copy()) raise NotImplementedError(key) def __repr__(self): pdf = self.head(max_display_count + 1).to_pandas() pdf_length = len(pdf) repr_string = repr(pdf.iloc[:max_display_count]) if pdf_length > max_display_count: match = REPR_PATTERN.search(repr_string) if match is not None: nrows = match.group("rows") ncols = match.group("columns") footer = ("\n\n[Showing only the first {nrows} rows x {ncols} columns]" .format(nrows=nrows, ncols=ncols)) return REPR_PATTERN.sub(footer, repr_string) return repr_string def _repr_html_(self): pdf = self.head(max_display_count + 1).to_pandas() pdf_length = len(pdf) repr_html = pdf[:max_display_count]._repr_html_() if pdf_length > max_display_count: match = REPR_HTML_PATTERN.search(repr_html) if match is not None: nrows = match.group("rows") ncols = match.group("columns") by = chr(215) footer = ('\n<p>Showing only the first {rows} rows {by} {cols} columns</p>\n</div>' .format(rows=nrows, by=by, cols=ncols)) return REPR_HTML_PATTERN.sub(footer, repr_html) return repr_html def __getitem__(self, key): return self._pd_getitem(key) def __setitem__(self, key, value): from databricks.koalas.series import Series # For now, we don't support realignment against different dataframes. if isinstance(value, Series): assert value._kdf is self, \ "Cannot combine column argument because it comes from a different dataframe" if isinstance(key, (tuple, list)): assert isinstance(value.schema, StructType) field_names = value.schema.fieldNames() kdf = self.assign(**{k: value[c] for k, c in zip(key, field_names)}) else: kdf = self.assign(**{key: value}) self._sdf = kdf._sdf self._metadata = kdf._metadata def __getattr__(self, key: str) -> Any: from databricks.koalas.series import Series if key.startswith("__") or key.startswith("_pandas_") or key.startswith("_spark_"): raise AttributeError(key) if hasattr(_MissingPandasLikeDataFrame, key): property_or_func = getattr(_MissingPandasLikeDataFrame, key) if isinstance(property_or_func, property): return property_or_func.fget(self) else: return partial(property_or_func, self) return Series(self._sdf.__getattr__(key), anchor=self, index=self._metadata.index_map) def __len__(self): return self._sdf.count() def __dir__(self): fields = [f for f in self._sdf.schema.fieldNames() if ' ' not in f] return super(DataFrame, self).__dir__() + fields @classmethod def _validate_axis(cls, axis=0): if axis not in (0, 1, 'index', 'columns', None): raise ValueError('No axis named {0}'.format(axis)) return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis) def _reduce_spark_multi(sdf, aggs): assert isinstance(sdf, spark.DataFrame) sdf0 = sdf.agg(*aggs) l = sdf0.head(2) assert len(l) == 1, (sdf, l) row = l[0] l2 = list(row) assert len(l2) == len(aggs), (row, l2) return l2
true
true
f70285bf0c127bd9449019b899b0766290314975
11,080
py
Python
CodeGenerator.py
vdominguez1993/PlantUMLCodeGenerator
61bf3f4f11962c041f1d7ee5cdc3889b473d278b
[ "Apache-2.0" ]
null
null
null
CodeGenerator.py
vdominguez1993/PlantUMLCodeGenerator
61bf3f4f11962c041f1d7ee5cdc3889b473d278b
[ "Apache-2.0" ]
null
null
null
CodeGenerator.py
vdominguez1993/PlantUMLCodeGenerator
61bf3f4f11962c041f1d7ee5cdc3889b473d278b
[ "Apache-2.0" ]
null
null
null
import argparse, subprocess, os, re from jinja2 import Environment, FileSystemLoader def GetBaseName(full_path): return os.path.basename(full_path) class PlantUMLCodeGeneration(): class StateType(): def __init__(self): self.entry = None self.during = None self.exit = None self.transitions = [] self.submachine = [] def StringMe(self): return 'Entry: {} During: {} Exit: {} Transitions : {} Submachines: {}'.format( str(self.entry), str(self.during), str(self.exit), [transition.StringMe() for transition in self.transitions], [submachine.StringMe() for submachine in self.submachine] ) class TransitionType(): def __init__(self): self.destination = None self.conditions = None self.actions = None def StringMe(self): return 'Destination: {} Condition: {} Action: {}'.format( str(self.destination), str(self.conditions), str(self.actions) ) class StateMachineType(): def __init__(self): self.title = None self.states = {} self.notes = [] def StringMe(self): return 'Title: {}\nStates: \n\t{}\nNotes: {}\n'.format( str(self.title), '\n\t'.join([state + ' ' + self.states[state].StringMe() for state in self.states]), str(self.notes) ) def __init__(self, plantuml_file): if os.path.isfile(plantuml_file): self.plantuml_file = plantuml_file else: raise Exception('File {} does not exist.'.format(plantuml_file)) def CheckUml(self): if subprocess.call(['plantuml', '-checkonly', self.plantuml_file]) == 0: return True else: return False def GenerateCode(self, output_files, templates, no_check = False): if (no_check == False): if self.CheckUml() == False: raise Exception('File {} contains UML errors.'.format(self.plantuml_file)) uml, uml_params = self.ParseStateMachine() if len(output_files) == len(templates): for out_file, template in zip(output_files, templates): self.GenerateFromTemplate(out_file, template, uml, uml_params) else: raise Exception('Number of template and output files don\'t match.') def ParseStateMachine(self): uml = self.GetUMLText() uml_params = self.ParseStateMachineAsDict(uml_text = self.GetUMLText(grouped=True))[0] return uml, uml_params def GetUMLText(self, grouped = False): with open(self.plantuml_file, 'r') as plantuml_file: uml = plantuml_file.readlines() if grouped == False: return uml else: #Group all strings containing \ at the end uml_grouped = [] accumulated_string = '' for line in uml: #First strip the line to forget about leading and trailing #spaces line = line.strip() #Remove aliases line = re.sub('state\s+\".*\"\s+as','state', line) #Accumulate all lines that end with \ if line.endswith('\\'): accumulated_string += line[:-1] else: if accumulated_string == '': uml_grouped.append(line) else: uml_grouped.append(accumulated_string + line) accumulated_string = '' return uml_grouped def ParseStateMachineAsDict(self, uml_text, init_line = 0, submachine = False): uml_params = self.StateMachineType() line_num = init_line opening_braces = 0 closing_braces = 0 while line_num < len(uml_text): line = uml_text[line_num] if submachine: # Pending to refactor this opening_braces += line.count('{') closing_braces += line.count('}') if closing_braces > opening_braces: break # Regex magic yay! 
matchtransition = re.match('(\[\*\]|\w+)(?:|\s+)-->(?:|\s+)(\w+)(?:(?:|\s+)\:(.*))?',line) matchstateaction = re.match('(?:state\s+)?(\w+)(?:|\s+)(?:(?:|\s+)\:(.*))?',line) matchsubmachine = re.match('(?:state\s+)?(\w+)(?:|\s+)\{.*$',line) if line.startswith('title'): uml_params.title = line elif line.startswith('note'): note_match = re.match('.*\"(.*)\"', line) if note_match: uml_params.notes.append(self.__LineCleanup(note_match.group(1))) elif matchtransition: self.__AddTransition(uml_params, matchtransition) elif matchsubmachine: #Pending to do this in a more elegant way and not depending # on the order of the ifs state_name = matchstateaction.group(1) if uml_params.states.get(state_name) == None: uml_params.states[state_name] = self.StateType() sub_info = self.ParseStateMachineAsDict(uml_text, init_line = line_num + 1, submachine = True) #Set state name as title sub_info[0].title = state_name + '_submachine' uml_params.states[state_name].submachine.append(sub_info[0]) line_num = sub_info[1] elif matchstateaction: self.__AddStateActions(uml_params, matchstateaction) line_num += 1 return uml_params, line_num def __LineCleanup(self, line_string): cleaned_string = re.sub(r'(?<!\\)\\n','\n',line_string) cleaned_string = cleaned_string.replace('\\\\','\\').strip() return cleaned_string def __AddTransition(self, uml_params, matchtransition): transition = self.TransitionType() state_origin = matchtransition.group(1) transition.destination = matchtransition.group(2) text = matchtransition.group(3) if text is not None: text = text.split('\\ndo:\\n') conditions = text[0] transition.conditions = self.__LineCleanup(conditions) if len(text) > 1: actions = text[1] if text else None transition.actions = self.__LineCleanup(actions) #transition.actions = matchtransition.group(4) #Check if state exits, if not, create it if uml_params.states.get(state_origin) == None: uml_params.states[state_origin] = self.StateType() uml_params.states[state_origin].transitions.append(transition) #Also, create destination state if it does not exist if uml_params.states.get(transition.destination) == None: uml_params.states[transition.destination] = self.StateType() def __AddStateActions(self, uml_params, matchstateaction): state_name = matchstateaction.group(1) actions = matchstateaction.group(2) if uml_params.states.get(state_name) == None: uml_params.states[state_name] = self.StateType() #Get entry, exit and during if actions: #Do a regex split action_matches = re.split(r'(entry\:|during\:|exit\:)', actions) #Replace \n by real \n and trim action_matches = [self.__LineCleanup(line) for line in action_matches] #The list will start with an empty string (or spaces) if it does not match entry #any of the keywords. 
But if it starts with text it is a during if action_matches[0].strip() != '': uml_params.states[state_name].during = action_matches[0] line_num = 1 while line_num < len(action_matches): if action_matches[line_num] == 'entry:': uml_params.states[state_name].entry = action_matches[line_num + 1] line_num += 1 elif action_matches[line_num] == 'during:': uml_params.states[state_name].during = action_matches[line_num + 1] line_num += 1 elif action_matches[line_num] == 'exit:': uml_params.states[state_name].exit = action_matches[line_num + 1] line_num += 1 else: raise Exception('Action {} not recognized.'.format(action_matches[line_num])) line_num += 1 def GenerateFromTemplate(self, output_file, template_file, uml, uml_params): env = Environment( loader=FileSystemLoader(os.path.dirname(template_file)) ) template = env.get_template(os.path.basename(template_file)) with open(output_file, 'w') as out_file: out_file.write(template.render(file_name=output_file, uml=uml, uml_params=uml_params, get_submachines=self.GetSubmachineObjects, get_basename=GetBaseName)) def GetSubmachineObjects(self, uml_object): uml_submachines_list = [] for state in uml_object.states: if len(uml_object.states[state].submachine) > 0: for uml_submachine in uml_object.states[state].submachine: #Set title of submachine as the name of state parent uml_submachines_list.append(uml_submachine) #Recursion to get more levels uml_submachines_list += self.GetSubmachineObjects(uml_submachine) return uml_submachines_list if __name__ == "__main__": parser = argparse.ArgumentParser(description='Process PlantUML file to generate code') parser.add_argument('--input','-i', required = True, dest = 'plantuml_file', help ='Plant UML file from which to generate code') parser.add_argument('--output','-o', required = True, dest = 'output_files', help ='Code file generated. Separate by spaces in case of' 'more than one template', nargs='+') parser.add_argument('--templates', '-t', dest = 'templates', default = '[templates/C_code.c,templates/C_code.h]', help = 'Templates to be used separated by spaces', nargs='+') parser.add_argument('--no-check', action = 'store_true', help = 'This option is strongly discouraged. With this option' 'you are defining to not check that your PlantUML is valid.') args = parser.parse_args() plantuml_obj = PlantUMLCodeGeneration(args.plantuml_file) #Transform templates to list plantuml_obj.GenerateCode(args.output_files, args.templates)
43.28125
117
0.568231
import argparse, subprocess, os, re from jinja2 import Environment, FileSystemLoader def GetBaseName(full_path): return os.path.basename(full_path) class PlantUMLCodeGeneration(): class StateType(): def __init__(self): self.entry = None self.during = None self.exit = None self.transitions = [] self.submachine = [] def StringMe(self): return 'Entry: {} During: {} Exit: {} Transitions : {} Submachines: {}'.format( str(self.entry), str(self.during), str(self.exit), [transition.StringMe() for transition in self.transitions], [submachine.StringMe() for submachine in self.submachine] ) class TransitionType(): def __init__(self): self.destination = None self.conditions = None self.actions = None def StringMe(self): return 'Destination: {} Condition: {} Action: {}'.format( str(self.destination), str(self.conditions), str(self.actions) ) class StateMachineType(): def __init__(self): self.title = None self.states = {} self.notes = [] def StringMe(self): return 'Title: {}\nStates: \n\t{}\nNotes: {}\n'.format( str(self.title), '\n\t'.join([state + ' ' + self.states[state].StringMe() for state in self.states]), str(self.notes) ) def __init__(self, plantuml_file): if os.path.isfile(plantuml_file): self.plantuml_file = plantuml_file else: raise Exception('File {} does not exist.'.format(plantuml_file)) def CheckUml(self): if subprocess.call(['plantuml', '-checkonly', self.plantuml_file]) == 0: return True else: return False def GenerateCode(self, output_files, templates, no_check = False): if (no_check == False): if self.CheckUml() == False: raise Exception('File {} contains UML errors.'.format(self.plantuml_file)) uml, uml_params = self.ParseStateMachine() if len(output_files) == len(templates): for out_file, template in zip(output_files, templates): self.GenerateFromTemplate(out_file, template, uml, uml_params) else: raise Exception('Number of template and output files don\'t match.') def ParseStateMachine(self): uml = self.GetUMLText() uml_params = self.ParseStateMachineAsDict(uml_text = self.GetUMLText(grouped=True))[0] return uml, uml_params def GetUMLText(self, grouped = False): with open(self.plantuml_file, 'r') as plantuml_file: uml = plantuml_file.readlines() if grouped == False: return uml else: #Group all strings containing \ at the end uml_grouped = [] accumulated_string = '' for line in uml: #First strip the line to forget about leading and trailing #spaces line = line.strip() #Remove aliases line = re.sub('state\s+\".*\"\s+as','state', line) #Accumulate all lines that end with \ if line.endswith('\\'): accumulated_string += line[:-1] else: if accumulated_string == '': uml_grouped.append(line) else: uml_grouped.append(accumulated_string + line) accumulated_string = '' return uml_grouped def ParseStateMachineAsDict(self, uml_text, init_line = 0, submachine = False): uml_params = self.StateMachineType() line_num = init_line opening_braces = 0 closing_braces = 0 while line_num < len(uml_text): line = uml_text[line_num] if submachine: # Pending to refactor this opening_braces += line.count('{') closing_braces += line.count('}') if closing_braces > opening_braces: break # Regex magic yay! 
matchtransition = re.match('(\[\*\]|\w+)(?:|\s+)-->(?:|\s+)(\w+)(?:(?:|\s+)\:(.*))?',line) matchstateaction = re.match('(?:state\s+)?(\w+)(?:|\s+)(?:(?:|\s+)\:(.*))?',line) matchsubmachine = re.match('(?:state\s+)?(\w+)(?:|\s+)\{.*$',line) if line.startswith('title'): uml_params.title = line elif line.startswith('note'): note_match = re.match('.*\"(.*)\"', line) if note_match: uml_params.notes.append(self.__LineCleanup(note_match.group(1))) elif matchtransition: self.__AddTransition(uml_params, matchtransition) elif matchsubmachine: #Pending to do this in a more elegant way and not depending # on the order of the ifs state_name = matchstateaction.group(1) if uml_params.states.get(state_name) == None: uml_params.states[state_name] = self.StateType() sub_info = self.ParseStateMachineAsDict(uml_text, init_line = line_num + 1, submachine = True) #Set state name as title sub_info[0].title = state_name + '_submachine' uml_params.states[state_name].submachine.append(sub_info[0]) line_num = sub_info[1] elif matchstateaction: self.__AddStateActions(uml_params, matchstateaction) line_num += 1 return uml_params, line_num def __LineCleanup(self, line_string): cleaned_string = re.sub(r'(?<!\\)\\n','\n',line_string) cleaned_string = cleaned_string.replace('\\\\','\\').strip() return cleaned_string def __AddTransition(self, uml_params, matchtransition): transition = self.TransitionType() state_origin = matchtransition.group(1) transition.destination = matchtransition.group(2) text = matchtransition.group(3) if text is not None: text = text.split('\\ndo:\\n') conditions = text[0] transition.conditions = self.__LineCleanup(conditions) if len(text) > 1: actions = text[1] if text else None transition.actions = self.__LineCleanup(actions) #transition.actions = matchtransition.group(4) #Check if state exits, if not, create it if uml_params.states.get(state_origin) == None: uml_params.states[state_origin] = self.StateType() uml_params.states[state_origin].transitions.append(transition) #Also, create destination state if it does not exist if uml_params.states.get(transition.destination) == None: uml_params.states[transition.destination] = self.StateType() def __AddStateActions(self, uml_params, matchstateaction): state_name = matchstateaction.group(1) actions = matchstateaction.group(2) if uml_params.states.get(state_name) == None: uml_params.states[state_name] = self.StateType() #Get entry, exit and during if actions: #Do a regex split action_matches = re.split(r'(entry\:|during\:|exit\:)', actions) #Replace \n by real \n and trim action_matches = [self.__LineCleanup(line) for line in action_matches] #The list will start with an empty string (or spaces) if it does not match entry #any of the keywords. 
But if it starts with text it is a during if action_matches[0].strip() != '': uml_params.states[state_name].during = action_matches[0] line_num = 1 while line_num < len(action_matches): if action_matches[line_num] == 'entry:': uml_params.states[state_name].entry = action_matches[line_num + 1] line_num += 1 elif action_matches[line_num] == 'during:': uml_params.states[state_name].during = action_matches[line_num + 1] line_num += 1 elif action_matches[line_num] == 'exit:': uml_params.states[state_name].exit = action_matches[line_num + 1] line_num += 1 else: raise Exception('Action {} not recognized.'.format(action_matches[line_num])) line_num += 1 def GenerateFromTemplate(self, output_file, template_file, uml, uml_params): env = Environment( loader=FileSystemLoader(os.path.dirname(template_file)) ) template = env.get_template(os.path.basename(template_file)) with open(output_file, 'w') as out_file: out_file.write(template.render(file_name=output_file, uml=uml, uml_params=uml_params, get_submachines=self.GetSubmachineObjects, get_basename=GetBaseName)) def GetSubmachineObjects(self, uml_object): uml_submachines_list = [] for state in uml_object.states: if len(uml_object.states[state].submachine) > 0: for uml_submachine in uml_object.states[state].submachine: #Set title of submachine as the name of state parent uml_submachines_list.append(uml_submachine) #Recursion to get more levels uml_submachines_list += self.GetSubmachineObjects(uml_submachine) return uml_submachines_list if __name__ == "__main__": parser = argparse.ArgumentParser(description='Process PlantUML file to generate code') parser.add_argument('--input','-i', required = True, dest = 'plantuml_file', help ='Plant UML file from which to generate code') parser.add_argument('--output','-o', required = True, dest = 'output_files', help ='Code file generated. Separate by spaces in case of' 'more than one template', nargs='+') parser.add_argument('--templates', '-t', dest = 'templates', default = '[templates/C_code.c,templates/C_code.h]', help = 'Templates to be used separated by spaces', nargs='+') parser.add_argument('--no-check', action = 'store_true', help = 'This option is strongly discouraged. With this option' 'you are defining to not check that your PlantUML is valid.') args = parser.parse_args() plantuml_obj = PlantUMLCodeGeneration(args.plantuml_file) #Transform templates to list plantuml_obj.GenerateCode(args.output_files, args.templates)
true
true
f70288269e894757296237fedfbe8a7b9a17cae6
889
py
Python
doc/examples/transform/plot_pyramid.py
paalge/scikit-image
f3c4b88b0610242b033449fd38c1118475f96a73
[ "BSD-3-Clause" ]
null
null
null
doc/examples/transform/plot_pyramid.py
paalge/scikit-image
f3c4b88b0610242b033449fd38c1118475f96a73
[ "BSD-3-Clause" ]
2
2016-01-08T18:30:49.000Z
2016-07-21T07:55:29.000Z
doc/examples/transform/plot_pyramid.py
paalge/scikit-image
f3c4b88b0610242b033449fd38c1118475f96a73
[ "BSD-3-Clause" ]
2
2017-05-09T13:33:37.000Z
2018-12-23T10:57:18.000Z
""" ==================== Build image pyramids ==================== The ``pyramid_gaussian`` function takes an image and yields successive images shrunk by a constant scale factor. Image pyramids are often used, e.g., to implement algorithms for denoising, texture discrimination, and scale- invariant detection. """ import numpy as np import matplotlib.pyplot as plt from skimage import data from skimage.transform import pyramid_gaussian image = data.astronaut() rows, cols, dim = image.shape pyramid = tuple(pyramid_gaussian(image, downscale=2)) composite_image = np.zeros((rows, cols + cols / 2, 3), dtype=np.double) composite_image[:rows, :cols, :] = pyramid[0] i_row = 0 for p in pyramid[1:]: n_rows, n_cols = p.shape[:2] composite_image[i_row:i_row + n_rows, cols:cols + n_cols] = p i_row += n_rows fig, ax = plt.subplots() ax.imshow(composite_image) plt.show()
24.694444
77
0.704162
import numpy as np
import matplotlib.pyplot as plt

from skimage import data
from skimage.transform import pyramid_gaussian


image = data.astronaut()
rows, cols, dim = image.shape
pyramid = tuple(pyramid_gaussian(image, downscale=2))

composite_image = np.zeros((rows, cols + cols / 2, 3), dtype=np.double)

composite_image[:rows, :cols, :] = pyramid[0]

i_row = 0
for p in pyramid[1:]:
    n_rows, n_cols = p.shape[:2]
    composite_image[i_row:i_row + n_rows, cols:cols + n_cols] = p
    i_row += n_rows

fig, ax = plt.subplots()
ax.imshow(composite_image)
plt.show()
true
true
f70289491eddd352d457d19b402af7a9836267ff
4,623
py
Python
thrift/lib/py3/test/interactions/interaction_test.py
sakibguy/fbthrift
8123a9192519072e119ac9817c6b59a35b98b81c
[ "Apache-2.0" ]
2,112
2015-01-02T11:34:27.000Z
2022-03-31T16:30:42.000Z
thrift/lib/py3/test/interactions/interaction_test.py
sakibguy/fbthrift
8123a9192519072e119ac9817c6b59a35b98b81c
[ "Apache-2.0" ]
372
2015-01-05T10:40:09.000Z
2022-03-31T20:45:11.000Z
thrift/lib/py3/test/interactions/interaction_test.py
sakibguy/fbthrift
8123a9192519072e119ac9817c6b59a35b98b81c
[ "Apache-2.0" ]
582
2015-01-03T01:51:56.000Z
2022-03-31T02:01:09.000Z
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import asyncio
import unittest

from blank_interaction.services import BlankServiceInterface
from interaction.clients import Calculator
from interaction.types import Point
from thrift.py3.client import get_client, ClientType

from .run_interaction import run_interaction


class InteractionTest(unittest.TestCase):
    def setUp(self) -> None:
        self.interaction = run_interaction()

    def init_client(self) -> Calculator:
        return get_client(
            Calculator,
            port=self.interaction.getPort(),
            host="::1",
            client_type=ClientType.THRIFT_ROCKET_CLIENT_TYPE,
        )

    def tearDown(self) -> None:
        self.interaction.reset()

    def test_basic(self) -> None:
        async def inner_test() -> None:
            async with self.init_client() as calc:
                self.assertEqual(await calc.addPrimitive(0, 0), 0)
                async with calc.createAddition() as add:
                    self.assertEqual(await add.getPrimitive(), 0)

                    add.accumulatePrimitive(1)
                    self.assertEqual(await add.getPrimitive(), 1)

                    point = await add.getPoint()
                    self.assertEqual(point.x, 0)
                    self.assertEqual(point.y, 0)

                    newPoint = Point(x=2, y=3)
                    await add.accumulatePoint(newPoint)
                    point = await add.getPoint()
                    self.assertEqual(point.x, 2)
                    self.assertEqual(point.y, 3)

                    await add.noop()

        asyncio.run(inner_test())

    def test_multiple_interactions(self) -> None:
        async def inner_test() -> None:
            async with self.init_client() as calc:
                self.assertEqual(await calc.addPrimitive(0, 0), 0)
                async with calc.createAddition() as add:
                    self.assertEqual(await add.getPrimitive(), 0)
                    add.accumulatePrimitive(1)
                    self.assertEqual(await add.getPrimitive(), 1)
                async with calc.createAddition() as add:
                    self.assertEqual(await add.getPrimitive(), 0)
                    add.accumulatePrimitive(2)
                    self.assertEqual(await add.getPrimitive(), 2)

        asyncio.run(inner_test())

    def test_multiple_clients(self) -> None:
        async def inner_test() -> None:
            async with self.init_client() as calc:
                self.assertEqual(await calc.addPrimitive(0, 0), 0)
                async with calc.createAddition() as add:
                    self.assertEqual(await add.getPrimitive(), 0)
                    add.accumulatePrimitive(1)
                    self.assertEqual(await add.getPrimitive(), 1)
            async with self.init_client() as calc:
                self.assertEqual(await calc.addPrimitive(0, 1), 1)
                async with calc.createAddition() as add:
                    self.assertEqual(await add.getPrimitive(), 0)
                    add.accumulatePrimitive(2)
                    self.assertEqual(await add.getPrimitive(), 2)

        asyncio.run(inner_test())

    def test_terminate_unused(self) -> None:
        async def inner_test() -> None:
            async with self.init_client() as calc:
                async with calc.createAddition() as _:
                    pass

        asyncio.run(inner_test())

    def test_terminate_client_error(self) -> None:
        class SpecificError(Exception):
            pass

        async def inner_test() -> None:
            try:
                async with self.init_client() as calc:
                    self.assertEqual(await calc.addPrimitive(0, 0), 0)
                    async with calc.createAddition() as add:
                        add.accumulatePrimitive(1)
                        raise SpecificError("Generic error")
            except SpecificError:
                pass
            else:
                self.fail("Didn't throw SpecificError")

        asyncio.run(inner_test())
35.837209
74
0.589011
import asyncio import unittest from blank_interaction.services import BlankServiceInterface from interaction.clients import Calculator from interaction.types import Point from thrift.py3.client import get_client, ClientType from .run_interaction import run_interaction class InteractionTest(unittest.TestCase): def setUp(self) -> None: self.interaction = run_interaction() def init_client(self) -> Calculator: return get_client( Calculator, port=self.interaction.getPort(), host="::1", client_type=ClientType.THRIFT_ROCKET_CLIENT_TYPE, ) def tearDown(self) -> None: self.interaction.reset() def test_basic(self) -> None: async def inner_test() -> None: async with self.init_client() as calc: self.assertEqual(await calc.addPrimitive(0, 0), 0) async with calc.createAddition() as add: self.assertEqual(await add.getPrimitive(), 0) add.accumulatePrimitive(1) self.assertEqual(await add.getPrimitive(), 1) point = await add.getPoint() self.assertEqual(point.x, 0) self.assertEqual(point.y, 0) newPoint = Point(x=2, y=3) await add.accumulatePoint(newPoint) point = await add.getPoint() self.assertEqual(point.x, 2) self.assertEqual(point.y, 3) await add.noop() asyncio.run(inner_test()) def test_multiple_interactions(self) -> None: async def inner_test() -> None: async with self.init_client() as calc: self.assertEqual(await calc.addPrimitive(0, 0), 0) async with calc.createAddition() as add: self.assertEqual(await add.getPrimitive(), 0) add.accumulatePrimitive(1) self.assertEqual(await add.getPrimitive(), 1) async with calc.createAddition() as add: self.assertEqual(await add.getPrimitive(), 0) add.accumulatePrimitive(2) self.assertEqual(await add.getPrimitive(), 2) asyncio.run(inner_test()) def test_multiple_clients(self) -> None: async def inner_test() -> None: async with self.init_client() as calc: self.assertEqual(await calc.addPrimitive(0, 0), 0) async with calc.createAddition() as add: self.assertEqual(await add.getPrimitive(), 0) add.accumulatePrimitive(1) self.assertEqual(await add.getPrimitive(), 1) async with self.init_client() as calc: self.assertEqual(await calc.addPrimitive(0, 1), 1) async with calc.createAddition() as add: self.assertEqual(await add.getPrimitive(), 0) add.accumulatePrimitive(2) self.assertEqual(await add.getPrimitive(), 2) asyncio.run(inner_test()) def test_terminate_unused(self) -> None: async def inner_test() -> None: async with self.init_client() as calc: async with calc.createAddition() as _: pass asyncio.run(inner_test()) def test_terminate_client_error(self) -> None: class SpecificError(Exception): pass async def inner_test() -> None: try: async with self.init_client() as calc: self.assertEqual(await calc.addPrimitive(0, 0), 0) async with calc.createAddition() as add: add.accumulatePrimitive(1) raise SpecificError("Generic error") except SpecificError: pass else: self.fail("Didn't throw SpecificError") asyncio.run(inner_test())
true
true
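The test file in the record above exercises Thrift py3 "interactions": a client is opened with get_client() as an async context manager, and each createAddition() call opens a server-side interaction whose accumulated state lives until its own async-with block exits. Below is a minimal sketch of that pattern, distilled from the tests themselves; it assumes the same generated Calculator client and a server started by run_interaction(), so it is illustrative rather than standalone.

import asyncio

from interaction.clients import Calculator
from thrift.py3.client import ClientType, get_client


async def add_two_numbers(port: int) -> int:
    # Rocket-transport client, scoped to this block exactly as in the tests.
    async with get_client(
        Calculator,
        host="::1",
        port=port,
        client_type=ClientType.THRIFT_ROCKET_CLIENT_TYPE,
    ) as calc:
        # createAddition() opens an interaction; the running sum is kept
        # on the server until this inner block closes.
        async with calc.createAddition() as add:
            add.accumulatePrimitive(1)
            add.accumulatePrimitive(2)
            return await add.getPrimitive()  # expected result: 3

# e.g. asyncio.run(add_two_numbers(interaction.getPort())), as in the tests' setUp.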
f702895d53bfab5f192f3b620f6222bfa4a5bb35
18,264
py
Python
server.py
jonasswa/Guess-My-Search
2b36183a66bf55b163fc647117ee1b9c8f9cd543
[ "MIT" ]
1
2017-12-18T14:00:53.000Z
2017-12-18T14:00:53.000Z
server.py
soldox/Guess-My-Search
2b36183a66bf55b163fc647117ee1b9c8f9cd543
[ "MIT" ]
null
null
null
server.py
soldox/Guess-My-Search
2b36183a66bf55b163fc647117ee1b9c8f9cd543
[ "MIT" ]
null
null
null
from flask_socketio import SocketIO from flask import Flask, make_response, request, session from flask import render_template, session, url_for, redirect from threading import RLock from threading import Thread from utilslib import list_to_HTML_table from time import sleep from ClientStorage import Clients, User from gameObjects import Game, GameContainter, Player, ChatMmsg from random import shuffle #Init server app = Flask(__name__, template_folder='templates', static_folder='static') app.config['SECRET_KEY'] = 'lskwod=91230?=)ASD?=)("")@' socketio = SocketIO(app, async_mode='threading') timerLock = RLock() asyncLock = RLock() clients = Clients() games = GameContainter() debugging = False @app.route('/', methods = ['POST', 'GET']) @app.route('/index', methods = ['POST', 'GET']) def index(): verbose = (False or debugging) error = request.args.get('error') return make_response(render_template('makeGame.html', title = "Welcome", cool = 123, error = error)) @app.route('/gameRoom', methods = ['POST', 'GET']) def gameRoom(): global games verbose = (False or debugging) argumentsMakeGame = ['name', 'gameName', 'nrOfRounds', 'time', 'newGame'] argumentsJoinGame = ['name', 'gameName', 'newGame'] uniqueID = request.cookies.get('uniqueID') user = clients.find_User_By_uniqueID(uniqueID) if (not user): return redirect(url_for('index') + '?error=No user. Refreshing') if (not user.gameObject): data = request.form #MAKE A NEW GAME if data['newGame'] == 'yes': if verbose: print('In server:gameRoom() nrOfRounds set!') for key in data.keys(): argumentsMakeGame.remove(key) if argumentsMakeGame: return redirect(url_for('index') + '?error=Not enough arguments when creating the game') if verbose: print('In server:gameRoom() arguments needed for making a game are present') #Creating player and game game = games.add_Game(gameName=data['gameName'], nrOfRounds=data['nrOfRounds'], timePerRound=data['time']) player = game.add_Player(name=data['name'], userObject=user) if (not player): return redirect(url_for('index') + '?error=Player name already exists in this game...') if verbose: print('In server:gameRoom() game created with the name {} and user/player added'.format(game.gameName)) #Join an existing game else: data = request.form if verbose: print('In server:gameRoom() joining a game!') for key in data.keys(): argumentsJoinGame.remove(key) if argumentsJoinGame: return redirect(url_for('index') + '?error=Not enough arguments when joining the game') if verbose: print('In server:gameRoom() Searching for game: {}'.format(data['gameName'])) #Check if game exists game = games.find_Game_By_Name(data['gameName'], verbose) if (not game): if verbose: print('The game was not found') return redirect(url_for('index') + '?error=Game not found') #Check if name already taken for player in game.players: if player.name == data['name']: return redirect(url_for('index') + '?error=Name already taken') player = game.add_Player(name=data['name'], userObject=user) if verbose: print('In server:gameRoom() Player joined game') if verbose: print('In server:gameRoom() game created and user/player added') sendMessageToGame(game, '{} joined the game'.format(data['name'])) emitToGame(game = game, arg = ('refresh_Player_List',{}), lock = timerLock) else: if verbose: print('User alreade in game') error = None return make_response(render_template('gameRoom.html', title = "Game Room", gameName = user.gameObject.gameName, error = error)) @app.route('/gameRoomContent') def gameRoomContent(): uniqueID = request.cookies.get('uniqueID') user = 
clients.find_User_By_uniqueID(uniqueID) if userNotComplete(user, verbose = (False or debugging)): return 'ERROR: Something strange happened. Please leave game and rejoin' game = user.gameObject nrOfRounds = game.nrOfRounds timePerRound = game.timePerRound gameName = game.gameName roundNr = game.currentRound if (user.gameObject.get_Stage() == 'lobby'): return render_template('lobbyContent.html', gameName = gameName, nrOfRounds = nrOfRounds, timePerRound = timePerRound) elif (user.gameObject.get_Stage() == 'roundStart'): return render_template('roundContentStart.html', timePerRound = timePerRound, roundNr = roundNr, nrOfRounds = nrOfRounds) elif (user.gameObject.get_Stage() == 'roundSupply'): game.spawnedThread = None game.reset_Players_Ready() emitToGame(game = game, arg = ('refresh_Player_List',{}), lock = timerLock) print('GameContent:') print(game.get_Search_Strings(user.playerObject)) return render_template('roundContentSupply.html', nrOfPlayers = game.get_Nr_Of_Players(), searchStrings = game.get_Search_Strings(user.playerObject), nrOfEntries = game.nrOfEntry) elif (user.gameObject.get_Stage() == 'roundVote'): game.reset_Players_Ready() return makeVoteContent(user) elif (user.gameObject.get_Stage() == 'roundEnd'): game.reset_Players_Ready() return makeRoundEnd(user) elif (user.gameObject.get_Stage() == 'gameSummary'): game.reset_Players_Ready() return render_template('gameContentSummary.html') def makeVoteContent(user): game = user.gameObject playerObject = user.playerObject notReady = False voteEntries = game.get_Vote_Entries(playerObject) return render_template('roundContentVote.html', notReady = notReady, voteEntries = voteEntries) def makeRoundEnd(user): game = user.gameObject playerObject = user.playerObject playersPoints = {} for player in game.players: playersPoints[player.name] = player.points searchStrings = {} for entry in game.entries: searchStrings[entry.searchString] = {} return render_template('roundContentEnd.html', playersPoints = playersPoints) @app.route('/playerList') def playerList(): uniqueID = request.cookies.get('uniqueID') user = clients.find_User_By_uniqueID(uniqueID) verbose = (False or debugging) if userNotComplete(user, verbose = (False or debugging)): return redirect(url_for('index') + '?error=User not in game') playerList = user.gameObject.get_Player_Names_And_Status() if verbose: print('Got {} players'.format(len(playerList))) return render_template('playerList.html', playerList = playerList) @app.route('/chatContent') def chatContent(): uniqueID = request.cookies.get('uniqueID') user = clients.find_User_By_uniqueID(uniqueID) if userNotComplete(user, verbose = (False or debugging)): return redirect(url_for('index') + '?error=User not in game') chat = user.gameObject.chatMessages msgs = [] players = [] for msg in chat: player, msg = msg.get_Player_And_Msg() msgs.append(str(msg)) players.append(str(player)) if players: players.reverse() msgs.reverse() return render_template('chat.html', players = players, chatMsg = msgs) @app.route('/leave_Game') def leaveGame(): verbose = (False or debugging) uniqueID = request.cookies.get('uniqueID') user = clients.find_User_By_uniqueID(uniqueID) if (not user): if verbose: print('No user') return redirect(url_for('index')) game = user.gameObject game.remove_Player_By_User_Object(user) name = user.playerObject.name user.resetUser() if len(game.players)<1: games.removeGame(game=game, verbose = verbose) else: emitToGame(game = game, arg = ('refresh_Player_List',{}), lock = timerLock) emitToGame(game = game, arg = 
('client_warning',{'msg': name+' left the game'}), lock = timerLock) print (len(games._games)) return redirect(url_for('index')) @socketio.on('submit_entry') def submitEntry(msg): verbose = (False or debugging) if verbose: print ('Entry reveived by the server') uniqueID = request.cookies.get('uniqueID') user = clients.find_User_By_uniqueID(uniqueID) if verbose: print ('User retrieved') if (not user): if verbose: print('No user found when collecting the data') return if user.playerObject.entry: if verbose: print('User already submitted.') return if verbose: print ('Setting entry for user') user.gameObject.add_Entry(msg['searchString'], msg['suggestion'], user.playerObject) if verbose: print('Got entry') if user.gameObject.nrOfEntry >= user.gameObject.get_Nr_Of_Players(): emitToGame(game = user.gameObject, arg = ('refresh_div_content',{'div': 'entryList', 'cont': '/gameRoomContent'}), lock = timerLock) @socketio.on('submit_supply') def submitSupply(data): verbose = (False or debugging) if verbose: print ('\n---------------------\nSupply reveived by the server') uniqueID = request.cookies.get('uniqueID') user = clients.find_User_By_uniqueID(uniqueID) if (not user): if verbose: print('No user found when collecting the data') return game = user.gameObject if verbose: print ('The data received is: {}'.format(data)) if verbose: print ('player {} found'.format(user.playerObject.name)) if (not data): return if verbose: print('') if verbose: print('The actual data:') for key, value in data.items(): if verbose: print('Key: {} \t Value: {}'.format(key, value)) if value == '': continue game.entries[int(key)].add_Autocomplete(value, user.playerObject) if verbose: print('') game.nrOfSupply += 1 if verbose: print ('The game has received {}nr of supplies\n---------------------\n'.format(game.nrOfSupply)) #All "supplies" are received if user.gameObject.nrOfSupply >= user.gameObject.get_Nr_Of_Players(): if verbose: print ('We should now refresh the div content') emitToGame(game = user.gameObject, arg = ('refresh_div_content', {'div': 'contentVote', 'cont': '/gameRoomContent'}), lock = timerLock) #emitToGame(game = user.gameObject, arg = ('refresh_div_content',{'div': 'entryList', 'cont': '/gameRoomContent'}), lock = timerLock) if verbose and False: print('') for entry in game.entries: print('-------------------------------------------') print('The entry with the serch string: \t {}\nHas the following autocompletes added:'.format(entry.searchString)) for supply in entry.otherAutocompletes: print (supply.autoComplete) print('-------------------------------------------') print('') @socketio.on('submit_favorite') def submitFavorite(favorite): print('The server received a favorite: {}'.format(favorite)) uniqueID = request.cookies.get('uniqueID') user = clients.find_User_By_uniqueID(uniqueID) game = user.gameObject autoComplete = game.get_Autocomlete_by_ID(favorite) if (not autoComplete): user.playerObject.points -= 1 return user.playerObject.autocompleteVotedFor = autoComplete if (autoComplete.isGoogle): user.playerObject.points += 1 return autoComplete.playerObject.points += 1 return @socketio.on('toggle_ready') def toggleReady(msg): verbose = (True or debugging) uniqueID = request.cookies.get('uniqueID') user = clients.find_User_By_uniqueID(uniqueID) if (not user): if verbose: print('No user found when toggling ready') return player = user.playerObject if (not player): if verbose: print('No player found for the user/client.') player.ready = not player.ready game = player.gameObject #A game object will always 
exist if there is a playerObject emitToGame(game = game, arg = ('refresh_Player_List',{}), lock = timerLock) playersReady = game.all_Players_Ready() if verbose: print ('STAGE:', game.get_Stage()) #Start round if playersReady and game.gameStarted == False and not game.spawnedThread: game.gameStarted = True game.reset_Players_Ready() emitToGame(game = game, arg = ('change_content', {'url':'/gameRoomContent'}), lock = timerLock) emitToGame(game = game, arg = ('client_message', {'msg':'Game started. Have fun!'}), lock = timerLock) #Start timer game.spawnedThread = RoundTimer(int(game.timePerRound), user) game.spawnedThread.start() return #End round if playersReady and game.get_Stage() == 'roundStart': if verbose: print ('Round ended by users') user.gameObject.end_Stage() game.reset_Players_Ready() if verbose: print('Current stage of game is: {}'.format(user.gameObject.get_Stage())) emitToGame(game = user.gameObject, arg = ('round_End', {}), lock = timerLock) emitToGame(game = user.gameObject, arg = ('client_message', {'msg':'Round ended'}), lock = timerLock) return #End supply if playersReady and game.get_Stage() == 'roundSupply': user.gameObject.end_Stage() game.reset_Players_Ready() emitToGame(game = user.gameObject, arg = ('supply_End', {'nrOfEntries': user.gameObject.nrOfEntry}), lock = timerLock) emitToGame(game = user.gameObject, arg = ('client_message', {'msg':'Round ended'}), lock = timerLock) return #End vote if playersReady and game.get_Stage() == 'roundVote': user.gameObject.end_Stage() game.reset_Players_Ready() emitToGame(game = user.gameObject, arg = ('vote_End', {}), lock = timerLock) emitToGame(game = user.gameObject, arg = ('client_message', {'msg':'Vote ended'}), lock = timerLock) return class RoundTimer(Thread): def __init__(self, timeToWait, user): Thread.__init__(self) self.timeToWait = timeToWait self.user = user def run(self): sleep(self.timeToWait) if (not self.user.gameObject) or (self.user.gameObject.roundEnded): return self.user.gameObject.end_Stage() emitToGame(game = self.user.gameObject, arg = ('round_End', {'url':'/gameRoomContent'}), lock = timerLock) emitToGame(game = self.user.gameObject, arg = ('client_message', {'msg':'Round ended'}), lock = timerLock) return @socketio.on('handle_chat') def handleChat(msg): #update_chat verbose = (False or debugging) uniqueID = request.cookies.get('uniqueID') user = clients.find_User_By_uniqueID(uniqueID) if (not user): if verbose: print('No user') return redirect(url_for('index')) game = user.gameObject if (not game): if verbose: print('No game found when handling chat') return game.add_Chat_Msg(chatMsg=msg, playerName=user.playerObject.name) emitToGame(game=game, arg=('update_chat',{}), lock=timerLock) @socketio.on('connected') def client_connect(): verbose = (False or debugging) ''' I need to identify the user. If the user reloads, the session ID will change. A unique user-key is provisided for each new user, and the session ID is updated when the user reconnects. The unique ID is stored in a cookie. 
''' if verbose: print('Someone connected with the IP: {}'.format(request.remote_addr)) uniqueID = request.cookies.get('uniqueID') if verbose: print('\nUnique ID before update: {}'.format(uniqueID)) if uniqueID: if verbose: print('Unique ID cookie found') user = clients.find_User_By_uniqueID(uniqueID) if user: if verbose: print('User found') if request.sid != user.sid: user.sid = request.sid if verbose: print('Updated the SID') else: user = clients.add_User(sid=request.sid) if verbose: print('User created') user.uniqueID = uniqueID if verbose: print('Unique ID updated') else: if verbose: print('Made a new user') user = clients.add_User(sid=request.sid) if verbose: print('Emitted to server: set_cookie') emit(arg=('set_cookie', {'name': 'uniqueID' , 'data': user.uniqueID}), uniqueID = None, lock = timerLock, user= user) def sendMessageToGame(game, msg): for player in game.players: emit(arg = ('client_message', {'msg': msg}), uniqueID = None, lock = timerLock, user= player.userObject) def emitToGame(arg, game, lock): for player in game.players: emit(arg = arg, uniqueID = None, lock = lock, user = player.userObject) def emit(arg, uniqueID, lock, user = None): ''' An emit method that requires a lock. Dunno if I need this... TODO: Find out if i need the lock. ''' verbose = (False or debugging) with lock: if verbose: print ('Did an emit') if (not user): userSID = clients.find_User_By_uniqueID(uniqueID).sid else: userSID = user.sid socketio.emit(*arg, room = userSID) def userNotComplete(user, verbose = (False or debugging)): if verbose: print('\nUser name: {}'.format(user.name)) print('User gameObject pointer {}'.format(user.gameObject)) print('User playerObject pointer {}\n'.format(user.playerObject)) if ((not user) or (not user.gameObject) or (not user.playerObject)): return True else: return False if __name__ == "__main__": socketio.run(app, debug = False)
36.166337
143
0.636498
from flask_socketio import SocketIO from flask import Flask, make_response, request, session from flask import render_template, session, url_for, redirect from threading import RLock from threading import Thread from utilslib import list_to_HTML_table from time import sleep from ClientStorage import Clients, User from gameObjects import Game, GameContainter, Player, ChatMmsg from random import shuffle app = Flask(__name__, template_folder='templates', static_folder='static') app.config['SECRET_KEY'] = 'lskwod=91230?=)ASD?=)("")@' socketio = SocketIO(app, async_mode='threading') timerLock = RLock() asyncLock = RLock() clients = Clients() games = GameContainter() debugging = False @app.route('/', methods = ['POST', 'GET']) @app.route('/index', methods = ['POST', 'GET']) def index(): verbose = (False or debugging) error = request.args.get('error') return make_response(render_template('makeGame.html', title = "Welcome", cool = 123, error = error)) @app.route('/gameRoom', methods = ['POST', 'GET']) def gameRoom(): global games verbose = (False or debugging) argumentsMakeGame = ['name', 'gameName', 'nrOfRounds', 'time', 'newGame'] argumentsJoinGame = ['name', 'gameName', 'newGame'] uniqueID = request.cookies.get('uniqueID') user = clients.find_User_By_uniqueID(uniqueID) if (not user): return redirect(url_for('index') + '?error=No user. Refreshing') if (not user.gameObject): data = request.form if data['newGame'] == 'yes': if verbose: print('In server:gameRoom() nrOfRounds set!') for key in data.keys(): argumentsMakeGame.remove(key) if argumentsMakeGame: return redirect(url_for('index') + '?error=Not enough arguments when creating the game') if verbose: print('In server:gameRoom() arguments needed for making a game are present') game = games.add_Game(gameName=data['gameName'], nrOfRounds=data['nrOfRounds'], timePerRound=data['time']) player = game.add_Player(name=data['name'], userObject=user) if (not player): return redirect(url_for('index') + '?error=Player name already exists in this game...') if verbose: print('In server:gameRoom() game created with the name {} and user/player added'.format(game.gameName)) else: data = request.form if verbose: print('In server:gameRoom() joining a game!') for key in data.keys(): argumentsJoinGame.remove(key) if argumentsJoinGame: return redirect(url_for('index') + '?error=Not enough arguments when joining the game') if verbose: print('In server:gameRoom() Searching for game: {}'.format(data['gameName'])) game = games.find_Game_By_Name(data['gameName'], verbose) if (not game): if verbose: print('The game was not found') return redirect(url_for('index') + '?error=Game not found') for player in game.players: if player.name == data['name']: return redirect(url_for('index') + '?error=Name already taken') player = game.add_Player(name=data['name'], userObject=user) if verbose: print('In server:gameRoom() Player joined game') if verbose: print('In server:gameRoom() game created and user/player added') sendMessageToGame(game, '{} joined the game'.format(data['name'])) emitToGame(game = game, arg = ('refresh_Player_List',{}), lock = timerLock) else: if verbose: print('User alreade in game') error = None return make_response(render_template('gameRoom.html', title = "Game Room", gameName = user.gameObject.gameName, error = error)) @app.route('/gameRoomContent') def gameRoomContent(): uniqueID = request.cookies.get('uniqueID') user = clients.find_User_By_uniqueID(uniqueID) if userNotComplete(user, verbose = (False or debugging)): return 'ERROR: Something strange 
happened. Please leave game and rejoin' game = user.gameObject nrOfRounds = game.nrOfRounds timePerRound = game.timePerRound gameName = game.gameName roundNr = game.currentRound if (user.gameObject.get_Stage() == 'lobby'): return render_template('lobbyContent.html', gameName = gameName, nrOfRounds = nrOfRounds, timePerRound = timePerRound) elif (user.gameObject.get_Stage() == 'roundStart'): return render_template('roundContentStart.html', timePerRound = timePerRound, roundNr = roundNr, nrOfRounds = nrOfRounds) elif (user.gameObject.get_Stage() == 'roundSupply'): game.spawnedThread = None game.reset_Players_Ready() emitToGame(game = game, arg = ('refresh_Player_List',{}), lock = timerLock) print('GameContent:') print(game.get_Search_Strings(user.playerObject)) return render_template('roundContentSupply.html', nrOfPlayers = game.get_Nr_Of_Players(), searchStrings = game.get_Search_Strings(user.playerObject), nrOfEntries = game.nrOfEntry) elif (user.gameObject.get_Stage() == 'roundVote'): game.reset_Players_Ready() return makeVoteContent(user) elif (user.gameObject.get_Stage() == 'roundEnd'): game.reset_Players_Ready() return makeRoundEnd(user) elif (user.gameObject.get_Stage() == 'gameSummary'): game.reset_Players_Ready() return render_template('gameContentSummary.html') def makeVoteContent(user): game = user.gameObject playerObject = user.playerObject notReady = False voteEntries = game.get_Vote_Entries(playerObject) return render_template('roundContentVote.html', notReady = notReady, voteEntries = voteEntries) def makeRoundEnd(user): game = user.gameObject playerObject = user.playerObject playersPoints = {} for player in game.players: playersPoints[player.name] = player.points searchStrings = {} for entry in game.entries: searchStrings[entry.searchString] = {} return render_template('roundContentEnd.html', playersPoints = playersPoints) @app.route('/playerList') def playerList(): uniqueID = request.cookies.get('uniqueID') user = clients.find_User_By_uniqueID(uniqueID) verbose = (False or debugging) if userNotComplete(user, verbose = (False or debugging)): return redirect(url_for('index') + '?error=User not in game') playerList = user.gameObject.get_Player_Names_And_Status() if verbose: print('Got {} players'.format(len(playerList))) return render_template('playerList.html', playerList = playerList) @app.route('/chatContent') def chatContent(): uniqueID = request.cookies.get('uniqueID') user = clients.find_User_By_uniqueID(uniqueID) if userNotComplete(user, verbose = (False or debugging)): return redirect(url_for('index') + '?error=User not in game') chat = user.gameObject.chatMessages msgs = [] players = [] for msg in chat: player, msg = msg.get_Player_And_Msg() msgs.append(str(msg)) players.append(str(player)) if players: players.reverse() msgs.reverse() return render_template('chat.html', players = players, chatMsg = msgs) @app.route('/leave_Game') def leaveGame(): verbose = (False or debugging) uniqueID = request.cookies.get('uniqueID') user = clients.find_User_By_uniqueID(uniqueID) if (not user): if verbose: print('No user') return redirect(url_for('index')) game = user.gameObject game.remove_Player_By_User_Object(user) name = user.playerObject.name user.resetUser() if len(game.players)<1: games.removeGame(game=game, verbose = verbose) else: emitToGame(game = game, arg = ('refresh_Player_List',{}), lock = timerLock) emitToGame(game = game, arg = ('client_warning',{'msg': name+' left the game'}), lock = timerLock) print (len(games._games)) return redirect(url_for('index')) 
@socketio.on('submit_entry') def submitEntry(msg): verbose = (False or debugging) if verbose: print ('Entry reveived by the server') uniqueID = request.cookies.get('uniqueID') user = clients.find_User_By_uniqueID(uniqueID) if verbose: print ('User retrieved') if (not user): if verbose: print('No user found when collecting the data') return if user.playerObject.entry: if verbose: print('User already submitted.') return if verbose: print ('Setting entry for user') user.gameObject.add_Entry(msg['searchString'], msg['suggestion'], user.playerObject) if verbose: print('Got entry') if user.gameObject.nrOfEntry >= user.gameObject.get_Nr_Of_Players(): emitToGame(game = user.gameObject, arg = ('refresh_div_content',{'div': 'entryList', 'cont': '/gameRoomContent'}), lock = timerLock) @socketio.on('submit_supply') def submitSupply(data): verbose = (False or debugging) if verbose: print ('\n---------------------\nSupply reveived by the server') uniqueID = request.cookies.get('uniqueID') user = clients.find_User_By_uniqueID(uniqueID) if (not user): if verbose: print('No user found when collecting the data') return game = user.gameObject if verbose: print ('The data received is: {}'.format(data)) if verbose: print ('player {} found'.format(user.playerObject.name)) if (not data): return if verbose: print('') if verbose: print('The actual data:') for key, value in data.items(): if verbose: print('Key: {} \t Value: {}'.format(key, value)) if value == '': continue game.entries[int(key)].add_Autocomplete(value, user.playerObject) if verbose: print('') game.nrOfSupply += 1 if verbose: print ('The game has received {}nr of supplies\n---------------------\n'.format(game.nrOfSupply)) if user.gameObject.nrOfSupply >= user.gameObject.get_Nr_Of_Players(): if verbose: print ('We should now refresh the div content') emitToGame(game = user.gameObject, arg = ('refresh_div_content', {'div': 'contentVote', 'cont': '/gameRoomContent'}), lock = timerLock) if verbose and False: print('') for entry in game.entries: print('-------------------------------------------') print('The entry with the serch string: \t {}\nHas the following autocompletes added:'.format(entry.searchString)) for supply in entry.otherAutocompletes: print (supply.autoComplete) print('-------------------------------------------') print('') @socketio.on('submit_favorite') def submitFavorite(favorite): print('The server received a favorite: {}'.format(favorite)) uniqueID = request.cookies.get('uniqueID') user = clients.find_User_By_uniqueID(uniqueID) game = user.gameObject autoComplete = game.get_Autocomlete_by_ID(favorite) if (not autoComplete): user.playerObject.points -= 1 return user.playerObject.autocompleteVotedFor = autoComplete if (autoComplete.isGoogle): user.playerObject.points += 1 return autoComplete.playerObject.points += 1 return @socketio.on('toggle_ready') def toggleReady(msg): verbose = (True or debugging) uniqueID = request.cookies.get('uniqueID') user = clients.find_User_By_uniqueID(uniqueID) if (not user): if verbose: print('No user found when toggling ready') return player = user.playerObject if (not player): if verbose: print('No player found for the user/client.') player.ready = not player.ready game = player.gameObject emitToGame(game = game, arg = ('refresh_Player_List',{}), lock = timerLock) playersReady = game.all_Players_Ready() if verbose: print ('STAGE:', game.get_Stage()) if playersReady and game.gameStarted == False and not game.spawnedThread: game.gameStarted = True game.reset_Players_Ready() emitToGame(game = game, arg = 
('change_content', {'url':'/gameRoomContent'}), lock = timerLock) emitToGame(game = game, arg = ('client_message', {'msg':'Game started. Have fun!'}), lock = timerLock) game.spawnedThread = RoundTimer(int(game.timePerRound), user) game.spawnedThread.start() return if playersReady and game.get_Stage() == 'roundStart': if verbose: print ('Round ended by users') user.gameObject.end_Stage() game.reset_Players_Ready() if verbose: print('Current stage of game is: {}'.format(user.gameObject.get_Stage())) emitToGame(game = user.gameObject, arg = ('round_End', {}), lock = timerLock) emitToGame(game = user.gameObject, arg = ('client_message', {'msg':'Round ended'}), lock = timerLock) return if playersReady and game.get_Stage() == 'roundSupply': user.gameObject.end_Stage() game.reset_Players_Ready() emitToGame(game = user.gameObject, arg = ('supply_End', {'nrOfEntries': user.gameObject.nrOfEntry}), lock = timerLock) emitToGame(game = user.gameObject, arg = ('client_message', {'msg':'Round ended'}), lock = timerLock) return if playersReady and game.get_Stage() == 'roundVote': user.gameObject.end_Stage() game.reset_Players_Ready() emitToGame(game = user.gameObject, arg = ('vote_End', {}), lock = timerLock) emitToGame(game = user.gameObject, arg = ('client_message', {'msg':'Vote ended'}), lock = timerLock) return class RoundTimer(Thread): def __init__(self, timeToWait, user): Thread.__init__(self) self.timeToWait = timeToWait self.user = user def run(self): sleep(self.timeToWait) if (not self.user.gameObject) or (self.user.gameObject.roundEnded): return self.user.gameObject.end_Stage() emitToGame(game = self.user.gameObject, arg = ('round_End', {'url':'/gameRoomContent'}), lock = timerLock) emitToGame(game = self.user.gameObject, arg = ('client_message', {'msg':'Round ended'}), lock = timerLock) return @socketio.on('handle_chat') def handleChat(msg): verbose = (False or debugging) uniqueID = request.cookies.get('uniqueID') user = clients.find_User_By_uniqueID(uniqueID) if (not user): if verbose: print('No user') return redirect(url_for('index')) game = user.gameObject if (not game): if verbose: print('No game found when handling chat') return game.add_Chat_Msg(chatMsg=msg, playerName=user.playerObject.name) emitToGame(game=game, arg=('update_chat',{}), lock=timerLock) @socketio.on('connected') def client_connect(): verbose = (False or debugging) if verbose: print('Someone connected with the IP: {}'.format(request.remote_addr)) uniqueID = request.cookies.get('uniqueID') if verbose: print('\nUnique ID before update: {}'.format(uniqueID)) if uniqueID: if verbose: print('Unique ID cookie found') user = clients.find_User_By_uniqueID(uniqueID) if user: if verbose: print('User found') if request.sid != user.sid: user.sid = request.sid if verbose: print('Updated the SID') else: user = clients.add_User(sid=request.sid) if verbose: print('User created') user.uniqueID = uniqueID if verbose: print('Unique ID updated') else: if verbose: print('Made a new user') user = clients.add_User(sid=request.sid) if verbose: print('Emitted to server: set_cookie') emit(arg=('set_cookie', {'name': 'uniqueID' , 'data': user.uniqueID}), uniqueID = None, lock = timerLock, user= user) def sendMessageToGame(game, msg): for player in game.players: emit(arg = ('client_message', {'msg': msg}), uniqueID = None, lock = timerLock, user= player.userObject) def emitToGame(arg, game, lock): for player in game.players: emit(arg = arg, uniqueID = None, lock = lock, user = player.userObject) def emit(arg, uniqueID, lock, user = None): verbose = 
(False or debugging) with lock: if verbose: print ('Did an emit') if (not user): userSID = clients.find_User_By_uniqueID(uniqueID).sid else: userSID = user.sid socketio.emit(*arg, room = userSID) def userNotComplete(user, verbose = (False or debugging)): if verbose: print('\nUser name: {}'.format(user.name)) print('User gameObject pointer {}'.format(user.gameObject)) print('User playerObject pointer {}\n'.format(user.playerObject)) if ((not user) or (not user.gameObject) or (not user.playerObject)): return True else: return False if __name__ == "__main__": socketio.run(app, debug = False)
true
true
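The server.py record above routes every Socket.IO event to a single client by socket ID and wraps each emit in a shared re-entrant lock (its emit()/emitToGame() helpers). The following is a minimal sketch of that locked, per-client emit pattern, using only Flask-SocketIO calls that also appear in the recorded file; all other names are placeholders of mine.

from threading import RLock

from flask import Flask, request
from flask_socketio import SocketIO

app = Flask(__name__)
app.config["SECRET_KEY"] = "change-me"  # placeholder secret
socketio = SocketIO(app, async_mode="threading")
timer_lock = RLock()


def emit_to_sid(event, payload, sid):
    # Serialise emits so handlers running on different threads do not interleave.
    with timer_lock:
        socketio.emit(event, payload, room=sid)


@socketio.on("connected")
def on_connected():
    # request.sid identifies the connected client inside a Socket.IO handler.
    emit_to_sid("client_message", {"msg": "welcome"}, request.sid)


if __name__ == "__main__":
    socketio.run(app, debug=False)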
f7028a63f253b20a569bb4743e35afa609f63526
6,332
py
Python
deeplabv3/datahandler.py
RajArPatra/Improvement-semantic-segmentation-using-clustring-and-class-voating
1e4b5fa5ccc462d88a68f3c88c8af31fa3f14b8b
[ "MIT" ]
null
null
null
deeplabv3/datahandler.py
RajArPatra/Improvement-semantic-segmentation-using-clustring-and-class-voating
1e4b5fa5ccc462d88a68f3c88c8af31fa3f14b8b
[ "MIT" ]
null
null
null
deeplabv3/datahandler.py
RajArPatra/Improvement-semantic-segmentation-using-clustring-and-class-voating
1e4b5fa5ccc462d88a68f3c88c8af31fa3f14b8b
[ "MIT" ]
3
2020-04-19T06:46:25.000Z
2020-04-24T07:48:15.000Z
from torch.utils.data import Dataset, DataLoader import glob import os import numpy as np import cv2 import torch from torchvision import transforms, utils from skimage.transform import resize class SegDataset(Dataset): """Segmentation Dataset""" def __init__(self, root_dir, imageFolder, maskFolder, transform=None, seed=None, fraction=None, subset=None, imagecolormode='rgb', maskcolormode='grayscale'): """ Args: root_dir (string): Directory with all the images and should have the following structure. root --Images -----Img 1 -----Img N --Mask -----Mask 1 -----Mask N imageFolder (string) = 'Images' : Name of the folder which contains the Images. maskFolder (string) = 'Masks : Name of the folder which contains the Masks. transform (callable, optional): Optional transform to be applied on a sample. seed: Specify a seed for the train and test split fraction: A float value from 0 to 1 which specifies the validation split fraction subset: 'Train' or 'Test' to select the appropriate set. imagecolormode: 'rgb' or 'grayscale' maskcolormode: 'rgb' or 'grayscale' """ self.color_dict = {'rgb': 1, 'grayscale': 0} assert(imagecolormode in ['rgb', 'grayscale']) assert(maskcolormode in ['rgb', 'grayscale']) self.imagecolorflag = self.color_dict[imagecolormode] self.maskcolorflag = self.color_dict[maskcolormode] self.root_dir = root_dir self.transform = transform if not fraction: self.image_names = sorted( glob.glob(os.path.join(self.root_dir, imageFolder, '*'))) self.mask_names = sorted( glob.glob(os.path.join(self.root_dir, maskFolder, '*'))) else: assert(subset in ['Train', 'Test']) self.fraction = fraction self.image_list = np.array( sorted(glob.glob(os.path.join(self.root_dir, imageFolder, '*')))) self.mask_list = np.array( sorted(glob.glob(os.path.join(self.root_dir, maskFolder, '*')))) if seed: np.random.seed(seed) indices = np.arange(len(self.image_list)) np.random.shuffle(indices) self.image_list = self.image_list[indices] self.mask_list = self.mask_list[indices] if subset == 'Train': self.image_names = self.image_list[:int( np.ceil(len(self.image_list)*(1-self.fraction)))] self.mask_names = self.mask_list[:int( np.ceil(len(self.mask_list)*(1-self.fraction)))] else: self.image_names = self.image_list[int( np.ceil(len(self.image_list)*(1-self.fraction))):] self.mask_names = self.mask_list[int( np.ceil(len(self.mask_list)*(1-self.fraction))):] def __len__(self): return len(self.image_names) def __getitem__(self, idx): img_name = self.image_names[idx] if self.imagecolorflag: image = cv2.imread( img_name, self.imagecolorflag).transpose(2, 0, 1) else: image = cv2.imread(img_name, self.imagecolorflag) msk_name = self.mask_names[idx] if self.maskcolorflag: mask = cv2.imread(msk_name, self.maskcolorflag).transpose(2, 0, 1) else: mask = cv2.imread(msk_name, self.maskcolorflag) sample = {'image': image, 'mask': mask} if self.transform: sample = self.transform(sample) return sample # Define few transformations for the Segmentation Dataloader class Resize(object): """Resize image and/or masks.""" def __init__(self, imageresize, maskresize): self.imageresize = imageresize self.maskresize = maskresize def __call__(self, sample): image, mask = sample['image'], sample['mask'] if len(image.shape) == 3: image = image.transpose(1, 2, 0) if len(mask.shape) == 3: mask = mask.transpose(1, 2, 0) mask = cv2.resize(mask, self.maskresize, cv2.INTER_AREA) #mask = 256 * resize(mask, (256, 256), anti_aliasing = True) image = cv2.resize(image, self.imageresize, cv2.INTER_AREA) #image = 256 * resize(image, (256, 256), 
anti_aliasing = True) if len(image.shape) == 3: image = image.transpose(2, 0, 1) if len(mask.shape) == 3: mask = mask.transpose(2, 0, 1) return {'image': image, 'mask': mask} class ToTensor(object): """Convert ndarrays in sample to Tensors.""" def __call__(self, sample, maskresize=None, imageresize=None): image, mask = sample['image'], sample['mask'] if len(mask.shape) == 2: mask = mask.reshape((1,)+mask.shape) if len(image.shape) == 2: image = image.reshape((1,)+image.shape) return {'image': torch.from_numpy(image), 'mask': torch.from_numpy(mask)} class Normalize(object): '''Normalize image''' def __call__(self, sample): image, mask = sample['image'], sample['mask'] return {'image': image.type(torch.FloatTensor)/255, 'mask': mask.type(torch.FloatTensor)/255} def get_dataloader_single_folder(data_dir, imageFolder='Images', maskFolder='Masks', fraction=0.2, batch_size=4): """ Create training and testing dataloaders from a single folder. """ data_transforms = { 'Train': transforms.Compose([Resize((256, 256), (256, 256)), ToTensor(), Normalize()]), 'Test': transforms.Compose([Resize((256,256), (256, 256)), ToTensor(), Normalize()]), } image_datasets = {x: SegDataset(data_dir, imageFolder=imageFolder, maskFolder=maskFolder, seed=100, fraction=fraction, subset=x, transform=data_transforms[x]) for x in ['Train', 'Test']} dataloaders = {x: DataLoader(image_datasets[x], batch_size=batch_size, shuffle=True, num_workers=8) for x in ['Train', 'Test']} return dataloaders
39.08642
162
0.58844
from torch.utils.data import Dataset, DataLoader import glob import os import numpy as np import cv2 import torch from torchvision import transforms, utils from skimage.transform import resize class SegDataset(Dataset): def __init__(self, root_dir, imageFolder, maskFolder, transform=None, seed=None, fraction=None, subset=None, imagecolormode='rgb', maskcolormode='grayscale'): self.color_dict = {'rgb': 1, 'grayscale': 0} assert(imagecolormode in ['rgb', 'grayscale']) assert(maskcolormode in ['rgb', 'grayscale']) self.imagecolorflag = self.color_dict[imagecolormode] self.maskcolorflag = self.color_dict[maskcolormode] self.root_dir = root_dir self.transform = transform if not fraction: self.image_names = sorted( glob.glob(os.path.join(self.root_dir, imageFolder, '*'))) self.mask_names = sorted( glob.glob(os.path.join(self.root_dir, maskFolder, '*'))) else: assert(subset in ['Train', 'Test']) self.fraction = fraction self.image_list = np.array( sorted(glob.glob(os.path.join(self.root_dir, imageFolder, '*')))) self.mask_list = np.array( sorted(glob.glob(os.path.join(self.root_dir, maskFolder, '*')))) if seed: np.random.seed(seed) indices = np.arange(len(self.image_list)) np.random.shuffle(indices) self.image_list = self.image_list[indices] self.mask_list = self.mask_list[indices] if subset == 'Train': self.image_names = self.image_list[:int( np.ceil(len(self.image_list)*(1-self.fraction)))] self.mask_names = self.mask_list[:int( np.ceil(len(self.mask_list)*(1-self.fraction)))] else: self.image_names = self.image_list[int( np.ceil(len(self.image_list)*(1-self.fraction))):] self.mask_names = self.mask_list[int( np.ceil(len(self.mask_list)*(1-self.fraction))):] def __len__(self): return len(self.image_names) def __getitem__(self, idx): img_name = self.image_names[idx] if self.imagecolorflag: image = cv2.imread( img_name, self.imagecolorflag).transpose(2, 0, 1) else: image = cv2.imread(img_name, self.imagecolorflag) msk_name = self.mask_names[idx] if self.maskcolorflag: mask = cv2.imread(msk_name, self.maskcolorflag).transpose(2, 0, 1) else: mask = cv2.imread(msk_name, self.maskcolorflag) sample = {'image': image, 'mask': mask} if self.transform: sample = self.transform(sample) return sample class Resize(object): def __init__(self, imageresize, maskresize): self.imageresize = imageresize self.maskresize = maskresize def __call__(self, sample): image, mask = sample['image'], sample['mask'] if len(image.shape) == 3: image = image.transpose(1, 2, 0) if len(mask.shape) == 3: mask = mask.transpose(1, 2, 0) mask = cv2.resize(mask, self.maskresize, cv2.INTER_AREA) image = cv2.resize(image, self.imageresize, cv2.INTER_AREA) if len(image.shape) == 3: image = image.transpose(2, 0, 1) if len(mask.shape) == 3: mask = mask.transpose(2, 0, 1) return {'image': image, 'mask': mask} class ToTensor(object): def __call__(self, sample, maskresize=None, imageresize=None): image, mask = sample['image'], sample['mask'] if len(mask.shape) == 2: mask = mask.reshape((1,)+mask.shape) if len(image.shape) == 2: image = image.reshape((1,)+image.shape) return {'image': torch.from_numpy(image), 'mask': torch.from_numpy(mask)} class Normalize(object): def __call__(self, sample): image, mask = sample['image'], sample['mask'] return {'image': image.type(torch.FloatTensor)/255, 'mask': mask.type(torch.FloatTensor)/255} def get_dataloader_single_folder(data_dir, imageFolder='Images', maskFolder='Masks', fraction=0.2, batch_size=4): data_transforms = { 'Train': transforms.Compose([Resize((256, 256), (256, 256)), ToTensor(), 
Normalize()]), 'Test': transforms.Compose([Resize((256,256), (256, 256)), ToTensor(), Normalize()]), } image_datasets = {x: SegDataset(data_dir, imageFolder=imageFolder, maskFolder=maskFolder, seed=100, fraction=fraction, subset=x, transform=data_transforms[x]) for x in ['Train', 'Test']} dataloaders = {x: DataLoader(image_datasets[x], batch_size=batch_size, shuffle=True, num_workers=8) for x in ['Train', 'Test']} return dataloaders
true
true
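The datahandler module in the record above wires SegDataset plus the Resize/ToTensor/Normalize transforms into Train/Test DataLoaders via get_dataloader_single_folder(). A hypothetical usage snippet follows; the data directory path and the import path are placeholders, and the expected tensor shapes follow from the 256x256 resize and the rgb/grayscale colour flags used in the file.

from datahandler import get_dataloader_single_folder

# data_dir must contain an "Images" and a "Masks" subfolder.
dataloaders = get_dataloader_single_folder(
    data_dir="path/to/dataset",
    fraction=0.2,     # 20% of the (seeded, shuffled) samples become the Test split
    batch_size=4,
)

batch = next(iter(dataloaders["Train"]))
print(batch["image"].shape)   # e.g. torch.Size([4, 3, 256, 256]), float values in [0, 1]
print(batch["mask"].shape)    # e.g. torch.Size([4, 1, 256, 256]), float values in [0, 1]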
f7028b5259524a51a3a82357b0d2679eaf4c8043
4,277
py
Python
tests/utils/test_polydata_utils.py
UCL/scikit-surgeryvtk
7d88da36b509158a5385e15fb11f69923fa13fa5
[ "BSD-3-Clause" ]
9
2020-05-29T12:03:12.000Z
2021-06-17T07:14:16.000Z
tests/utils/test_polydata_utils.py
NMontanaBrown/scikit-surgeryvtk
85921775b72f40cdf4ee606ab83531758b0345bb
[ "BSD-3-Clause" ]
164
2020-05-20T07:57:47.000Z
2021-12-08T09:43:26.000Z
tests/utils/test_polydata_utils.py
SciKit-Surgery/scikit-surgeryvtk
75a2cb15f976348b844fea165bddf187efa722f0
[ "BSD-3-Clause" ]
2
2020-10-16T13:50:34.000Z
2021-11-05T13:08:08.000Z
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*- import pytest import vtk import numpy as np import sksurgeryvtk.utils.polydata_utils as pdu import sksurgeryvtk.models.vtk_surface_model as vbs def test_overlapping_bounds(): radius_0=10.0 radius_1=7.0 centre_1=5.0 radius_2=4.0 centre_2=15.0 radius_3=4.0 centre_3=0.0 sphere_0 = vtk.vtkSphereSource() sphere_0.SetRadius(radius_0) sphere_0.SetPhiResolution(12) sphere_0.SetThetaResolution(12) sphere_0.SetCenter(0.0, 0.0, 0.0) sphere_0.Update() vtk_model_0 = sphere_0.GetOutput() sphere_1 = vtk.vtkSphereSource() sphere_1.SetRadius(radius_1) sphere_1.SetPhiResolution(12) sphere_1.SetThetaResolution(21) sphere_1.SetCenter(centre_1, 0.0, 0.0) sphere_1.Update() vtk_model_1 = sphere_1.GetOutput() sphere_2 = vtk.vtkSphereSource() sphere_2.SetRadius(radius_2) sphere_2.SetPhiResolution(12) sphere_2.SetThetaResolution(21) sphere_2.SetCenter(centre_2, 0.0, 0.0) sphere_2.Update() vtk_model_2 = sphere_2.GetOutput() sphere_3 = vtk.vtkSphereSource() sphere_3.SetRadius(radius_3) sphere_3.SetPhiResolution(12) sphere_3.SetThetaResolution(21) sphere_3.SetCenter(centre_3, 0.0, 0.0) sphere_3.Update() vtk_model_3 = sphere_3.GetOutput() assert (pdu.check_overlapping_bounds( vtk_model_0, vtk_model_1)) assert (pdu.check_overlapping_bounds( vtk_model_1, vtk_model_0)) assert (not pdu.check_overlapping_bounds( vtk_model_0, vtk_model_2)) assert (not pdu.check_overlapping_bounds( vtk_model_2, vtk_model_0)) assert (pdu.check_overlapping_bounds( vtk_model_0, vtk_model_3)) assert (pdu.check_overlapping_bounds( vtk_model_3, vtk_model_0)) def test_dice_overlap(): radius_0=10.0 radius_1=7.0 centre_1=5.0 sphere_0 = vtk.vtkSphereSource() sphere_0.SetRadius(radius_0) sphere_0.SetPhiResolution(60) sphere_0.SetThetaResolution(60) sphere_0.SetCenter(0.0, 0.0, 0.0) sphere_0.Update() vtk_model_0 = sphere_0.GetOutput() sphere_1 = vtk.vtkSphereSource() sphere_1.SetRadius(radius_1) sphere_1.SetPhiResolution(60) sphere_1.SetThetaResolution(60) sphere_1.SetCenter(centre_1, 0.0, 0.0) sphere_1.Update() vtk_model_1 = sphere_1.GetOutput() dice, volume_0, volume_1, volume_01 = pdu.two_polydata_dice(vtk_model_0, vtk_model_1) np.testing.assert_approx_equal(volume_0, 4.0 * np.pi * radius_0**3.0 / 3.0, significant=2) np.testing.assert_approx_equal(volume_1, 4.0 * np.pi * radius_1**3.0 / 3.0, significant=2) #from http://mathworld.wolfram.com/Sphere-SphereIntersection.html cap_height_0 = ( radius_1 - radius_0 + centre_1) * ( radius_1 + radius_0 - centre_1) / (2 * centre_1) cap_height_1 = ( radius_0 - radius_1 + centre_1) * ( radius_0 + radius_1 - centre_1) / (2 * centre_1) cap_vol_0 = np.pi * cap_height_0**2 * ( 3 * radius_0 - cap_height_0) / 3 cap_vol_1 = np.pi * cap_height_1**2 * ( 3 * radius_1 - cap_height_1) / 3 analytic = cap_vol_0 + cap_vol_1 np.testing.assert_approx_equal(volume_01, analytic, significant=2) np.testing.assert_approx_equal(dice, 2*volume_01 / ( volume_0 + volume_1) , significant=10) def test_dice_no_overlap(): radius_0=5.5 radius_1=4.3 centre_1=12.0 sphere_0 = vtk.vtkSphereSource() sphere_0.SetRadius(radius_0) sphere_0.SetPhiResolution(60) sphere_0.SetThetaResolution(60) sphere_0.SetCenter(0.0, 0.0, 0.0) sphere_0.Update() vtk_model_0 = sphere_0.GetOutput() sphere_1 = vtk.vtkSphereSource() sphere_1.SetRadius(radius_1) sphere_1.SetPhiResolution(60) sphere_1.SetThetaResolution(60) sphere_1.SetCenter(centre_1, 0.0, 0.0) sphere_1.Update() vtk_model_1 = sphere_1.GetOutput() dice, volume_0, volume_1, volume_01 = pdu.two_polydata_dice(vtk_model_0, vtk_model_1) 
np.testing.assert_approx_equal(volume_0, 4.0 * np.pi * radius_0**3.0 / 3.0, significant=2) np.testing.assert_approx_equal(volume_1, 4.0 * np.pi * radius_1**3.0 / 3.0, significant=2) analytic = 0.0 np.testing.assert_approx_equal(volume_01, analytic, significant=2) np.testing.assert_approx_equal(dice, 2*volume_01 / ( volume_0 + volume_1) , significant=10)
34.216
105
0.715455
import pytest import vtk import numpy as np import sksurgeryvtk.utils.polydata_utils as pdu import sksurgeryvtk.models.vtk_surface_model as vbs def test_overlapping_bounds(): radius_0=10.0 radius_1=7.0 centre_1=5.0 radius_2=4.0 centre_2=15.0 radius_3=4.0 centre_3=0.0 sphere_0 = vtk.vtkSphereSource() sphere_0.SetRadius(radius_0) sphere_0.SetPhiResolution(12) sphere_0.SetThetaResolution(12) sphere_0.SetCenter(0.0, 0.0, 0.0) sphere_0.Update() vtk_model_0 = sphere_0.GetOutput() sphere_1 = vtk.vtkSphereSource() sphere_1.SetRadius(radius_1) sphere_1.SetPhiResolution(12) sphere_1.SetThetaResolution(21) sphere_1.SetCenter(centre_1, 0.0, 0.0) sphere_1.Update() vtk_model_1 = sphere_1.GetOutput() sphere_2 = vtk.vtkSphereSource() sphere_2.SetRadius(radius_2) sphere_2.SetPhiResolution(12) sphere_2.SetThetaResolution(21) sphere_2.SetCenter(centre_2, 0.0, 0.0) sphere_2.Update() vtk_model_2 = sphere_2.GetOutput() sphere_3 = vtk.vtkSphereSource() sphere_3.SetRadius(radius_3) sphere_3.SetPhiResolution(12) sphere_3.SetThetaResolution(21) sphere_3.SetCenter(centre_3, 0.0, 0.0) sphere_3.Update() vtk_model_3 = sphere_3.GetOutput() assert (pdu.check_overlapping_bounds( vtk_model_0, vtk_model_1)) assert (pdu.check_overlapping_bounds( vtk_model_1, vtk_model_0)) assert (not pdu.check_overlapping_bounds( vtk_model_0, vtk_model_2)) assert (not pdu.check_overlapping_bounds( vtk_model_2, vtk_model_0)) assert (pdu.check_overlapping_bounds( vtk_model_0, vtk_model_3)) assert (pdu.check_overlapping_bounds( vtk_model_3, vtk_model_0)) def test_dice_overlap(): radius_0=10.0 radius_1=7.0 centre_1=5.0 sphere_0 = vtk.vtkSphereSource() sphere_0.SetRadius(radius_0) sphere_0.SetPhiResolution(60) sphere_0.SetThetaResolution(60) sphere_0.SetCenter(0.0, 0.0, 0.0) sphere_0.Update() vtk_model_0 = sphere_0.GetOutput() sphere_1 = vtk.vtkSphereSource() sphere_1.SetRadius(radius_1) sphere_1.SetPhiResolution(60) sphere_1.SetThetaResolution(60) sphere_1.SetCenter(centre_1, 0.0, 0.0) sphere_1.Update() vtk_model_1 = sphere_1.GetOutput() dice, volume_0, volume_1, volume_01 = pdu.two_polydata_dice(vtk_model_0, vtk_model_1) np.testing.assert_approx_equal(volume_0, 4.0 * np.pi * radius_0**3.0 / 3.0, significant=2) np.testing.assert_approx_equal(volume_1, 4.0 * np.pi * radius_1**3.0 / 3.0, significant=2) cap_height_0 = ( radius_1 - radius_0 + centre_1) * ( radius_1 + radius_0 - centre_1) / (2 * centre_1) cap_height_1 = ( radius_0 - radius_1 + centre_1) * ( radius_0 + radius_1 - centre_1) / (2 * centre_1) cap_vol_0 = np.pi * cap_height_0**2 * ( 3 * radius_0 - cap_height_0) / 3 cap_vol_1 = np.pi * cap_height_1**2 * ( 3 * radius_1 - cap_height_1) / 3 analytic = cap_vol_0 + cap_vol_1 np.testing.assert_approx_equal(volume_01, analytic, significant=2) np.testing.assert_approx_equal(dice, 2*volume_01 / ( volume_0 + volume_1) , significant=10) def test_dice_no_overlap(): radius_0=5.5 radius_1=4.3 centre_1=12.0 sphere_0 = vtk.vtkSphereSource() sphere_0.SetRadius(radius_0) sphere_0.SetPhiResolution(60) sphere_0.SetThetaResolution(60) sphere_0.SetCenter(0.0, 0.0, 0.0) sphere_0.Update() vtk_model_0 = sphere_0.GetOutput() sphere_1 = vtk.vtkSphereSource() sphere_1.SetRadius(radius_1) sphere_1.SetPhiResolution(60) sphere_1.SetThetaResolution(60) sphere_1.SetCenter(centre_1, 0.0, 0.0) sphere_1.Update() vtk_model_1 = sphere_1.GetOutput() dice, volume_0, volume_1, volume_01 = pdu.two_polydata_dice(vtk_model_0, vtk_model_1) np.testing.assert_approx_equal(volume_0, 4.0 * np.pi * radius_0**3.0 / 3.0, significant=2) np.testing.assert_approx_equal(volume_1, 4.0 * np.pi * 
radius_1**3.0 / 3.0, significant=2) analytic = 0.0 np.testing.assert_approx_equal(volume_01, analytic, significant=2) np.testing.assert_approx_equal(dice, 2*volume_01 / ( volume_0 + volume_1) , significant=10)
true
true
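The analytic expectation in test_dice_overlap above is the sphere-sphere intersection volume (the MathWorld result cited in the code). Written out for spheres of radii r_0, r_1 whose centres are a distance d apart, the quantities the test computes are:

h_0 = \frac{(r_1 - r_0 + d)(r_1 + r_0 - d)}{2d}, \qquad
h_1 = \frac{(r_0 - r_1 + d)(r_0 + r_1 - d)}{2d}

V_{\cap} = \frac{\pi h_0^2 (3 r_0 - h_0)}{3} + \frac{\pi h_1^2 (3 r_1 - h_1)}{3}, \qquad
\mathrm{Dice} = \frac{2 V_{\cap}}{V_0 + V_1}, \quad V_i = \tfrac{4}{3} \pi r_i^3

With the test's values (r_0 = 10, r_1 = 7, d = 5) this gives h_0 = 2.4, h_1 = 9.6, an intersection volume of about 1267, and a Dice score of roughly 0.45; the mesh-derived volumes are checked against these analytic values to two significant figures.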
f7028bb4aee8f166e5448402b874d8240dfcf7c0
3,540
py
Python
main.py
wellbs32/random-password-generator
65b75113cd84250b06892e48ebcc21bc54a5bdb5
[ "MIT" ]
null
null
null
main.py
wellbs32/random-password-generator
65b75113cd84250b06892e48ebcc21bc54a5bdb5
[ "MIT" ]
null
null
null
main.py
wellbs32/random-password-generator
65b75113cd84250b06892e48ebcc21bc54a5bdb5
[ "MIT" ]
null
null
null
import string import random # --- Defining Variables --- LOWER_ALPHABET = list(string.ascii_lowercase) DIGITS = list(string.digits) UPPER_ALPHABET = list(string.ascii_uppercase) SYMBOLS = list(string.punctuation) SYMBOLS_DELETE = ['"', "'", "(", ")", ",", ".", ":", ";", "[", "]", "|", "`", "{", "}"] for x in SYMBOLS_DELETE: SYMBOLS.remove(x) CHAR_TYPES = [LOWER_ALPHABET, DIGITS] # characters used as default # --- PROGRAM INTRO --- print(""" ############################################################# # --- Password Generator --- # ############################################################# # Language: Python # ############################################################# # # # This is my very first project with Python # # Lowercase characteres and digits are used as default # # # ############################################################# """) # --- LENGTH QUESTION --- while True: print("Password Length (Min: 8 / Max: 48):") pass_len = input() try: pass_len = int(pass_len) if pass_len >= 8 and pass_len <= 48: break else: print("\nYou should insert a number between 8 and 16.\n") except ValueError: # In case of the user insert a value that cannot be turned into a 'int' type print("\nYou should insert a NUMBER between 8 and 16.\n") # --- UPPERCASE AND SYMBOLS QUESTION FUNCTION --- def question_checker(phrase, char_type): """Check if the user inserts a valid value on the upper case and symbols question. Then append the specific char type list if he answer is "Yes" """ while True: print("") print(phrase) answer = input().strip().capitalize() if answer == "Yes" or answer == "No": break else: print("\nInvalid Value.\n") def char_assignment(char_check, char_type): if char_check == "Yes": return CHAR_TYPES.append(char_type) else: pass char_assignment(answer, char_type) # --- ASSIGNING UPPERCASE AND/OR SYMBOLS CHARACTERS INTO THE CHAR_TYPES LIST. --- question_checker("Do you want uppercase letters? [Yes/No]", UPPER_ALPHABET) question_checker("Do you want symbols? [Yes/No]", SYMBOLS) # --- CREATE THE PASSWORD --- def create_password(): password_list = [] for x in range(len(CHAR_TYPES)): password_list.append(CHAR_TYPES[x][random.randrange(len(CHAR_TYPES[x]))]) # making at least one of all the char types appear in the password for x in range(pass_len - len(CHAR_TYPES)): random_chartype = random.randrange(len(CHAR_TYPES)) password_list.append(CHAR_TYPES[random_chartype][random.randrange(len(CHAR_TYPES[random_chartype]))]) # the spaces that remained will be filled with random characteres random.shuffle(password_list) password = "".join(password_list) return password # --- SHOW OUTPUT --- def show_password(): print("\n") print(f"Password: {create_password()} ") print("\n") show_password() # --- REMAKE THE PASSWORD --- while True: print("Remake the password? [Yes/No]") answer = input().strip().capitalize() if answer == "Yes" or answer == "No": if answer == "Yes": show_password() else: print("\n") break else: print("\nInvalid Value.\n")
29.5
175
0.549718
import string import random LOWER_ALPHABET = list(string.ascii_lowercase) DIGITS = list(string.digits) UPPER_ALPHABET = list(string.ascii_uppercase) SYMBOLS = list(string.punctuation) SYMBOLS_DELETE = ['"', "'", "(", ")", ",", ".", ":", ";", "[", "]", "|", "`", "{", "}"] for x in SYMBOLS_DELETE: SYMBOLS.remove(x) CHAR_TYPES = [LOWER_ALPHABET, DIGITS] # characters used as default # --- PROGRAM INTRO --- print(""" ############################################################# # --- Password Generator --- # ############################################################# # Language: Python # ############################################################# # # # This is my very first project with Python # # Lowercase characteres and digits are used as default # # # ############################################################# """) # --- LENGTH QUESTION --- while True: print("Password Length (Min: 8 / Max: 48):") pass_len = input() try: pass_len = int(pass_len) if pass_len >= 8 and pass_len <= 48: break else: print("\nYou should insert a number between 8 and 16.\n") except ValueError: # In case of the user insert a value that cannot be turned into a 'int' type print("\nYou should insert a NUMBER between 8 and 16.\n") # --- UPPERCASE AND SYMBOLS QUESTION FUNCTION --- def question_checker(phrase, char_type): while True: print("") print(phrase) answer = input().strip().capitalize() if answer == "Yes" or answer == "No": break else: print("\nInvalid Value.\n") def char_assignment(char_check, char_type): if char_check == "Yes": return CHAR_TYPES.append(char_type) else: pass char_assignment(answer, char_type) # --- ASSIGNING UPPERCASE AND/OR SYMBOLS CHARACTERS INTO THE CHAR_TYPES LIST. --- question_checker("Do you want uppercase letters? [Yes/No]", UPPER_ALPHABET) question_checker("Do you want symbols? [Yes/No]", SYMBOLS) # --- CREATE THE PASSWORD --- def create_password(): password_list = [] for x in range(len(CHAR_TYPES)): password_list.append(CHAR_TYPES[x][random.randrange(len(CHAR_TYPES[x]))]) # making at least one of all the char types appear in the password for x in range(pass_len - len(CHAR_TYPES)): random_chartype = random.randrange(len(CHAR_TYPES)) password_list.append(CHAR_TYPES[random_chartype][random.randrange(len(CHAR_TYPES[random_chartype]))]) # the spaces that remained will be filled with random characteres random.shuffle(password_list) password = "".join(password_list) return password # --- SHOW OUTPUT --- def show_password(): print("\n") print(f"Password: {create_password()} ") print("\n") show_password() # --- REMAKE THE PASSWORD --- while True: print("Remake the password? [Yes/No]") answer = input().strip().capitalize() if answer == "Yes" or answer == "No": if answer == "Yes": show_password() else: print("\n") break else: print("\nInvalid Value.\n")
true
true
f7028c6e33cf3e1d7a434c522068735baf06981a
7,316
py
Python
melange/drivers/aws/aws_driver.py
Rydra/melange
5de67dd4eab506353ea05f30df6d250997e3c97f
[ "MIT" ]
7
2017-11-22T15:52:46.000Z
2022-01-17T23:01:24.000Z
melange/drivers/aws/aws_driver.py
Rydra/melange
5de67dd4eab506353ea05f30df6d250997e3c97f
[ "MIT" ]
1
2017-11-30T16:13:52.000Z
2019-02-12T14:51:54.000Z
melange/drivers/aws/aws_driver.py
Rydra/melange
5de67dd4eab506353ea05f30df6d250997e3c97f
[ "MIT" ]
3
2017-11-15T16:34:20.000Z
2022-01-24T11:05:54.000Z
# type: ignore import json import uuid from json import JSONDecodeError from typing import Tuple, Dict, List import boto3 from melange.drivers.interfaces import Queue, Topic, MessagingDriver, Message class AWSDriver(MessagingDriver): def __init__(self, **kwargs): super().__init__() self.max_number_of_messages = kwargs.get("max_number_of_messages", 10) self.visibility_timeout = kwargs.get("visibility_timeout", 100) self.wait_time_seconds = kwargs.get("wait_time_seconds", 10) def declare_topic(self, topic_name) -> Topic: sns = boto3.resource("sns") topic = sns.create_topic(Name=topic_name) return topic def get_queue(self, queue_name) -> Queue: sqs_res = boto3.resource("sqs") return sqs_res.get_queue_by_name(QueueName=queue_name) def declare_queue( self, queue_name: str, *topics_to_bind: Topic, dead_letter_queue_name: str = None, **kwargs ) -> Tuple[Queue, Queue]: try: queue = self.get_queue(queue_name) except Exception: queue = self._create_queue(queue_name, content_based_deduplication="true") if topics_to_bind: statements = [] for topic in topics_to_bind: statement = { "Sid": "Sid{}".format(uuid.uuid4()), "Effect": "Allow", "Principal": "*", "Resource": queue.attributes["QueueArn"], "Action": "sqs:SendMessage", "Condition": {"ArnEquals": {"aws:SourceArn": topic.arn}}, } statements.append(statement) subscription = topic.subscribe( Protocol="sqs", Endpoint=queue.attributes[ "QueueArn" ], # , Attributes={"RawMessageDelivery": "true"} ) if kwargs.get("filter_events"): filter_policy = {"event_type": kwargs["filter_events"]} else: filter_policy = {} subscription.set_attributes( AttributeName="FilterPolicy", AttributeValue=json.dumps(filter_policy), ) policy = { "Version": "2012-10-17", "Id": "sqspolicy", "Statement": statements, } queue.set_attributes(Attributes={"Policy": json.dumps(policy)}) dead_letter_queue = None if dead_letter_queue_name: try: dead_letter_queue = self.get_queue(dead_letter_queue_name) except Exception: dead_letter_queue = self._create_queue( dead_letter_queue_name, content_based_deduplication="true" ) redrive_policy = { "deadLetterTargetArn": dead_letter_queue.attributes["QueueArn"], "maxReceiveCount": "4", } queue.set_attributes( Attributes={"RedrivePolicy": json.dumps(redrive_policy)} ) return queue, dead_letter_queue def _create_queue(self, queue_name: str, **kwargs) -> Queue: sqs_res = boto3.resource("sqs") fifo = queue_name.endswith(".fifo") attributes = {} if fifo: attributes["FifoQueue"] = "true" attributes["ContentBasedDeduplication"] = ( "true" if kwargs.get("content_based_deduplication") else "false" ) queue = sqs_res.create_queue(QueueName=queue_name, Attributes=attributes) return queue def retrieve_messages(self, queue: Queue, attempt_id=None) -> List[Message]: kwargs = dict( MaxNumberOfMessages=self.max_number_of_messages, VisibilityTimeout=self.visibility_timeout, WaitTimeSeconds=self.wait_time_seconds, MessageAttributeNames=["All"], AttributeNames=["All"], ) if attempt_id: kwargs["ReceiveRequestAttemptId"] = attempt_id messages = queue.receive_messages(**kwargs) # We need to differentiate here whether the message came from SNS or SQS return [self._construct_message(message) for message in messages] def queue_publish( self, content: str, queue, event_type_name: str = None, message_group_id: str = None, message_deduplication_id: str = None, ): kwargs = dict(MessageBody=json.dumps({"Message": content})) if event_type_name: kwargs["MessageAttributes"] = { "event_type": {"DataType": "String", "StringValue": event_type_name} } if message_group_id: 
kwargs["MessageGroupId"] = message_group_id if message_deduplication_id: kwargs["MessageDeduplicationId"] = message_deduplication_id queue.send_message(**kwargs) def publish( self, content: str, topic: Topic, event_type_name: str, extra_attributes: Dict = None, ): args = dict( Message=content, MessageAttributes={ "event_type": {"DataType": "String", "StringValue": event_type_name} }, ) if extra_attributes: if "subject" in extra_attributes: args["Subject"] = extra_attributes["subject"] if "message_attributes" in extra_attributes: args["MessageAttributes"].update(extra_attributes["message_attributes"]) if "message_structure" in extra_attributes: args["MessageStructure"] = extra_attributes["message_structure"] response = topic.publish(**args) if "MessageId" not in response: raise ConnectionError("Could not send the event to the SNS TOPIC") def acknowledge(self, message: Message) -> None: message.metadata.delete() def close_connection(self) -> None: pass def delete_queue(self, queue: Queue) -> None: queue.delete() def delete_topic(self, topic: Topic) -> None: topic.delete() def _construct_message(self, message) -> Message: body = message.body manifest = "" try: message_content = json.loads(body) if "Message" in message_content: content = message_content["Message"] # Does the content have more attributes? If so, it is very likely that the message came from a non-raw # SNS redirection if "MessageAttributes" in message_content: manifest = ( message_content["MessageAttributes"] .get("event_type", {}) .get("Value") or "" ) else: content = message_content except JSONDecodeError: content = body manifest = ( manifest or message.message_attributes.get("event_type", {}).get("StringValue") or "" ) return Message(message.message_id, content, message, manifest)
32.954955
118
0.562876
import json import uuid from json import JSONDecodeError from typing import Tuple, Dict, List import boto3 from melange.drivers.interfaces import Queue, Topic, MessagingDriver, Message class AWSDriver(MessagingDriver): def __init__(self, **kwargs): super().__init__() self.max_number_of_messages = kwargs.get("max_number_of_messages", 10) self.visibility_timeout = kwargs.get("visibility_timeout", 100) self.wait_time_seconds = kwargs.get("wait_time_seconds", 10) def declare_topic(self, topic_name) -> Topic: sns = boto3.resource("sns") topic = sns.create_topic(Name=topic_name) return topic def get_queue(self, queue_name) -> Queue: sqs_res = boto3.resource("sqs") return sqs_res.get_queue_by_name(QueueName=queue_name) def declare_queue( self, queue_name: str, *topics_to_bind: Topic, dead_letter_queue_name: str = None, **kwargs ) -> Tuple[Queue, Queue]: try: queue = self.get_queue(queue_name) except Exception: queue = self._create_queue(queue_name, content_based_deduplication="true") if topics_to_bind: statements = [] for topic in topics_to_bind: statement = { "Sid": "Sid{}".format(uuid.uuid4()), "Effect": "Allow", "Principal": "*", "Resource": queue.attributes["QueueArn"], "Action": "sqs:SendMessage", "Condition": {"ArnEquals": {"aws:SourceArn": topic.arn}}, } statements.append(statement) subscription = topic.subscribe( Protocol="sqs", Endpoint=queue.attributes[ "QueueArn" ], ) if kwargs.get("filter_events"): filter_policy = {"event_type": kwargs["filter_events"]} else: filter_policy = {} subscription.set_attributes( AttributeName="FilterPolicy", AttributeValue=json.dumps(filter_policy), ) policy = { "Version": "2012-10-17", "Id": "sqspolicy", "Statement": statements, } queue.set_attributes(Attributes={"Policy": json.dumps(policy)}) dead_letter_queue = None if dead_letter_queue_name: try: dead_letter_queue = self.get_queue(dead_letter_queue_name) except Exception: dead_letter_queue = self._create_queue( dead_letter_queue_name, content_based_deduplication="true" ) redrive_policy = { "deadLetterTargetArn": dead_letter_queue.attributes["QueueArn"], "maxReceiveCount": "4", } queue.set_attributes( Attributes={"RedrivePolicy": json.dumps(redrive_policy)} ) return queue, dead_letter_queue def _create_queue(self, queue_name: str, **kwargs) -> Queue: sqs_res = boto3.resource("sqs") fifo = queue_name.endswith(".fifo") attributes = {} if fifo: attributes["FifoQueue"] = "true" attributes["ContentBasedDeduplication"] = ( "true" if kwargs.get("content_based_deduplication") else "false" ) queue = sqs_res.create_queue(QueueName=queue_name, Attributes=attributes) return queue def retrieve_messages(self, queue: Queue, attempt_id=None) -> List[Message]: kwargs = dict( MaxNumberOfMessages=self.max_number_of_messages, VisibilityTimeout=self.visibility_timeout, WaitTimeSeconds=self.wait_time_seconds, MessageAttributeNames=["All"], AttributeNames=["All"], ) if attempt_id: kwargs["ReceiveRequestAttemptId"] = attempt_id messages = queue.receive_messages(**kwargs) return [self._construct_message(message) for message in messages] def queue_publish( self, content: str, queue, event_type_name: str = None, message_group_id: str = None, message_deduplication_id: str = None, ): kwargs = dict(MessageBody=json.dumps({"Message": content})) if event_type_name: kwargs["MessageAttributes"] = { "event_type": {"DataType": "String", "StringValue": event_type_name} } if message_group_id: kwargs["MessageGroupId"] = message_group_id if message_deduplication_id: kwargs["MessageDeduplicationId"] = message_deduplication_id 
queue.send_message(**kwargs) def publish( self, content: str, topic: Topic, event_type_name: str, extra_attributes: Dict = None, ): args = dict( Message=content, MessageAttributes={ "event_type": {"DataType": "String", "StringValue": event_type_name} }, ) if extra_attributes: if "subject" in extra_attributes: args["Subject"] = extra_attributes["subject"] if "message_attributes" in extra_attributes: args["MessageAttributes"].update(extra_attributes["message_attributes"]) if "message_structure" in extra_attributes: args["MessageStructure"] = extra_attributes["message_structure"] response = topic.publish(**args) if "MessageId" not in response: raise ConnectionError("Could not send the event to the SNS TOPIC") def acknowledge(self, message: Message) -> None: message.metadata.delete() def close_connection(self) -> None: pass def delete_queue(self, queue: Queue) -> None: queue.delete() def delete_topic(self, topic: Topic) -> None: topic.delete() def _construct_message(self, message) -> Message: body = message.body manifest = "" try: message_content = json.loads(body) if "Message" in message_content: content = message_content["Message"] if "MessageAttributes" in message_content: manifest = ( message_content["MessageAttributes"] .get("event_type", {}) .get("Value") or "" ) else: content = message_content except JSONDecodeError: content = body manifest = ( manifest or message.message_attributes.get("event_type", {}).get("StringValue") or "" ) return Message(message.message_id, content, message, manifest)
true
true
f7028c9a3da8e2b02799a577a83179212a411809
3,411
py
Python
imsnpars/nparser/graph/builder.py
zentrum-lexikographie/IMSnPars
8d19aa1fc76b0277c861cec774ad81f62cd4e244
[ "Apache-2.0" ]
2
2020-09-28T09:30:22.000Z
2020-11-28T13:36:13.000Z
imsnpars/nparser/graph/builder.py
zentrum-lexikographie/IMSnPars
8d19aa1fc76b0277c861cec774ad81f62cd4e244
[ "Apache-2.0" ]
1
2020-08-13T14:11:45.000Z
2020-08-13T14:11:45.000Z
imsnpars/nparser/graph/builder.py
zentrum-lexikographie/IMSnPars
8d19aa1fc76b0277c861cec774ad81f62cd4e244
[ "Apache-2.0" ]
1
2020-11-28T14:32:56.000Z
2020-11-28T14:32:56.000Z
''' Created on 23.08.2017 @author: falensaa ''' import logging import sys import imsnpars.nparser.features import imsnpars.nparser.network import imsnpars.nparser.graph.features as gfeatures from imsnpars.nparser.graph import task, decoder from imsnpars.nparser.graph.mst import cle from imsnpars.nparser.labels import task as ltask def buildMSTDecoder(opts, featBuilder): if opts.mst == "CLE": mstAlg = cle.ChuLiuEdmonds() decod = decoder.FirstOrderDecoder(featBuilder) else: logging.error("Unknown algorithm: %s" % opts.mst) sys.exit() logging.info("Graph system used: %s" % type(mstAlg)) logging.info("Decoder used: %s" % type(decod)) return mstAlg, decod def buildGraphFeatureExtractors(featuresD, reprDim): featIds = { ("h", "0"): gfeatures.FeatId.HEAD, ("d", "0"): gfeatures.FeatId.DEP, ("h", "1"): gfeatures.FeatId.HEAD_P_1, ("h", "2"): gfeatures.FeatId.HEAD_P_2, ("d", "1"): gfeatures.FeatId.DEP_P_1, ("d", "2"): gfeatures.FeatId.DEP_P_2, ("h", "-1"): gfeatures.FeatId.HEAD_M_1, ("h", "-2"): gfeatures.FeatId.HEAD_M_2, ("d", "-1"): gfeatures.FeatId.DEP_M_1, ("d", "-2"): gfeatures.FeatId.DEP_M_2, ("dist", "0") : gfeatures.FeatId.DIST } mainFeatIds = {"h": gfeatures.FeatId.HEAD, "d": gfeatures.FeatId.DEP } featureExtractors = { } featureBuilders = { } for feat in featuresD: if "+" in feat: name, shift = feat.split("+") elif "-" in feat: name, shift = feat.split("-") shift = "-" + shift else: name, shift = feat, "0" featId = featIds.get((name, shift)) if featId == None: logging.error("Unknown token id: %s" % feat) sys.exit() # for now there is only one builder -- distance if featId == gfeatures.FeatId.DIST: featureBuilders[featId] = gfeatures.DistFeatureBuilder(reprDim) else: mainFeature = mainFeatIds[name] if mainFeature not in featureExtractors: featureExtractors[mainFeature] = gfeatures.TokenFeatExtractor() featureExtractors[mainFeature].addShift(featId, int(shift)) return featureExtractors, featureBuilders def buildGraphParser(opts, dummyBuilder, reprBuilder): reprDim = reprBuilder.getDim() tokExtractors, featBuilders = buildGraphFeatureExtractors(opts.features, reprDim) extractor = gfeatures.GraphFeatureExtractor(tokExtractors) featIds = extractor.getFeatIds() + [ feat.getFeatId() for feat in featBuilders.values() ] network = imsnpars.nparser.network.ParserNetwork(opts.mlpHiddenDim, opts.nonLinFun, featIds) featBuilder = imsnpars.nparser.features.FeatReprBuilder(extractor, featBuilders, dummyBuilder, network, opts.parseLayer) mstAlg, decod = buildMSTDecoder(opts, featBuilder) if opts.labeler == "graph": lblDict = ltask.LblTagDict() parsingTask = task.NNGraphParsingTaskWithLbl(mstAlg, featBuilder, decod, network, opts.augment, lblDict) else: parsingTask = task.NNGraphParsingTask(mstAlg, featBuilder, decod, network, opts.augment) return parsingTask
37.076087
124
0.619173
import logging import sys import imsnpars.nparser.features import imsnpars.nparser.network import imsnpars.nparser.graph.features as gfeatures from imsnpars.nparser.graph import task, decoder from imsnpars.nparser.graph.mst import cle from imsnpars.nparser.labels import task as ltask def buildMSTDecoder(opts, featBuilder): if opts.mst == "CLE": mstAlg = cle.ChuLiuEdmonds() decod = decoder.FirstOrderDecoder(featBuilder) else: logging.error("Unknown algorithm: %s" % opts.mst) sys.exit() logging.info("Graph system used: %s" % type(mstAlg)) logging.info("Decoder used: %s" % type(decod)) return mstAlg, decod def buildGraphFeatureExtractors(featuresD, reprDim): featIds = { ("h", "0"): gfeatures.FeatId.HEAD, ("d", "0"): gfeatures.FeatId.DEP, ("h", "1"): gfeatures.FeatId.HEAD_P_1, ("h", "2"): gfeatures.FeatId.HEAD_P_2, ("d", "1"): gfeatures.FeatId.DEP_P_1, ("d", "2"): gfeatures.FeatId.DEP_P_2, ("h", "-1"): gfeatures.FeatId.HEAD_M_1, ("h", "-2"): gfeatures.FeatId.HEAD_M_2, ("d", "-1"): gfeatures.FeatId.DEP_M_1, ("d", "-2"): gfeatures.FeatId.DEP_M_2, ("dist", "0") : gfeatures.FeatId.DIST } mainFeatIds = {"h": gfeatures.FeatId.HEAD, "d": gfeatures.FeatId.DEP } featureExtractors = { } featureBuilders = { } for feat in featuresD: if "+" in feat: name, shift = feat.split("+") elif "-" in feat: name, shift = feat.split("-") shift = "-" + shift else: name, shift = feat, "0" featId = featIds.get((name, shift)) if featId == None: logging.error("Unknown token id: %s" % feat) sys.exit() if featId == gfeatures.FeatId.DIST: featureBuilders[featId] = gfeatures.DistFeatureBuilder(reprDim) else: mainFeature = mainFeatIds[name] if mainFeature not in featureExtractors: featureExtractors[mainFeature] = gfeatures.TokenFeatExtractor() featureExtractors[mainFeature].addShift(featId, int(shift)) return featureExtractors, featureBuilders def buildGraphParser(opts, dummyBuilder, reprBuilder): reprDim = reprBuilder.getDim() tokExtractors, featBuilders = buildGraphFeatureExtractors(opts.features, reprDim) extractor = gfeatures.GraphFeatureExtractor(tokExtractors) featIds = extractor.getFeatIds() + [ feat.getFeatId() for feat in featBuilders.values() ] network = imsnpars.nparser.network.ParserNetwork(opts.mlpHiddenDim, opts.nonLinFun, featIds) featBuilder = imsnpars.nparser.features.FeatReprBuilder(extractor, featBuilders, dummyBuilder, network, opts.parseLayer) mstAlg, decod = buildMSTDecoder(opts, featBuilder) if opts.labeler == "graph": lblDict = ltask.LblTagDict() parsingTask = task.NNGraphParsingTaskWithLbl(mstAlg, featBuilder, decod, network, opts.augment, lblDict) else: parsingTask = task.NNGraphParsingTask(mstAlg, featBuilder, decod, network, opts.augment) return parsingTask
true
true
f7028cc13315d37bcb502995874600452971aa1d
6,903
py
Python
kubernetes/client/models/v1beta1_event_list.py
venukarnati92/python-1
3fabf9ed9f4758fb5133975a58fc147471e91d9d
[ "Apache-2.0" ]
1
2022-02-07T21:57:20.000Z
2022-02-07T21:57:20.000Z
kubernetes/client/models/v1beta1_event_list.py
venukarnati92/python-1
3fabf9ed9f4758fb5133975a58fc147471e91d9d
[ "Apache-2.0" ]
1
2022-03-01T03:37:57.000Z
2022-03-01T03:37:57.000Z
kubernetes/client/models/v1beta1_event_list.py
venukarnati92/python-1
3fabf9ed9f4758fb5133975a58fc147471e91d9d
[ "Apache-2.0" ]
null
null
null
# coding: utf-8 """ Kubernetes No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 The version of the OpenAPI document: release-1.22 Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six from kubernetes.client.configuration import Configuration class V1beta1EventList(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'api_version': 'str', 'items': 'list[V1beta1Event]', 'kind': 'str', 'metadata': 'V1ListMeta' } attribute_map = { 'api_version': 'apiVersion', 'items': 'items', 'kind': 'kind', 'metadata': 'metadata' } def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501 """V1beta1EventList - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._api_version = None self._items = None self._kind = None self._metadata = None self.discriminator = None if api_version is not None: self.api_version = api_version self.items = items if kind is not None: self.kind = kind if metadata is not None: self.metadata = metadata @property def api_version(self): """Gets the api_version of this V1beta1EventList. # noqa: E501 APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :return: The api_version of this V1beta1EventList. # noqa: E501 :rtype: str """ return self._api_version @api_version.setter def api_version(self, api_version): """Sets the api_version of this V1beta1EventList. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :param api_version: The api_version of this V1beta1EventList. # noqa: E501 :type: str """ self._api_version = api_version @property def items(self): """Gets the items of this V1beta1EventList. # noqa: E501 items is a list of schema objects. # noqa: E501 :return: The items of this V1beta1EventList. # noqa: E501 :rtype: list[V1beta1Event] """ return self._items @items.setter def items(self, items): """Sets the items of this V1beta1EventList. items is a list of schema objects. # noqa: E501 :param items: The items of this V1beta1EventList. # noqa: E501 :type: list[V1beta1Event] """ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501 raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501 self._items = items @property def kind(self): """Gets the kind of this V1beta1EventList. # noqa: E501 Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :return: The kind of this V1beta1EventList. # noqa: E501 :rtype: str """ return self._kind @kind.setter def kind(self, kind): """Sets the kind of this V1beta1EventList. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :param kind: The kind of this V1beta1EventList. # noqa: E501 :type: str """ self._kind = kind @property def metadata(self): """Gets the metadata of this V1beta1EventList. # noqa: E501 :return: The metadata of this V1beta1EventList. # noqa: E501 :rtype: V1ListMeta """ return self._metadata @metadata.setter def metadata(self, metadata): """Sets the metadata of this V1beta1EventList. :param metadata: The metadata of this V1beta1EventList. # noqa: E501 :type: V1ListMeta """ self._metadata = metadata def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1beta1EventList): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1beta1EventList): return True return self.to_dict() != other.to_dict()
33.509709
312
0.623932
import pprint import re import six from kubernetes.client.configuration import Configuration class V1beta1EventList(object): openapi_types = { 'api_version': 'str', 'items': 'list[V1beta1Event]', 'kind': 'str', 'metadata': 'V1ListMeta' } attribute_map = { 'api_version': 'apiVersion', 'items': 'items', 'kind': 'kind', 'metadata': 'metadata' } def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._api_version = None self._items = None self._kind = None self._metadata = None self.discriminator = None if api_version is not None: self.api_version = api_version self.items = items if kind is not None: self.kind = kind if metadata is not None: self.metadata = metadata @property def api_version(self): return self._api_version @api_version.setter def api_version(self, api_version): self._api_version = api_version @property def items(self): return self._items @items.setter def items(self, items): if self.local_vars_configuration.client_side_validation and items is None: raise ValueError("Invalid value for `items`, must not be `None`") self._items = items @property def kind(self): return self._kind @kind.setter def kind(self, kind): self._kind = kind @property def metadata(self): return self._metadata @metadata.setter def metadata(self, metadata): self._metadata = metadata def to_dict(self): result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): return pprint.pformat(self.to_dict()) def __repr__(self): return self.to_str() def __eq__(self, other): if not isinstance(other, V1beta1EventList): return False return self.to_dict() == other.to_dict() def __ne__(self, other): if not isinstance(other, V1beta1EventList): return True return self.to_dict() != other.to_dict()
true
true
f7028d4fe063a3a562eecc15b66515f81b9f692f
5,130
py
Python
bout.py
vishalbelsare/bout
4731566d2cc2e3cd6f2c4f9cef615a39a66d4516
[ "MIT" ]
null
null
null
bout.py
vishalbelsare/bout
4731566d2cc2e3cd6f2c4f9cef615a39a66d4516
[ "MIT" ]
null
null
null
bout.py
vishalbelsare/bout
4731566d2cc2e3cd6f2c4f9cef615a39a66d4516
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- """Bout (read bank-out) extracts transactions from pdf bank statements. _ _ (_) (_) (_) _ _ _ _ _ _ _ _ _ (_) _ _ (_)(_)(_)(_)_ _ (_)(_)(_) _ (_) (_)(_)(_)(_)(_) (_) (_)(_) (_)(_) (_) (_) (_) (_)(_) (_)(_) (_) (_) _ (_) _ _ _(_)(_) _ _ _ (_)(_)_ _ _(_)_ (_)_ _(_) (_)(_)(_)(_) (_)(_)(_) (_)(_)(_) (_) (_)(_) """ import io import logging import click import csv from collections import namedtuple from datetime import datetime logger = logging.getLogger("bout") profiles = {} Transaction = namedtuple("Transaction", ["id", "date", "payee", "memo", "amount"]) InvalidTransaction = namedtuple("InvalidTransaction", []) def get_icici_csv(data_row): """Convert a transaction row to tuple. Details of fields 0: 'D', # Transaction date 2: 'M', # Transaction details 3: 'T', # Deposit 4: 'T-', # Withdrawal """ logger.debug("get_icicicsv: Data row = {}".format(data_row)) date = data_row[0].replace('-', '/') if _valid_date(date): amt = "-{}".format(data_row[4]) if data_row[3] != "0": amt = data_row[3] return Transaction(id=0, date=date, payee="", # Empty for ICICI bank account memo=data_row[2], amount=amt) return InvalidTransaction() def get_icicicc_csv(data_row): """Convert a transaction row to tuple. Details of fields 0: 'D', # Transaction date 2: 'M', # Transaction details 5: 'T', # Amount """ logger.debug("get_icicicsv: Data row = {}".format(data_row)) date = data_row[0] if _valid_date(date, date_format="%d/%m/%Y"): amt = "-{}".format(data_row[5]) if data_row[6] == "CR": amt = data_row[5] return Transaction(id=0, date=date, payee="", # Empty for ICICI bank account memo=data_row[2], amount=amt) return InvalidTransaction() def qif_header(): """Print qif header.""" click.echo("!Account\nNMyAccount\nTMyBank\n^\n!Type:Bank") def to_qif(transaction): """Transform a cleaned up row to qif format. Returns: string of a particular transaction in qif format See wikipedia for more details of QIF format. 
https://en.wikipedia.org/wiki/Quicken_Interchange_Format#Detail_items """ logger.debug("to_qif: Input = {}".format(transaction)) return "D{0}\nM{1}\nT{2}\n^\n\n"\ .format(transaction.date, transaction.memo, transaction.amount) def _valid_date(date_value, date_format="%d/%m/%Y"): """Validate a transaction date.""" try: transaction_date = datetime.strptime(date_value, date_format) return transaction_date is not None except ValueError: return False def _filter_csv_header(doc, header): head_skip = False mem = io.StringIO() with open(doc, encoding='utf-8', mode='r') as f: for line in f: if line.startswith(header): head_skip = True continue if head_skip and (not line or line.isspace()): break if head_skip and ',' in line: mem.write(line) mem.seek(0) return csv.reader(mem) @click.command() @click.argument("doc", type=click.Path(exists=True)) @click.option("--profile", prompt="Choose a profile", default="icici", show_default=True, type=click.Choice(["icici", "icicicc"]), help="Document type profile.") @click.option("--debug", is_flag=True, show_default=True, help="Show diagnostic messages.") def start(doc, profile, debug): """Bout (read bank-out) extracts transactions from csv bank statements.""" if debug: logging.basicConfig(level=logging.DEBUG) logger.info("Verbose messages are enabled.") profiles.update({"icici": get_icici_csv, "icicicc": get_icicicc_csv}) rows = [] if profile == "icici": header = "DATE,MODE,PARTICULARS,DEPOSITS,WITHDRAWALS,BALANCE" rows = _filter_csv_header(doc, header) elif profile == "icicicc": header = "Date,Sr.No.,Transaction Details,Reward Point Header,Intl.Amount,Amount(in Rs),BillingAmountSign" rows = _filter_csv_header(doc, header) # row -> clean_row # clean_row, profile -> transaction # transaction -> qif create_transaction = profiles[profile] print_header = False for r in rows: transaction = create_transaction(r) if type(transaction) is not InvalidTransaction: if not print_header: qif_header() print_header = True click.echo(to_qif(transaction)) if __name__ == '__main__': start()
31.666667
114
0.554386
import io import logging import click import csv from collections import namedtuple from datetime import datetime logger = logging.getLogger("bout") profiles = {} Transaction = namedtuple("Transaction", ["id", "date", "payee", "memo", "amount"]) InvalidTransaction = namedtuple("InvalidTransaction", []) def get_icici_csv(data_row): logger.debug("get_icicicsv: Data row = {}".format(data_row)) date = data_row[0].replace('-', '/') if _valid_date(date): amt = "-{}".format(data_row[4]) if data_row[3] != "0": amt = data_row[3] return Transaction(id=0, date=date, payee="", memo=data_row[2], amount=amt) return InvalidTransaction() def get_icicicc_csv(data_row): logger.debug("get_icicicsv: Data row = {}".format(data_row)) date = data_row[0] if _valid_date(date, date_format="%d/%m/%Y"): amt = "-{}".format(data_row[5]) if data_row[6] == "CR": amt = data_row[5] return Transaction(id=0, date=date, payee="", memo=data_row[2], amount=amt) return InvalidTransaction() def qif_header(): click.echo("!Account\nNMyAccount\nTMyBank\n^\n!Type:Bank") def to_qif(transaction): logger.debug("to_qif: Input = {}".format(transaction)) return "D{0}\nM{1}\nT{2}\n^\n\n"\ .format(transaction.date, transaction.memo, transaction.amount) def _valid_date(date_value, date_format="%d/%m/%Y"): try: transaction_date = datetime.strptime(date_value, date_format) return transaction_date is not None except ValueError: return False def _filter_csv_header(doc, header): head_skip = False mem = io.StringIO() with open(doc, encoding='utf-8', mode='r') as f: for line in f: if line.startswith(header): head_skip = True continue if head_skip and (not line or line.isspace()): break if head_skip and ',' in line: mem.write(line) mem.seek(0) return csv.reader(mem) @click.command() @click.argument("doc", type=click.Path(exists=True)) @click.option("--profile", prompt="Choose a profile", default="icici", show_default=True, type=click.Choice(["icici", "icicicc"]), help="Document type profile.") @click.option("--debug", is_flag=True, show_default=True, help="Show diagnostic messages.") def start(doc, profile, debug): if debug: logging.basicConfig(level=logging.DEBUG) logger.info("Verbose messages are enabled.") profiles.update({"icici": get_icici_csv, "icicicc": get_icicicc_csv}) rows = [] if profile == "icici": header = "DATE,MODE,PARTICULARS,DEPOSITS,WITHDRAWALS,BALANCE" rows = _filter_csv_header(doc, header) elif profile == "icicicc": header = "Date,Sr.No.,Transaction Details,Reward Point Header,Intl.Amount,Amount(in Rs),BillingAmountSign" rows = _filter_csv_header(doc, header) create_transaction = profiles[profile] print_header = False for r in rows: transaction = create_transaction(r) if type(transaction) is not InvalidTransaction: if not print_header: qif_header() print_header = True click.echo(to_qif(transaction)) if __name__ == '__main__': start()
true
true
f7028e106fb1937ccea8ec628aca73fc739aadb5
1,355
py
Python
colorizer.py
official71/ezmemo
46348885053372efd62fc3ab1c3b39e31681e053
[ "MIT" ]
null
null
null
colorizer.py
official71/ezmemo
46348885053372efd62fc3ab1c3b39e31681e053
[ "MIT" ]
11
2018-03-29T23:52:38.000Z
2018-04-26T17:44:43.000Z
colorizer.py
official71/ezmemo
46348885053372efd62fc3ab1c3b39e31681e053
[ "MIT" ]
null
null
null
from colored import * import staticconf """ You might find the colored documentation very useful: https://pypi.python.org/pypi/colored """ ENABLE_COLORIZER = staticconf.read_string('enable_colorizer', default='false').lower() == 'true' def colorizer_enabled(function): """do not colorize if it's not enabled""" def wrapper(*args): if ENABLE_COLORIZER: return function(*args) elif args: return args[0] else: return args return wrapper # attr and colors ATTR_RESET = attr('reset') COLOR_INDEX = fg(199) COLOR_TITLE = fg(45) COLOR_TAG_0 = fg(10) + attr('bold') COLOR_TAG_1 = fg(10) COLOR_TAG_2 = fg(87) COLOR_TAG_3 = fg(208) COLOR_TAG_4 = fg(252) @colorizer_enabled def color_index(index): return COLOR_INDEX + index + ATTR_RESET @colorizer_enabled def color_title(title): return COLOR_TITLE + title + ATTR_RESET def _color_by_score(score): if score >= 1: return COLOR_TAG_0 elif score >= 0.9: return COLOR_TAG_1 elif score >= 0.8: return COLOR_TAG_2 elif score >= 0.7: return COLOR_TAG_3 return COLOR_TAG_4 @colorizer_enabled def _color_tag(tag, score): return _color_by_score(score) + tag + ATTR_RESET def color_tags(scored_tags): return ", ".join((_color_tag(tag, score) for tag, score in scored_tags))
22.583333
96
0.678229
from colored import * import staticconf ENABLE_COLORIZER = staticconf.read_string('enable_colorizer', default='false').lower() == 'true' def colorizer_enabled(function): def wrapper(*args): if ENABLE_COLORIZER: return function(*args) elif args: return args[0] else: return args return wrapper ATTR_RESET = attr('reset') COLOR_INDEX = fg(199) COLOR_TITLE = fg(45) COLOR_TAG_0 = fg(10) + attr('bold') COLOR_TAG_1 = fg(10) COLOR_TAG_2 = fg(87) COLOR_TAG_3 = fg(208) COLOR_TAG_4 = fg(252) @colorizer_enabled def color_index(index): return COLOR_INDEX + index + ATTR_RESET @colorizer_enabled def color_title(title): return COLOR_TITLE + title + ATTR_RESET def _color_by_score(score): if score >= 1: return COLOR_TAG_0 elif score >= 0.9: return COLOR_TAG_1 elif score >= 0.8: return COLOR_TAG_2 elif score >= 0.7: return COLOR_TAG_3 return COLOR_TAG_4 @colorizer_enabled def _color_tag(tag, score): return _color_by_score(score) + tag + ATTR_RESET def color_tags(scored_tags): return ", ".join((_color_tag(tag, score) for tag, score in scored_tags))
true
true
f7028f059677a83cf6bbecfd7df23260f585b48f
785
py
Python
sdk/media/azure-mgmt-media/azure/mgmt/media/aio/__init__.py
vincenttran-msft/azure-sdk-for-python
348b56f9f03eeb3f7b502eed51daf494ffff874d
[ "MIT" ]
1
2022-02-01T18:50:12.000Z
2022-02-01T18:50:12.000Z
sdk/media/azure-mgmt-media/azure/mgmt/media/aio/__init__.py
vincenttran-msft/azure-sdk-for-python
348b56f9f03eeb3f7b502eed51daf494ffff874d
[ "MIT" ]
null
null
null
sdk/media/azure-mgmt-media/azure/mgmt/media/aio/__init__.py
vincenttran-msft/azure-sdk-for-python
348b56f9f03eeb3f7b502eed51daf494ffff874d
[ "MIT" ]
null
null
null
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from ._azure_media_services import AzureMediaServices __all__ = ['AzureMediaServices'] # `._patch.py` is used for handwritten extensions to the generated code # Example: https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/customize_code/how-to-patch-sdk-code.md from ._patch import patch_sdk patch_sdk()
49.0625
114
0.636943
from ._azure_media_services import AzureMediaServices __all__ = ['AzureMediaServices'] from ._patch import patch_sdk patch_sdk()
true
true
f7028f58d5d64339a8ee0336fccba8b58dc44b75
33,617
py
Python
tests/unit/participants/test_views.py
dubesar/EvalAI
a38fbc8ff8c58f26fb2e61ef67e13055fb0c1f29
[ "BSD-3-Clause" ]
1
2019-11-08T05:23:11.000Z
2019-11-08T05:23:11.000Z
tests/unit/participants/test_views.py
dubesar/EvalAI
a38fbc8ff8c58f26fb2e61ef67e13055fb0c1f29
[ "BSD-3-Clause" ]
2
2019-07-19T04:17:48.000Z
2019-07-19T06:26:31.000Z
tests/unit/participants/test_views.py
Sanji515/EvalAI
888806063b0d6423fd35754e3c37aad02c2b05f7
[ "BSD-3-Clause" ]
1
2020-02-15T11:34:13.000Z
2020-02-15T11:34:13.000Z
from datetime import timedelta from django.core.urlresolvers import reverse_lazy from django.contrib.auth.models import User from django.utils import timezone from allauth.account.models import EmailAddress from rest_framework import status from rest_framework.test import APITestCase, APIClient from challenges.models import Challenge from hosts.models import ChallengeHost, ChallengeHostTeam from participants.models import ParticipantTeam, Participant class BaseAPITestClass(APITestCase): def setUp(self): self.client = APIClient(enforce_csrf_checks=True) self.user = User.objects.create( username="someuser", email="user@test.com", password="secret_password", ) EmailAddress.objects.create( user=self.user, email="user@test.com", primary=True, verified=True ) self.invite_user = User.objects.create( username="otheruser", email="other@platform.com", password="other_secret_password", ) self.participant_team = ParticipantTeam.objects.create( team_name="Participant Team", created_by=self.user ) self.participant = Participant.objects.create( user=self.user, team=self.participant_team, status=Participant.SELF ) self.client.force_authenticate(user=self.user) class GetParticipantTeamTest(BaseAPITestClass): url = reverse_lazy("participants:get_participant_team_list") def setUp(self): super(GetParticipantTeamTest, self).setUp() self.user2 = User.objects.create( username="user2", email="user2@platform.com", password="user2_password", ) EmailAddress.objects.create( user=self.user2, email="user2@platform.com", primary=True, verified=True, ) self.participant2 = Participant.objects.create( user=self.user2, status=Participant.ACCEPTED, team=self.participant_team, ) def test_get_challenge(self): expected = [ { "id": self.participant_team.pk, "team_name": self.participant_team.team_name, "created_by": self.user.username, "team_url": self.participant_team.team_url, "members": [ { "member_name": self.participant.user.username, "status": self.participant.status, "member_id": self.participant.user.id, }, { "member_name": self.participant2.user.username, "status": self.participant2.status, "member_id": self.participant2.user.id, }, ], } ] response = self.client.get(self.url, {}) self.assertEqual(response.data["results"], expected) self.assertEqual(response.status_code, status.HTTP_200_OK) class CreateParticipantTeamTest(BaseAPITestClass): url = reverse_lazy("participants:get_participant_team_list") def setUp(self): super(CreateParticipantTeamTest, self).setUp() self.data = {"team_name": "New Participant Team"} def test_create_participant_team_with_all_data(self): response = self.client.post(self.url, self.data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_create_participant_team_with_team_name_same_as_with_existing_team( self ): expected = { "team_name": [ "participant team with this team name already exists." 
] } response = self.client.post(self.url, self.data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) # Creating team with same team name response = self.client.post(self.url, self.data) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertEqual(response.data, expected) def test_create_participant_team_with_no_data(self): del self.data["team_name"] response = self.client.post(self.url, self.data) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) class GetParticularParticipantTeam(BaseAPITestClass): def setUp(self): super(GetParticularParticipantTeam, self).setUp() self.url = reverse_lazy( "participants:get_participant_team_details", kwargs={"pk": self.participant_team.pk}, ) self.user2 = User.objects.create( username="user2", email="user2@platform.com", password="user2_password", ) EmailAddress.objects.create( user=self.user2, email="user2@platform.com", primary=True, verified=True, ) self.participant2 = Participant.objects.create( user=self.user2, status=Participant.ACCEPTED, team=self.participant_team, ) def test_get_particular_participant_team(self): expected = { "id": self.participant_team.pk, "team_name": self.participant_team.team_name, "created_by": self.user.username, "team_url": self.participant_team.team_url, "members": [ { "member_name": self.participant.user.username, "status": self.participant.status, "member_id": self.participant.user.id, }, { "member_name": self.participant2.user.username, "status": self.participant2.status, "member_id": self.participant2.user.id, }, ], } response = self.client.get(self.url, {}) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_particular_participant_team_does_not_exist(self): self.url = reverse_lazy( "participants:get_participant_team_details", kwargs={"pk": self.participant_team.pk + 1}, ) expected = {"error": "ParticipantTeam does not exist"} response = self.client.get(self.url, {}) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE) class UpdateParticularParticipantTeam(BaseAPITestClass): def setUp(self): super(UpdateParticularParticipantTeam, self).setUp() self.url = reverse_lazy( "participants:get_participant_team_details", kwargs={"pk": self.participant_team.pk}, ) self.partial_update_participant_team_name = ( "Partial Update Participant Team" ) self.update_participant_team_name = "Update Test Participant Team" self.data = {"team_name": self.update_participant_team_name} def test_particular_participant_team_partial_update(self): self.partial_update_data = { "team_name": self.partial_update_participant_team_name } expected = { "id": self.participant_team.pk, "team_name": self.partial_update_participant_team_name, "created_by": self.user.username, "team_url": self.participant_team.team_url, } response = self.client.patch(self.url, self.partial_update_data) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_particular_participant_team_update(self): expected = { "id": self.participant_team.pk, "team_name": self.update_participant_team_name, "created_by": self.user.username, "team_url": self.participant_team.team_url, } response = self.client.put(self.url, self.data) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_particular_participant_team_update_with_no_data(self): self.data = {"team_name": ""} response = self.client.put(self.url, self.data) 
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) class DeleteParticularParticipantTeam(BaseAPITestClass): def setUp(self): super(DeleteParticularParticipantTeam, self).setUp() self.url = reverse_lazy( "participants:get_participant_team_details", kwargs={"pk": self.participant_team.pk}, ) def test_particular_participant_team_delete(self): response = self.client.delete(self.url, {}) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) class InviteParticipantToTeamTest(BaseAPITestClass): def setUp(self): super(InviteParticipantToTeamTest, self).setUp() self.data = {"email": self.invite_user.email} self.url = reverse_lazy( "participants:invite_participant_to_team", kwargs={"pk": self.participant_team.pk}, ) def test_invite_participant_to_team_with_all_data(self): expected = {"message": "User has been successfully added to the team!"} response = self.client.post(self.url, self.data) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) def test_invite_participant_to_team_with_no_data(self): del self.data["email"] response = self.client.post(self.url, self.data) self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE) def test_invite_self_to_team(self): self.data = {"email": self.user.email} expected = {"error": "User is already part of the team!"} response = self.client.post(self.url, self.data) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE) def test_invite_to_other_team_which_doesnot_belong_to_user(self): temp_user = User.objects.create( username="temp_user", password="test_password" ) temp_participant_team = ParticipantTeam.objects.create( team_name="Test Team 1", created_by=temp_user ) expected = {"error": "You are not a member of this team!"} self.url = reverse_lazy( "participants:invite_participant_to_team", kwargs={"pk": temp_participant_team.pk}, ) response = self.client.post(self.url, self.data) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_invite_user_which_does_not_exist_to_team(self): self.data = {"email": "userwhichdoesnotexist@platform.com"} expected = {"error": "User does not exist with this email address!"} response = self.client.post(self.url, self.data) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE) def test_particular_participant_team_for_invite_does_not_exist(self): self.url = reverse_lazy( "participants:invite_participant_to_team", kwargs={"pk": self.participant_team.pk + 1}, ) expected = {"error": "Participant Team does not exist"} response = self.client.post(self.url, {}) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_invite_participant_to_team_when_user_cannot_be_invited(self): """ NOTE user: host user user1: participant 1 user2: participant 2 """ self.user2 = User.objects.create( username="user2", email="user2@platform.com", password="user2_password", ) EmailAddress.objects.create( user=self.user2, email="user2@platform.com", primary=True, verified=True, ) self.user3 = User.objects.create( username="user3", email="user3@platform.com", password="user3_password", ) EmailAddress.objects.create( user=self.user3, email="user3@platform.com", primary=True, verified=True, ) self.participant_team2 = ParticipantTeam.objects.create( team_name="Participant Team created by user 2", created_by=self.user2, ) 
self.participant_team3 = ParticipantTeam.objects.create( team_name="Participant Team created by user 3", created_by=self.user3, ) self.participant2 = Participant.objects.create( user=self.user2, status=Participant.ACCEPTED, team=self.participant_team2, ) self.participant3 = Participant.objects.create( user=self.user3, status=Participant.ACCEPTED, team=self.participant_team3, ) self.challenge_host_team = ChallengeHostTeam.objects.create( team_name="Test Challenge Host Team", created_by=self.user ) self.challenge = Challenge.objects.create( title="Test Challenge", short_description="Short description for test challenge", description="Description for test challenge", terms_and_conditions="Terms and conditions for test challenge", submission_guidelines="Submission guidelines for test challenge", creator=self.challenge_host_team, published=False, enable_forum=True, leaderboard_description=None, anonymous_leaderboard=False, start_date=timezone.now() - timedelta(days=2), end_date=timezone.now() + timedelta(days=1), ) self.client.force_authenticate(user=self.user2) self.challenge.participant_teams.add(self.participant_team2) self.challenge.participant_teams.add(self.participant_team3) self.data = {"email": self.user3.email} self.url = reverse_lazy( "participants:invite_participant_to_team", kwargs={"pk": self.participant_team2.pk}, ) expected = { "error": "Sorry, the invited user has already participated " "in atleast one of the challenges which you are already" " a part of. Please try creating a new team and then invite." } response = self.client.post(self.url, self.data) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE) class DeleteParticipantFromTeamTest(BaseAPITestClass): def setUp(self): super(DeleteParticipantFromTeamTest, self).setUp() self.participant = Participant.objects.create( user=self.user, status=Participant.SELF, team=self.participant_team ) self.user2 = User.objects.create( username="user2", email="user2@platform.com", password="user2_password", ) self.participant2 = Participant.objects.create( user=self.user2, status=Participant.ACCEPTED, team=self.participant_team, ) self.url = reverse_lazy( "participants:delete_participant_from_team", kwargs={ "participant_team_pk": self.participant_team.pk, "participant_pk": self.invite_user.pk, }, ) def test_participant_does_not_exist_in_team(self): self.url = reverse_lazy( "participants:delete_participant_from_team", kwargs={ "participant_team_pk": self.participant_team.pk, "participant_pk": self.participant2.pk + 1, }, ) expected = {"error": "Participant does not exist"} response = self.client.delete(self.url, {}) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE) def test_when_participant_team_does_not_exist(self): self.url = reverse_lazy( "participants:delete_participant_from_team", kwargs={ "participant_team_pk": self.participant_team.pk + 1, "participant_pk": self.participant2.pk, }, ) expected = {"error": "ParticipantTeam does not exist"} response = self.client.delete(self.url, {}) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE) def test_when_participant_is_admin_and_wants_to_delete_himself(self): self.url = reverse_lazy( "participants:delete_participant_from_team", kwargs={ "participant_team_pk": self.participant_team.pk, "participant_pk": self.participant.pk, }, ) expected = { "error": "You are not allowed to remove yourself since you are admin. 
Please delete the team if you want to do so!" # noqa: ignore=E501 } response = self.client.delete(self.url, {}) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE) def test_when_participant_does_not_have_permissions_to_remove_another_participant( self ): self.url = reverse_lazy( "participants:delete_participant_from_team", kwargs={ "participant_team_pk": self.participant_team.pk, "participant_pk": self.participant2.pk, }, ) self.user3 = User.objects.create( username="user3", email="user3@platform.com", password="user3_password", ) EmailAddress.objects.create( user=self.user3, email="user3@platform.com", primary=True, verified=True, ) self.participant3 = Participant.objects.create( user=self.user3, status=Participant.ACCEPTED, team=self.participant_team, ) self.client.force_authenticate(user=self.user3) expected = { "error": "Sorry, you do not have permissions to remove this participant" } response = self.client.delete(self.url, {}) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) def test_when_a_participant_is_successfully_removed_from_team(self): self.url = reverse_lazy( "participants:delete_participant_from_team", kwargs={ "participant_team_pk": self.participant_team.pk, "participant_pk": self.participant2.pk, }, ) response = self.client.delete(self.url, {}) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) class GetTeamsAndCorrespondingChallengesForAParticipant(BaseAPITestClass): def setUp(self): super(GetTeamsAndCorrespondingChallengesForAParticipant, self).setUp() self.user2 = User.objects.create( username="user2", email="user2@platform.com", password="user2_password", ) EmailAddress.objects.create( user=self.user2, email="user2@platform.com", primary=True, verified=True, ) self.participant_team2 = ParticipantTeam.objects.create( team_name="Team B", created_by=self.user2 ) # created by user2 and not user self.participant2 = Participant.objects.create( user=self.user2, status=Participant.ACCEPTED, team=self.participant_team2, ) self.challenge_host_team = ChallengeHostTeam.objects.create( team_name="Host Team 1", created_by=self.user2 ) self.challenge1 = Challenge.objects.create( title="Test Challenge 1", short_description="Short description for test challenge 1", description="Description for test challenge 1", terms_and_conditions="Terms and conditions for test challenge 1", submission_guidelines="Submission guidelines for test challenge 1", creator=self.challenge_host_team, published=False, is_registration_open=True, enable_forum=True, leaderboard_description="Lorem ipsum dolor sit amet, consectetur adipiscing elit", anonymous_leaderboard=False, start_date=timezone.now() - timedelta(days=2), end_date=timezone.now() + timedelta(days=1), ) self.challenge1.slug = "{}-{}".format( self.challenge1.title.replace(" ", "-").lower(), self.challenge1.pk )[:199] self.challenge1.save() self.challenge2 = Challenge.objects.create( title="Test Challenge 2", short_description="Short description for test challenge 2", description="Description for test challenge 2", terms_and_conditions="Terms and conditions for test challenge 2", submission_guidelines="Submission guidelines for test challenge 2", creator=self.challenge_host_team, published=False, is_registration_open=True, enable_forum=True, anonymous_leaderboard=False, start_date=timezone.now() - timedelta(days=2), end_date=timezone.now() + timedelta(days=1), ) self.url = reverse_lazy( 
"participants:get_teams_and_corresponding_challenges_for_a_participant", kwargs={"challenge_pk": self.challenge1.pk}, ) self.time = timezone.now() def test_get_teams_and_corresponding_challenges_for_a_participant(self): self.challenge1.participant_teams.add(self.participant_team) self.challenge1.save() expected = { "challenge_participant_team_list": [ { "challenge": { "id": self.challenge1.id, "title": self.challenge1.title, "description": self.challenge1.description, "short_description": self.challenge1.short_description, "terms_and_conditions": self.challenge1.terms_and_conditions, "submission_guidelines": self.challenge1.submission_guidelines, "evaluation_details": self.challenge1.evaluation_details, "image": self.challenge1.image, "start_date": "{0}{1}".format( self.challenge1.start_date.isoformat(), "Z" ).replace("+00:00", ""), "end_date": "{0}{1}".format( self.challenge1.end_date.isoformat(), "Z" ).replace("+00:00", ""), "creator": { "id": self.challenge_host_team.id, "team_name": self.challenge_host_team.team_name, "created_by": self.challenge_host_team.created_by.username, "team_url": self.challenge_host_team.team_url, }, "published": self.challenge1.published, "is_registration_open": self.challenge1.is_registration_open, "enable_forum": self.challenge1.enable_forum, "leaderboard_description": self.challenge1.leaderboard_description, "anonymous_leaderboard": self.challenge1.anonymous_leaderboard, "is_active": True, "allowed_email_domains": [], "blocked_email_domains": [], "banned_email_ids": [], "approved_by_admin": False, "forum_url": self.challenge1.forum_url, "is_docker_based": self.challenge1.is_docker_based, "slug": self.challenge1.slug, "max_docker_image_size": self.challenge1.max_docker_image_size, "cli_version": self.challenge1.cli_version, }, "participant_team": { "id": self.participant_team.id, "team_name": self.participant_team.team_name, "created_by": self.participant_team.created_by.username, "team_url": self.participant_team.team_url, }, } ], "is_challenge_host": False, } response = self.client.get(self.url, {}) # checking 'datetime_now' separately because of time difference in microseconds self.assertTrue( abs(response.data["datetime_now"] - self.time) < timedelta(seconds=1) ) # deleting field 'datetime_now' from response to check with expected response without time field del response.data["datetime_now"] self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_get_participant_team_challenge_list(self): self.url = reverse_lazy( "participants:get_participant_team_challenge_list", kwargs={"participant_team_pk": self.participant_team.pk}, ) expected = [ { "id": self.challenge1.id, "title": self.challenge1.title, "description": self.challenge1.description, "short_description": self.challenge1.short_description, "terms_and_conditions": self.challenge1.terms_and_conditions, "submission_guidelines": self.challenge1.submission_guidelines, "evaluation_details": self.challenge1.evaluation_details, "image": self.challenge1.image, "start_date": "{0}{1}".format( self.challenge1.start_date.isoformat(), "Z" ).replace("+00:00", ""), "end_date": "{0}{1}".format( self.challenge1.end_date.isoformat(), "Z" ).replace("+00:00", ""), "creator": { "id": self.challenge_host_team.id, "team_name": self.challenge_host_team.team_name, "created_by": self.challenge_host_team.created_by.username, "team_url": self.challenge_host_team.team_url, }, "published": self.challenge1.published, "is_registration_open": self.challenge1.is_registration_open, 
"enable_forum": self.challenge1.enable_forum, "leaderboard_description": self.challenge1.leaderboard_description, "anonymous_leaderboard": self.challenge1.anonymous_leaderboard, "is_active": True, "allowed_email_domains": [], "blocked_email_domains": [], "banned_email_ids": [], "approved_by_admin": False, "forum_url": self.challenge1.forum_url, "is_docker_based": self.challenge1.is_docker_based, "slug": self.challenge1.slug, "max_docker_image_size": self.challenge1.max_docker_image_size, "cli_version": self.challenge1.cli_version, } ] self.challenge1.participant_teams.add(self.participant_team) self.challenge1.save() response = self.client.get(self.url, {}) self.assertEqual(response.data["results"], expected) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_when_participant_team_hasnot_participated_in_any_challenge(self): expected = { "challenge_participant_team_list": [ { "challenge": None, "participant_team": { "id": self.participant_team.id, "team_name": self.participant_team.team_name, "created_by": self.participant_team.created_by.username, "team_url": self.participant_team.team_url, }, } ], "is_challenge_host": False, } response = self.client.get(self.url, {}) # checking 'datetime_now' separately because of time difference in microseconds self.assertTrue( abs(response.data["datetime_now"] - self.time) < timedelta(seconds=1) ) # deleting field 'datetime_now' from response to check with expected response without time field del response.data["datetime_now"] self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_when_there_is_no_participant_team_of_user(self): self.participant_team.delete() expected = { "challenge_participant_team_list": [], "is_challenge_host": False, } response = self.client.get(self.url, {}) # checking 'datetime_now' separately because of time difference in microseconds self.assertTrue( abs(response.data["datetime_now"] - self.time) < timedelta(seconds=1) ) # deleting field 'datetime_now' from response to check with expected response without time field del response.data["datetime_now"] self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_200_OK) class RemoveSelfFromParticipantTeamTest(BaseAPITestClass): def setUp(self): super(RemoveSelfFromParticipantTeamTest, self).setUp() # user who create a challenge host team self.user2 = User.objects.create( username="someuser2", password="some_secret_password" ) self.challenge_host_team = ChallengeHostTeam.objects.create( team_name="Some Test Challenge Host Team", created_by=self.user2 ) self.challenge_host2 = ChallengeHost.objects.create( user=self.user2, team_name=self.challenge_host_team, status=ChallengeHost.ACCEPTED, permissions=ChallengeHost.ADMIN, ) self.challenge = Challenge.objects.create( title="Some Test Challenge", short_description="Short description for some test challenge", description="Description for some test challenge", terms_and_conditions="Terms and conditions for some test challenge", submission_guidelines="Submission guidelines for some test challenge", creator=self.challenge_host_team, published=False, is_registration_open=True, enable_forum=True, leaderboard_description="Fusce quis sapien eget sem accumsan euismod", anonymous_leaderboard=False, start_date=timezone.now() - timedelta(days=2), end_date=timezone.now() + timedelta(days=1), ) self.url = reverse_lazy( "participants:remove_self_from_participant_team", kwargs={"participant_team_pk": self.participant_team.pk}, ) def 
test_when_participant_team_does_not_exist(self): self.url = reverse_lazy( "participants:remove_self_from_participant_team", kwargs={"participant_team_pk": self.participant_team.pk + 1}, ) expected = {"error": "ParticipantTeam does not exist!"} response = self.client.delete(self.url, {}) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE) def test_when_a_participant_is_successfully_removed_from_team(self): self.url = reverse_lazy( "participants:remove_self_from_participant_team", kwargs={"participant_team_pk": self.participant_team.pk}, ) response = self.client.delete(self.url, {}) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) def test_when_participant_team_has_taken_part_in_challenges(self): self.challenge.participant_teams.add(self.participant_team) expected = { "error": "Sorry, you cannot delete this team since it has taken part in challenge(s)!" } response = self.client.delete(self.url, {}) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_participant_team_remove_when_no_participants_exists(self): self.url = reverse_lazy( "participants:remove_self_from_participant_team", kwargs={"participant_team_pk": self.participant_team.pk}, ) self.client.delete(self.url, {}) participant_teams = ParticipantTeam.objects.all() self.assertEqual(participant_teams.count(), 0)
39.089535
148
0.614124
from datetime import timedelta from django.core.urlresolvers import reverse_lazy from django.contrib.auth.models import User from django.utils import timezone from allauth.account.models import EmailAddress from rest_framework import status from rest_framework.test import APITestCase, APIClient from challenges.models import Challenge from hosts.models import ChallengeHost, ChallengeHostTeam from participants.models import ParticipantTeam, Participant class BaseAPITestClass(APITestCase): def setUp(self): self.client = APIClient(enforce_csrf_checks=True) self.user = User.objects.create( username="someuser", email="user@test.com", password="secret_password", ) EmailAddress.objects.create( user=self.user, email="user@test.com", primary=True, verified=True ) self.invite_user = User.objects.create( username="otheruser", email="other@platform.com", password="other_secret_password", ) self.participant_team = ParticipantTeam.objects.create( team_name="Participant Team", created_by=self.user ) self.participant = Participant.objects.create( user=self.user, team=self.participant_team, status=Participant.SELF ) self.client.force_authenticate(user=self.user) class GetParticipantTeamTest(BaseAPITestClass): url = reverse_lazy("participants:get_participant_team_list") def setUp(self): super(GetParticipantTeamTest, self).setUp() self.user2 = User.objects.create( username="user2", email="user2@platform.com", password="user2_password", ) EmailAddress.objects.create( user=self.user2, email="user2@platform.com", primary=True, verified=True, ) self.participant2 = Participant.objects.create( user=self.user2, status=Participant.ACCEPTED, team=self.participant_team, ) def test_get_challenge(self): expected = [ { "id": self.participant_team.pk, "team_name": self.participant_team.team_name, "created_by": self.user.username, "team_url": self.participant_team.team_url, "members": [ { "member_name": self.participant.user.username, "status": self.participant.status, "member_id": self.participant.user.id, }, { "member_name": self.participant2.user.username, "status": self.participant2.status, "member_id": self.participant2.user.id, }, ], } ] response = self.client.get(self.url, {}) self.assertEqual(response.data["results"], expected) self.assertEqual(response.status_code, status.HTTP_200_OK) class CreateParticipantTeamTest(BaseAPITestClass): url = reverse_lazy("participants:get_participant_team_list") def setUp(self): super(CreateParticipantTeamTest, self).setUp() self.data = {"team_name": "New Participant Team"} def test_create_participant_team_with_all_data(self): response = self.client.post(self.url, self.data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_create_participant_team_with_team_name_same_as_with_existing_team( self ): expected = { "team_name": [ "participant team with this team name already exists." 
] } response = self.client.post(self.url, self.data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) response = self.client.post(self.url, self.data) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertEqual(response.data, expected) def test_create_participant_team_with_no_data(self): del self.data["team_name"] response = self.client.post(self.url, self.data) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) class GetParticularParticipantTeam(BaseAPITestClass): def setUp(self): super(GetParticularParticipantTeam, self).setUp() self.url = reverse_lazy( "participants:get_participant_team_details", kwargs={"pk": self.participant_team.pk}, ) self.user2 = User.objects.create( username="user2", email="user2@platform.com", password="user2_password", ) EmailAddress.objects.create( user=self.user2, email="user2@platform.com", primary=True, verified=True, ) self.participant2 = Participant.objects.create( user=self.user2, status=Participant.ACCEPTED, team=self.participant_team, ) def test_get_particular_participant_team(self): expected = { "id": self.participant_team.pk, "team_name": self.participant_team.team_name, "created_by": self.user.username, "team_url": self.participant_team.team_url, "members": [ { "member_name": self.participant.user.username, "status": self.participant.status, "member_id": self.participant.user.id, }, { "member_name": self.participant2.user.username, "status": self.participant2.status, "member_id": self.participant2.user.id, }, ], } response = self.client.get(self.url, {}) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_particular_participant_team_does_not_exist(self): self.url = reverse_lazy( "participants:get_participant_team_details", kwargs={"pk": self.participant_team.pk + 1}, ) expected = {"error": "ParticipantTeam does not exist"} response = self.client.get(self.url, {}) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE) class UpdateParticularParticipantTeam(BaseAPITestClass): def setUp(self): super(UpdateParticularParticipantTeam, self).setUp() self.url = reverse_lazy( "participants:get_participant_team_details", kwargs={"pk": self.participant_team.pk}, ) self.partial_update_participant_team_name = ( "Partial Update Participant Team" ) self.update_participant_team_name = "Update Test Participant Team" self.data = {"team_name": self.update_participant_team_name} def test_particular_participant_team_partial_update(self): self.partial_update_data = { "team_name": self.partial_update_participant_team_name } expected = { "id": self.participant_team.pk, "team_name": self.partial_update_participant_team_name, "created_by": self.user.username, "team_url": self.participant_team.team_url, } response = self.client.patch(self.url, self.partial_update_data) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_particular_participant_team_update(self): expected = { "id": self.participant_team.pk, "team_name": self.update_participant_team_name, "created_by": self.user.username, "team_url": self.participant_team.team_url, } response = self.client.put(self.url, self.data) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_particular_participant_team_update_with_no_data(self): self.data = {"team_name": ""} response = self.client.put(self.url, self.data) 
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) class DeleteParticularParticipantTeam(BaseAPITestClass): def setUp(self): super(DeleteParticularParticipantTeam, self).setUp() self.url = reverse_lazy( "participants:get_participant_team_details", kwargs={"pk": self.participant_team.pk}, ) def test_particular_participant_team_delete(self): response = self.client.delete(self.url, {}) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) class InviteParticipantToTeamTest(BaseAPITestClass): def setUp(self): super(InviteParticipantToTeamTest, self).setUp() self.data = {"email": self.invite_user.email} self.url = reverse_lazy( "participants:invite_participant_to_team", kwargs={"pk": self.participant_team.pk}, ) def test_invite_participant_to_team_with_all_data(self): expected = {"message": "User has been successfully added to the team!"} response = self.client.post(self.url, self.data) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) def test_invite_participant_to_team_with_no_data(self): del self.data["email"] response = self.client.post(self.url, self.data) self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE) def test_invite_self_to_team(self): self.data = {"email": self.user.email} expected = {"error": "User is already part of the team!"} response = self.client.post(self.url, self.data) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE) def test_invite_to_other_team_which_doesnot_belong_to_user(self): temp_user = User.objects.create( username="temp_user", password="test_password" ) temp_participant_team = ParticipantTeam.objects.create( team_name="Test Team 1", created_by=temp_user ) expected = {"error": "You are not a member of this team!"} self.url = reverse_lazy( "participants:invite_participant_to_team", kwargs={"pk": temp_participant_team.pk}, ) response = self.client.post(self.url, self.data) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_invite_user_which_does_not_exist_to_team(self): self.data = {"email": "userwhichdoesnotexist@platform.com"} expected = {"error": "User does not exist with this email address!"} response = self.client.post(self.url, self.data) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE) def test_particular_participant_team_for_invite_does_not_exist(self): self.url = reverse_lazy( "participants:invite_participant_to_team", kwargs={"pk": self.participant_team.pk + 1}, ) expected = {"error": "Participant Team does not exist"} response = self.client.post(self.url, {}) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_invite_participant_to_team_when_user_cannot_be_invited(self): self.user2 = User.objects.create( username="user2", email="user2@platform.com", password="user2_password", ) EmailAddress.objects.create( user=self.user2, email="user2@platform.com", primary=True, verified=True, ) self.user3 = User.objects.create( username="user3", email="user3@platform.com", password="user3_password", ) EmailAddress.objects.create( user=self.user3, email="user3@platform.com", primary=True, verified=True, ) self.participant_team2 = ParticipantTeam.objects.create( team_name="Participant Team created by user 2", created_by=self.user2, ) self.participant_team3 = ParticipantTeam.objects.create( team_name="Participant 
Team created by user 3", created_by=self.user3, ) self.participant2 = Participant.objects.create( user=self.user2, status=Participant.ACCEPTED, team=self.participant_team2, ) self.participant3 = Participant.objects.create( user=self.user3, status=Participant.ACCEPTED, team=self.participant_team3, ) self.challenge_host_team = ChallengeHostTeam.objects.create( team_name="Test Challenge Host Team", created_by=self.user ) self.challenge = Challenge.objects.create( title="Test Challenge", short_description="Short description for test challenge", description="Description for test challenge", terms_and_conditions="Terms and conditions for test challenge", submission_guidelines="Submission guidelines for test challenge", creator=self.challenge_host_team, published=False, enable_forum=True, leaderboard_description=None, anonymous_leaderboard=False, start_date=timezone.now() - timedelta(days=2), end_date=timezone.now() + timedelta(days=1), ) self.client.force_authenticate(user=self.user2) self.challenge.participant_teams.add(self.participant_team2) self.challenge.participant_teams.add(self.participant_team3) self.data = {"email": self.user3.email} self.url = reverse_lazy( "participants:invite_participant_to_team", kwargs={"pk": self.participant_team2.pk}, ) expected = { "error": "Sorry, the invited user has already participated " "in atleast one of the challenges which you are already" " a part of. Please try creating a new team and then invite." } response = self.client.post(self.url, self.data) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE) class DeleteParticipantFromTeamTest(BaseAPITestClass): def setUp(self): super(DeleteParticipantFromTeamTest, self).setUp() self.participant = Participant.objects.create( user=self.user, status=Participant.SELF, team=self.participant_team ) self.user2 = User.objects.create( username="user2", email="user2@platform.com", password="user2_password", ) self.participant2 = Participant.objects.create( user=self.user2, status=Participant.ACCEPTED, team=self.participant_team, ) self.url = reverse_lazy( "participants:delete_participant_from_team", kwargs={ "participant_team_pk": self.participant_team.pk, "participant_pk": self.invite_user.pk, }, ) def test_participant_does_not_exist_in_team(self): self.url = reverse_lazy( "participants:delete_participant_from_team", kwargs={ "participant_team_pk": self.participant_team.pk, "participant_pk": self.participant2.pk + 1, }, ) expected = {"error": "Participant does not exist"} response = self.client.delete(self.url, {}) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE) def test_when_participant_team_does_not_exist(self): self.url = reverse_lazy( "participants:delete_participant_from_team", kwargs={ "participant_team_pk": self.participant_team.pk + 1, "participant_pk": self.participant2.pk, }, ) expected = {"error": "ParticipantTeam does not exist"} response = self.client.delete(self.url, {}) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE) def test_when_participant_is_admin_and_wants_to_delete_himself(self): self.url = reverse_lazy( "participants:delete_participant_from_team", kwargs={ "participant_team_pk": self.participant_team.pk, "participant_pk": self.participant.pk, }, ) expected = { "error": "You are not allowed to remove yourself since you are admin. Please delete the team if you want to do so!" 
} response = self.client.delete(self.url, {}) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE) def test_when_participant_does_not_have_permissions_to_remove_another_participant( self ): self.url = reverse_lazy( "participants:delete_participant_from_team", kwargs={ "participant_team_pk": self.participant_team.pk, "participant_pk": self.participant2.pk, }, ) self.user3 = User.objects.create( username="user3", email="user3@platform.com", password="user3_password", ) EmailAddress.objects.create( user=self.user3, email="user3@platform.com", primary=True, verified=True, ) self.participant3 = Participant.objects.create( user=self.user3, status=Participant.ACCEPTED, team=self.participant_team, ) self.client.force_authenticate(user=self.user3) expected = { "error": "Sorry, you do not have permissions to remove this participant" } response = self.client.delete(self.url, {}) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) def test_when_a_participant_is_successfully_removed_from_team(self): self.url = reverse_lazy( "participants:delete_participant_from_team", kwargs={ "participant_team_pk": self.participant_team.pk, "participant_pk": self.participant2.pk, }, ) response = self.client.delete(self.url, {}) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) class GetTeamsAndCorrespondingChallengesForAParticipant(BaseAPITestClass): def setUp(self): super(GetTeamsAndCorrespondingChallengesForAParticipant, self).setUp() self.user2 = User.objects.create( username="user2", email="user2@platform.com", password="user2_password", ) EmailAddress.objects.create( user=self.user2, email="user2@platform.com", primary=True, verified=True, ) self.participant_team2 = ParticipantTeam.objects.create( team_name="Team B", created_by=self.user2 ) self.participant2 = Participant.objects.create( user=self.user2, status=Participant.ACCEPTED, team=self.participant_team2, ) self.challenge_host_team = ChallengeHostTeam.objects.create( team_name="Host Team 1", created_by=self.user2 ) self.challenge1 = Challenge.objects.create( title="Test Challenge 1", short_description="Short description for test challenge 1", description="Description for test challenge 1", terms_and_conditions="Terms and conditions for test challenge 1", submission_guidelines="Submission guidelines for test challenge 1", creator=self.challenge_host_team, published=False, is_registration_open=True, enable_forum=True, leaderboard_description="Lorem ipsum dolor sit amet, consectetur adipiscing elit", anonymous_leaderboard=False, start_date=timezone.now() - timedelta(days=2), end_date=timezone.now() + timedelta(days=1), ) self.challenge1.slug = "{}-{}".format( self.challenge1.title.replace(" ", "-").lower(), self.challenge1.pk )[:199] self.challenge1.save() self.challenge2 = Challenge.objects.create( title="Test Challenge 2", short_description="Short description for test challenge 2", description="Description for test challenge 2", terms_and_conditions="Terms and conditions for test challenge 2", submission_guidelines="Submission guidelines for test challenge 2", creator=self.challenge_host_team, published=False, is_registration_open=True, enable_forum=True, anonymous_leaderboard=False, start_date=timezone.now() - timedelta(days=2), end_date=timezone.now() + timedelta(days=1), ) self.url = reverse_lazy( "participants:get_teams_and_corresponding_challenges_for_a_participant", kwargs={"challenge_pk": self.challenge1.pk}, ) 
self.time = timezone.now() def test_get_teams_and_corresponding_challenges_for_a_participant(self): self.challenge1.participant_teams.add(self.participant_team) self.challenge1.save() expected = { "challenge_participant_team_list": [ { "challenge": { "id": self.challenge1.id, "title": self.challenge1.title, "description": self.challenge1.description, "short_description": self.challenge1.short_description, "terms_and_conditions": self.challenge1.terms_and_conditions, "submission_guidelines": self.challenge1.submission_guidelines, "evaluation_details": self.challenge1.evaluation_details, "image": self.challenge1.image, "start_date": "{0}{1}".format( self.challenge1.start_date.isoformat(), "Z" ).replace("+00:00", ""), "end_date": "{0}{1}".format( self.challenge1.end_date.isoformat(), "Z" ).replace("+00:00", ""), "creator": { "id": self.challenge_host_team.id, "team_name": self.challenge_host_team.team_name, "created_by": self.challenge_host_team.created_by.username, "team_url": self.challenge_host_team.team_url, }, "published": self.challenge1.published, "is_registration_open": self.challenge1.is_registration_open, "enable_forum": self.challenge1.enable_forum, "leaderboard_description": self.challenge1.leaderboard_description, "anonymous_leaderboard": self.challenge1.anonymous_leaderboard, "is_active": True, "allowed_email_domains": [], "blocked_email_domains": [], "banned_email_ids": [], "approved_by_admin": False, "forum_url": self.challenge1.forum_url, "is_docker_based": self.challenge1.is_docker_based, "slug": self.challenge1.slug, "max_docker_image_size": self.challenge1.max_docker_image_size, "cli_version": self.challenge1.cli_version, }, "participant_team": { "id": self.participant_team.id, "team_name": self.participant_team.team_name, "created_by": self.participant_team.created_by.username, "team_url": self.participant_team.team_url, }, } ], "is_challenge_host": False, } response = self.client.get(self.url, {}) self.assertTrue( abs(response.data["datetime_now"] - self.time) < timedelta(seconds=1) ) del response.data["datetime_now"] self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_get_participant_team_challenge_list(self): self.url = reverse_lazy( "participants:get_participant_team_challenge_list", kwargs={"participant_team_pk": self.participant_team.pk}, ) expected = [ { "id": self.challenge1.id, "title": self.challenge1.title, "description": self.challenge1.description, "short_description": self.challenge1.short_description, "terms_and_conditions": self.challenge1.terms_and_conditions, "submission_guidelines": self.challenge1.submission_guidelines, "evaluation_details": self.challenge1.evaluation_details, "image": self.challenge1.image, "start_date": "{0}{1}".format( self.challenge1.start_date.isoformat(), "Z" ).replace("+00:00", ""), "end_date": "{0}{1}".format( self.challenge1.end_date.isoformat(), "Z" ).replace("+00:00", ""), "creator": { "id": self.challenge_host_team.id, "team_name": self.challenge_host_team.team_name, "created_by": self.challenge_host_team.created_by.username, "team_url": self.challenge_host_team.team_url, }, "published": self.challenge1.published, "is_registration_open": self.challenge1.is_registration_open, "enable_forum": self.challenge1.enable_forum, "leaderboard_description": self.challenge1.leaderboard_description, "anonymous_leaderboard": self.challenge1.anonymous_leaderboard, "is_active": True, "allowed_email_domains": [], "blocked_email_domains": [], "banned_email_ids": [], 
"approved_by_admin": False, "forum_url": self.challenge1.forum_url, "is_docker_based": self.challenge1.is_docker_based, "slug": self.challenge1.slug, "max_docker_image_size": self.challenge1.max_docker_image_size, "cli_version": self.challenge1.cli_version, } ] self.challenge1.participant_teams.add(self.participant_team) self.challenge1.save() response = self.client.get(self.url, {}) self.assertEqual(response.data["results"], expected) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_when_participant_team_hasnot_participated_in_any_challenge(self): expected = { "challenge_participant_team_list": [ { "challenge": None, "participant_team": { "id": self.participant_team.id, "team_name": self.participant_team.team_name, "created_by": self.participant_team.created_by.username, "team_url": self.participant_team.team_url, }, } ], "is_challenge_host": False, } response = self.client.get(self.url, {}) self.assertTrue( abs(response.data["datetime_now"] - self.time) < timedelta(seconds=1) ) del response.data["datetime_now"] self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_when_there_is_no_participant_team_of_user(self): self.participant_team.delete() expected = { "challenge_participant_team_list": [], "is_challenge_host": False, } response = self.client.get(self.url, {}) self.assertTrue( abs(response.data["datetime_now"] - self.time) < timedelta(seconds=1) ) del response.data["datetime_now"] self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_200_OK) class RemoveSelfFromParticipantTeamTest(BaseAPITestClass): def setUp(self): super(RemoveSelfFromParticipantTeamTest, self).setUp() self.user2 = User.objects.create( username="someuser2", password="some_secret_password" ) self.challenge_host_team = ChallengeHostTeam.objects.create( team_name="Some Test Challenge Host Team", created_by=self.user2 ) self.challenge_host2 = ChallengeHost.objects.create( user=self.user2, team_name=self.challenge_host_team, status=ChallengeHost.ACCEPTED, permissions=ChallengeHost.ADMIN, ) self.challenge = Challenge.objects.create( title="Some Test Challenge", short_description="Short description for some test challenge", description="Description for some test challenge", terms_and_conditions="Terms and conditions for some test challenge", submission_guidelines="Submission guidelines for some test challenge", creator=self.challenge_host_team, published=False, is_registration_open=True, enable_forum=True, leaderboard_description="Fusce quis sapien eget sem accumsan euismod", anonymous_leaderboard=False, start_date=timezone.now() - timedelta(days=2), end_date=timezone.now() + timedelta(days=1), ) self.url = reverse_lazy( "participants:remove_self_from_participant_team", kwargs={"participant_team_pk": self.participant_team.pk}, ) def test_when_participant_team_does_not_exist(self): self.url = reverse_lazy( "participants:remove_self_from_participant_team", kwargs={"participant_team_pk": self.participant_team.pk + 1}, ) expected = {"error": "ParticipantTeam does not exist!"} response = self.client.delete(self.url, {}) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE) def test_when_a_participant_is_successfully_removed_from_team(self): self.url = reverse_lazy( "participants:remove_self_from_participant_team", kwargs={"participant_team_pk": self.participant_team.pk}, ) response = self.client.delete(self.url, {}) self.assertEqual(response.status_code, 
status.HTTP_204_NO_CONTENT) def test_when_participant_team_has_taken_part_in_challenges(self): self.challenge.participant_teams.add(self.participant_team) expected = { "error": "Sorry, you cannot delete this team since it has taken part in challenge(s)!" } response = self.client.delete(self.url, {}) self.assertEqual(response.data, expected) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_participant_team_remove_when_no_participants_exists(self): self.url = reverse_lazy( "participants:remove_self_from_participant_team", kwargs={"participant_team_pk": self.participant_team.pk}, ) self.client.delete(self.url, {}) participant_teams = ParticipantTeam.objects.all() self.assertEqual(participant_teams.count(), 0)
true
true
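The participant-team tests above repeat one pattern worth calling out: the API response carries a volatile "datetime_now" value, so each test first asserts that it lies within a second of a reference timestamp and then deletes it before comparing the rest of the payload to "expected". The sketch below shows that pattern factored into a small helper, assuming the same Django REST Framework test setup; the helper name assert_payload_ignoring_timestamp is purely illustrative and is not part of the original suite.

from datetime import timedelta
from django.utils import timezone

def assert_payload_ignoring_timestamp(testcase, response, expected, field="datetime_now"):
    """Assert a DRF response matches `expected`, treating `field` as a volatile timestamp.

    Illustrative sketch only: the timestamp merely has to fall within one second of "now";
    it is then dropped so the remaining fields can be compared exactly.
    """
    testcase.assertTrue(abs(response.data[field] - timezone.now()) < timedelta(seconds=1))
    payload = dict(response.data)
    payload.pop(field)
    testcase.assertEqual(payload, expected)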
f7029017ccc2630634311f5e276c0e3fb26d7895
6,132
py
Python
main.py
Ansaku/TempEmail-Bot
680e6456e9a9caa19df6e8cc3d6b2cb3ff998ff4
[ "Apache-2.0" ]
null
null
null
main.py
Ansaku/TempEmail-Bot
680e6456e9a9caa19df6e8cc3d6b2cb3ff998ff4
[ "Apache-2.0" ]
null
null
null
main.py
Ansaku/TempEmail-Bot
680e6456e9a9caa19df6e8cc3d6b2cb3ff998ff4
[ "Apache-2.0" ]
1
2022-03-23T23:36:29.000Z
2022-03-23T23:36:29.000Z
# copyright 2022 @Ansaku # Telegram @AnkiSatya # Instagram @satya_ask import telebot import requests from telebot.types import InlineKeyboardButton # Fillout Here The BotToken it gets from botfather further queries @AnkiSatya 0n telegram bot = telebot.TeleBot('**********************') while True: try: keyboard = telebot.types.ReplyKeyboardMarkup(resize_keyboard=True) keyboard.add(InlineKeyboardButton(text='Buat email')) keyboard.add(InlineKeyboardButton(text='Refresh pesan')) keyboard.add(InlineKeyboardButton(text='Tentang')) @bot.message_handler(commands=['start']) def start_message(message): bot.send_message(message.chat.id, 'Hai Pengguna., Selamat datang di TempEmail Bot \nPenggunaan:\nUntuk Menghasilkan email klik tombol "Buat email"\nUntuk menyegarkan kotak masuk Anda, klik tombol "Refresh inbox". Setelah surat baru tiba, Anda akan melihat tombol dengan baris subjek, klik tombol read the message. \n\n Dev : @AnkiSatya', reply_markup=keyboard) @bot.message_handler(content_types=['text']) def send_text(message): if message.text.lower() == 'buat email': email = requests.get("https://www.1secmail.com/api/v1/?action=genRandomMailbox&count=1").json()[0] ekeyboard = telebot.types.ReplyKeyboardMarkup(resize_keyboard=True) ekeyboard.add(InlineKeyboardButton(text='Buat email')) ekeyboard.add(InlineKeyboardButton(text='Refresh pesan\n[' + str(email) + "]")) ekeyboard.add(InlineKeyboardButton(text='Tentang')) bot.send_message(message.chat.id, "E-Mail Sementara Anda:") bot.send_message(message.chat.id, str(email), reply_markup=ekeyboard) elif message.text.lower() == 'refresh pesan': bot.send_message(message.chat.id, 'Pertama, buat email anda', reply_markup=keyboard) elif message.text.lower() == 'tentang': bot.send_message(message.chat.id, 'Apa itu Email Semantara?\n- Itu adalah layanan email gratis yang memungkinkan untuk menerima email di alamat sementara yang akan dihancurkan sendiri setelah waktu tertentu berlalu. Itu juga dikenal dengan nama-nama seperti tempmail, 10minutemail, 10minmail, throwaway email, fake-mail , fake email generator, burner mail atau trash-mail\n\nBagaimana Email Sementara Menjadi Lebih Aman bagi Anda?\n- Menggunakan Email sementara memungkinkan Anda untuk sepenuhnya melindungi kotak surat asli Anda dari hilangnya informasi pribadi. Alamat email sementara Anda sepenuhnya anonim. Detail Anda: informasi tentang orang Anda dan pengguna yang berkomunikasi dengan Anda, alamat IP, alamat email dilindungi dan sepenuhnya dirahasiakan.\n\n➪ Nama Bot : TempMail Bot\n➪ Pembuat : @AnkiSatya\n➪ Language : Python \n➪ Donasi : https://saweria.co/ansaku') elif message.text.lower()[14] == "[": email = message.text.lower()[15:message.text.lower().find("]")] bkeyboard = telebot.types.ReplyKeyboardMarkup(resize_keyboard=True) bkeyboard.add(InlineKeyboardButton(text='Refresh pesan\n[' + str(email) + "]")) bkeyboard.add(InlineKeyboardButton(text='Buat email')) try: data = requests.get( "https://www.1secmail.com/api/v1/?action=getMessages&login=" + email[:email.find( "@")] + "&domain=" + email[email.find("@") + 1:]).json() if 'id' in data[0]: for i in range(len(data)): id = data[i]['id'] subject = data[i]['subject'] fromm = data[i]['from'] date = data[i]['date'] if len(subject) > 15: subject = str(subject[0:15]) + "..." 
bkeyboard.add(InlineKeyboardButton( text=str(subject) + "\n dari: " + fromm + " in " + "[id" + str(id) + "][" + str( email) + "]")) bot.send_message(message.chat.id, "Subjek: " + subject + "\n Dari: " + fromm + "\n Tanggal:" + date, reply_markup=bkeyboard) count = i + 1 bot.send_message(message.chat.id, "Di Sini " + str( count) + " Pesan ditemukan\nKlik tombol di bawah untuk membaca pesan\n\n Info lebih lanjut @AnkiSatya") else: bot.send_message(message.chat.id, 'Tidak ditemukan', reply_markup=bkeyboard) except BaseException: bot.send_message(message.chat.id, 'Tidak ada pesan yang diterima...', reply_markup=bkeyboard) elif message.text.lower().find("[id") != -1: try: data = message.text.lower()[message.text.lower().find("[id"):] id = data[data.find("[") + 3:data.find(']')] email = data[data.find("][") + 2:-1] msg = requests.get("https://www.1secmail.com/api/v1/?action=readMessage&login=" + email[:email.find( "@")] + "&domain=" + email[email.find("@") + 1:] + "&id=" + id).json() bot.send_message(message.chat.id, 'Pesan ✉️\n\n Dari: ' + msg['from'] + "\n Subjek: " + msg[ 'subject'] + "\n Tanggal: " + msg[ 'date'] + "\n Teks: " + msg['textBody']) except BaseException: pass bot.polling(none_stop=True, interval=1, timeout=5000) except BaseException: pass # Stay tuned for more : Telegram @AnkiSatya
66.652174
876
0.551696
import telebot import requests from telebot.types import InlineKeyboardButton bot = telebot.TeleBot('**********************') while True: try: keyboard = telebot.types.ReplyKeyboardMarkup(resize_keyboard=True) keyboard.add(InlineKeyboardButton(text='Buat email')) keyboard.add(InlineKeyboardButton(text='Refresh pesan')) keyboard.add(InlineKeyboardButton(text='Tentang')) @bot.message_handler(commands=['start']) def start_message(message): bot.send_message(message.chat.id, 'Hai Pengguna., Selamat datang di TempEmail Bot \nPenggunaan:\nUntuk Menghasilkan email klik tombol "Buat email"\nUntuk menyegarkan kotak masuk Anda, klik tombol "Refresh inbox". Setelah surat baru tiba, Anda akan melihat tombol dengan baris subjek, klik tombol read the message. \n\n Dev : @AnkiSatya', reply_markup=keyboard) @bot.message_handler(content_types=['text']) def send_text(message): if message.text.lower() == 'buat email': email = requests.get("https://www.1secmail.com/api/v1/?action=genRandomMailbox&count=1").json()[0] ekeyboard = telebot.types.ReplyKeyboardMarkup(resize_keyboard=True) ekeyboard.add(InlineKeyboardButton(text='Buat email')) ekeyboard.add(InlineKeyboardButton(text='Refresh pesan\n[' + str(email) + "]")) ekeyboard.add(InlineKeyboardButton(text='Tentang')) bot.send_message(message.chat.id, "E-Mail Sementara Anda:") bot.send_message(message.chat.id, str(email), reply_markup=ekeyboard) elif message.text.lower() == 'refresh pesan': bot.send_message(message.chat.id, 'Pertama, buat email anda', reply_markup=keyboard) elif message.text.lower() == 'tentang': bot.send_message(message.chat.id, 'Apa itu Email Semantara?\n- Itu adalah layanan email gratis yang memungkinkan untuk menerima email di alamat sementara yang akan dihancurkan sendiri setelah waktu tertentu berlalu. Itu juga dikenal dengan nama-nama seperti tempmail, 10minutemail, 10minmail, throwaway email, fake-mail , fake email generator, burner mail atau trash-mail\n\nBagaimana Email Sementara Menjadi Lebih Aman bagi Anda?\n- Menggunakan Email sementara memungkinkan Anda untuk sepenuhnya melindungi kotak surat asli Anda dari hilangnya informasi pribadi. Alamat email sementara Anda sepenuhnya anonim. Detail Anda: informasi tentang orang Anda dan pengguna yang berkomunikasi dengan Anda, alamat IP, alamat email dilindungi dan sepenuhnya dirahasiakan.\n\n➪ Nama Bot : TempMail Bot\n➪ Pembuat : @AnkiSatya\n➪ Language : Python \n➪ Donasi : https://saweria.co/ansaku') elif message.text.lower()[14] == "[": email = message.text.lower()[15:message.text.lower().find("]")] bkeyboard = telebot.types.ReplyKeyboardMarkup(resize_keyboard=True) bkeyboard.add(InlineKeyboardButton(text='Refresh pesan\n[' + str(email) + "]")) bkeyboard.add(InlineKeyboardButton(text='Buat email')) try: data = requests.get( "https://www.1secmail.com/api/v1/?action=getMessages&login=" + email[:email.find( "@")] + "&domain=" + email[email.find("@") + 1:]).json() if 'id' in data[0]: for i in range(len(data)): id = data[i]['id'] subject = data[i]['subject'] fromm = data[i]['from'] date = data[i]['date'] if len(subject) > 15: subject = str(subject[0:15]) + "..." 
bkeyboard.add(InlineKeyboardButton( text=str(subject) + "\n dari: " + fromm + " in " + "[id" + str(id) + "][" + str( email) + "]")) bot.send_message(message.chat.id, "Subjek: " + subject + "\n Dari: " + fromm + "\n Tanggal:" + date, reply_markup=bkeyboard) count = i + 1 bot.send_message(message.chat.id, "Di Sini " + str( count) + " Pesan ditemukan\nKlik tombol di bawah untuk membaca pesan\n\n Info lebih lanjut @AnkiSatya") else: bot.send_message(message.chat.id, 'Tidak ditemukan', reply_markup=bkeyboard) except BaseException: bot.send_message(message.chat.id, 'Tidak ada pesan yang diterima...', reply_markup=bkeyboard) elif message.text.lower().find("[id") != -1: try: data = message.text.lower()[message.text.lower().find("[id"):] id = data[data.find("[") + 3:data.find(']')] email = data[data.find("][") + 2:-1] msg = requests.get("https://www.1secmail.com/api/v1/?action=readMessage&login=" + email[:email.find( "@")] + "&domain=" + email[email.find("@") + 1:] + "&id=" + id).json() bot.send_message(message.chat.id, 'Pesan ✉️\n\n Dari: ' + msg['from'] + "\n Subjek: " + msg[ 'subject'] + "\n Tanggal: " + msg[ 'date'] + "\n Teks: " + msg['textBody']) except BaseException: pass bot.polling(none_stop=True, interval=1, timeout=5000) except BaseException: pass
true
true
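main.py above keeps its state inside the reply-keyboard captions themselves: the generated address travels as "[user@domain]" in the "Refresh pesan" button, and each inbox entry as "[id<N>][user@domain]", which the handler later recovers with find() and slicing. As a reading aid, here is a minimal stand-alone sketch of that caption format; the function names are ours and are not part of the bot.

def parse_address(caption):
    """Pull the e-mail address embedded as '[user@domain]' out of a button caption."""
    start = caption.find("[")
    end = caption.find("]", start)
    if start == -1 or end == -1:
        return None
    return caption[start + 1:end]

def parse_message_reference(caption):
    """Pull (message_id, address) out of a caption ending in '[id<N>][user@domain]'."""
    marker = caption.find("[id")
    if marker == -1:
        return None
    tail = caption[marker:]
    message_id = tail[3:tail.find("]")]
    address = tail[tail.find("][") + 2:-1]
    return message_id, address

# parse_message_reference("Subject...\n dari: x in [id5][abc@1secmail.com]")
# returns ("5", "abc@1secmail.com"), mirroring the slicing done in the handler above.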
f7029380b00f9f11d71141fa3f7732ac4f5ba6c7
8,120
py
Python
lib/dnsServer/namecoindns.py
bankonmecoin/nmcontrol
49a879b98bb2885e3ba7c9fdf6dfe7e122e0edad
[ "FTL" ]
161
2015-01-08T19:45:20.000Z
2022-01-15T18:25:21.000Z
lib/dnsServer/namecoindns.py
bankonmecoin/nmcontrol
49a879b98bb2885e3ba7c9fdf6dfe7e122e0edad
[ "FTL" ]
77
2015-01-02T02:20:59.000Z
2020-05-25T08:33:56.000Z
lib/dnsServer/namecoindns.py
namecoin/nmcontrol
49a879b98bb2885e3ba7c9fdf6dfe7e122e0edad
[ "FTL" ]
36
2015-02-26T01:51:53.000Z
2022-01-15T18:25:45.000Z
#name_scan "d/yourdomain" 1 import sys, os #sys.path.append('/home/khal/sources/nmcontrol/lib/') import DNS import rpcClient import struct, listdns, base64, types, json, random #from jsonrpc import ServiceProxy from utils import * from common import * class Source(object): #def __init__(self): #self.servers = app['services']['dns'].conf['resolver'].split(',') #self.reqobj = DNS.Request() #jsonfile = open("config.json", "r") #data = json.loads(jsonfile.read()) #jsonfile.close() #username = str(data[u"username"]) #port = data[u"port"] #password = str(data[u"password"]) #self.sp = ServiceProxy("http://%(user)s:%(passwd)s@127.0.0.1:%(port)d" % dict(user=username, passwd=password, port=port)) #elf.sp = rpcClient.rpcClientNamecoin('127.0.0.1', port, username, password) #self.sp = app['plugins']['domain'] # def _parse_file(self): # f = open(self._filename, "r") # for line in f.readlines(): # line = line.strip() # if line and line[0] != '#': # question, type, value = line.split() # question = question.lower() # type = type.upper() # if question == '@': # question = '' # if type == 'A': # answer = struct.pack("!I", ipstr2int(value)) # qtype = 1 # if type == 'NS': # answer = labels2str(value.split(".")) # qtype = 2 # elif type == 'CNAME': # answer = labels2str(value.split(".")) # qtype = 5 # elif type == 'TXT': # answer = label2str(value) # qtype = 16 # elif type == 'MX': # preference, domain = value.split(":") # answer = struct.pack("!H", int(preference)) # answer += labels2str(domain.split(".")) # qtype = 15 # self._answers.setdefault(question, {}).setdefault(qtype, []).append(answer) # f.close() def isIP(self, host) : parts = host.split(".") if len(parts) != 4: return False try : valid = False for part in parts : intpart = int(part) if intpart <= 255 and intpart >= 0 : valid = True else : return False if valid : return True return False except : return False def get_response(self, query, domain, qtype, qclass, src_addr): #print query #print domain #print qtype #print qclass #print src_addr if qtype == 1: #answer = struct.pack("!I", ipstr2int(value)) reqtype = "A" if qtype == 2: #answer = labels2str(value.split(".")) reqtype = "NS" elif qtype == 5: #answer = labels2str(value.split(".")) reqtype = "CNAME" elif qtype == 16: #answer = label2str(value) reqtype = "TXT" elif qtype == 15: #preference, domain = value.split(":") #nswer = struct.pack("!H", int(preference)) #answer += labels2str(domain.split(".")) reqtype = "MX" elif qtype == 28: #answer = struct.pack("!I", ipstr2int(value)) reqtype = "AAAA" elif qtype == 52: reqtype = "TLSA" else : reqtype = None answers = app['services']['dns'].lookup({"query":query, "domain":domain, "qtype":qtype, "qclass":qclass, "src_addr":src_addr}) #print 'domain:', domain #print 'answers:', answers if domain.endswith(".bit") or domain.endswith(".tor") : #response = listdns.lookup(self.sp, {"query":query, "domain":domain, "qtype":qtype, "qclass":qclass, "src_addr":src_addr}) #response = self.sp.lookup({"query":query, "domain":domain, "qtype":qtype, "qclass":qclass, "src_addr":src_addr}) response = answers results = [] if type(response) == types.DictType : tempresults = {"qtype":response["type"], "qclass":response["class"], "ttl":response["ttl"]} if response["type"] == 1 : #if answers == [] : # return self.get_response(query, domain, 5, qclass, src_addr) tempresults["rdata"] = struct.pack("!I", ipstr2int(response["data"])) elif response["type"] == 2 or response["type"] == 5: tempresults["rdata"] = labels2str(response["data"].split(".")) elif response["type"] == 16 : 
tempresults["rdata"] = labels2str(response["data"]) elif response["type"] == 15 : tempresult = struct.pack("!H", response["data"][0]) tempresult += labels2str(response["data"][1].split(".")) tempresults["rdata"] = tempresult elif response["type"] == 28 : tempresults["rdata"] = response["data"] elif response["type"] == 52 : tempresult = '\x03\x00' tempresult += chr(int(response["data"][0][0])) tempresult += bytearray.fromhex(response["data"][0][1]) tempresults["rdata"] = tempresult #else : return 3, [] results.append(tempresults) return 0, results if type(response) == types.StringType : if self.isIP(response) : return 0, [{"qtype":1, "qclass":qclass, "ttl":300, "rdata":struct.pack("!I", ipstr2int(response))}] return 3, [] #if query not in self._answers: #return 3, [] #if qtype in self._answers[query]: #if domain == "sonicrules.bit": # results = [{'qtype': 1, 'qclass':qclass, 'ttl': 300, 'rdata': struct.pack("!I", ipstr2int(self.reqobj.req("sonicrules.org", qtype=1).answers[0]["data"]))}] # return 0, results #elif qtype == 1: # if they asked for an A record and we didn't find one, check for a CNAME #return self.get_response(query, domain, 5, qclass, src_addr) else: #server = self.servers[random.randrange(0, len(self.servers)-1)] #answers = self.reqobj.req(name=domain, qtype=qtype, server=server).answers results = [] for response in answers : tempresults = {"qtype":response["type"], "qclass":response["class"], "ttl":response["ttl"]} if response["type"] == 1 : if answers == [] : return self.get_response(query, domain, 5, qclass, src_addr) tempresults["rdata"] = struct.pack("!I", ipstr2int(response["data"])) elif response["type"] == 2 or response["type"] == 5: tempresults["rdata"] = labels2str(response["data"].split(".")) elif response["type"] == 16 : tempresults["rdata"] = labels2str(response["data"]) elif response["type"] == 15 : tempresult = struct.pack("!H", response["data"][0]) tempresult += labels2str(response["data"][1].split(".")) tempresults["rdata"] = tempresult elif response["type"] == 28 : if answers == [] : return self.get_response(query, domain, 5, qclass, src_addr) #tempresults["rdata"] = struct.pack("!I", ipstr2int(response["data"])) tempresults["rdata"] = response["data"] elif response["type"] == 52 : tempresults["rdata"] = response["data"] #else : return 3, [] results.append(tempresults) return 0, results return 3, []
46.936416
172
0.491995
import sys, os import DNS import rpcClient import struct, listdns, base64, types, json, random from utils import * from common import * class Source(object): def isIP(self, host) : parts = host.split(".") if len(parts) != 4: return False try : valid = False for part in parts : intpart = int(part) if intpart <= 255 and intpart >= 0 : valid = True else : return False if valid : return True return False except : return False def get_response(self, query, domain, qtype, qclass, src_addr): if qtype == 1: reqtype = "A" if qtype == 2: reqtype = "NS" elif qtype == 5: reqtype = "CNAME" elif qtype == 16: reqtype = "TXT" elif qtype == 15: reqtype = "MX" elif qtype == 28: reqtype = "AAAA" elif qtype == 52: reqtype = "TLSA" else : reqtype = None answers = app['services']['dns'].lookup({"query":query, "domain":domain, "qtype":qtype, "qclass":qclass, "src_addr":src_addr}) if domain.endswith(".bit") or domain.endswith(".tor") : response = answers results = [] if type(response) == types.DictType : tempresults = {"qtype":response["type"], "qclass":response["class"], "ttl":response["ttl"]} if response["type"] == 1 : tempresults["rdata"] = struct.pack("!I", ipstr2int(response["data"])) elif response["type"] == 2 or response["type"] == 5: tempresults["rdata"] = labels2str(response["data"].split(".")) elif response["type"] == 16 : tempresults["rdata"] = labels2str(response["data"]) elif response["type"] == 15 : tempresult = struct.pack("!H", response["data"][0]) tempresult += labels2str(response["data"][1].split(".")) tempresults["rdata"] = tempresult elif response["type"] == 28 : tempresults["rdata"] = response["data"] elif response["type"] == 52 : tempresult = '\x03\x00' tempresult += chr(int(response["data"][0][0])) tempresult += bytearray.fromhex(response["data"][0][1]) tempresults["rdata"] = tempresult results.append(tempresults) return 0, results if type(response) == types.StringType : if self.isIP(response) : return 0, [{"qtype":1, "qclass":qclass, "ttl":300, "rdata":struct.pack("!I", ipstr2int(response))}] return 3, [] #return self.get_response(query, domain, 5, qclass, src_addr) else: #server = self.servers[random.randrange(0, len(self.servers)-1)] #answers = self.reqobj.req(name=domain, qtype=qtype, server=server).answers results = [] for response in answers : tempresults = {"qtype":response["type"], "qclass":response["class"], "ttl":response["ttl"]} if response["type"] == 1 : if answers == [] : return self.get_response(query, domain, 5, qclass, src_addr) tempresults["rdata"] = struct.pack("!I", ipstr2int(response["data"])) elif response["type"] == 2 or response["type"] == 5: tempresults["rdata"] = labels2str(response["data"].split(".")) elif response["type"] == 16 : tempresults["rdata"] = labels2str(response["data"]) elif response["type"] == 15 : tempresult = struct.pack("!H", response["data"][0]) tempresult += labels2str(response["data"][1].split(".")) tempresults["rdata"] = tempresult elif response["type"] == 28 : if answers == [] : return self.get_response(query, domain, 5, qclass, src_addr) #tempresults["rdata"] = struct.pack("!I", ipstr2int(response["data"])) tempresults["rdata"] = response["data"] elif response["type"] == 52 : tempresults["rdata"] = response["data"] #else : return 3, [] results.append(tempresults) return 0, results return 3, []
true
true
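namecoindns.py above builds raw DNS RDATA with two helpers imported from the project's utils module, ipstr2int and labels2str, which are not shown in this file. The snippet below only illustrates the standard wire encodings those calls correspond to, namely an IPv4 address packed as four big-endian bytes for an A record and a domain name packed as length-prefixed labels terminated by a zero byte; it is not the project's actual implementation.

import struct

def pack_ipv4(addr):
    """A-record RDATA: the dotted-quad address as 4 big-endian bytes."""
    return struct.pack("!BBBB", *(int(part) for part in addr.split(".")))

def pack_name(name):
    """NS/CNAME-style RDATA: each label prefixed with its length, terminated by 0x00."""
    encoded = b""
    for label in name.rstrip(".").split("."):
        raw = label.encode("ascii")
        encoded += struct.pack("!B", len(raw)) + raw
    return encoded + b"\x00"

# pack_name("example.bit") == b"\x07example\x03bit\x00"
# pack_ipv4("127.0.0.1") == b"\x7f\x00\x00\x01"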
f702939c992f164058c986345c72844ea2c3df0a
2,852
py
Python
tests/data_tests/writer_tests/json_writer_test.py
alueschow/polymatheia
e46a38b3686139bbab3a2fcfaa914d4ca938654e
[ "MIT" ]
3
2020-09-15T15:15:34.000Z
2021-06-15T10:35:07.000Z
tests/data_tests/writer_tests/json_writer_test.py
alueschow/polymatheia
e46a38b3686139bbab3a2fcfaa914d4ca938654e
[ "MIT" ]
7
2020-09-03T12:53:34.000Z
2020-10-05T09:14:29.000Z
tests/data_tests/writer_tests/json_writer_test.py
alueschow/polymatheia
e46a38b3686139bbab3a2fcfaa914d4ca938654e
[ "MIT" ]
2
2020-10-13T09:12:21.000Z
2021-04-15T14:19:06.000Z
"""Tests for the :mod:`~polymatheia.data.writer` package.""" import json import os from shutil import rmtree from polymatheia.data import NavigableDict from polymatheia.data.writer import JSONWriter DOCUMENTS = [NavigableDict(r) for r in [ { 'id': '1', 'name': { 'first': 'A', 'last': 'Person' }, 'age': 32, 'special tags': 'The first' }, { 'id': '2', 'name': { 'first': ['Another', {'abbr': 'Nameless'}], 'last': 'Parrot' }, 'age': 23, }, { 'id': '3', 'name': { 'first': 'The', 'last': 'Last' }, 'age': 65, }, ]] def test_local_json_writing(): """Test writing to the local filesystem.""" rmtree('tmp/json_writer_test', ignore_errors=True) writer = JSONWriter('tmp/json_writer_test', 'id') writer.write(DOCUMENTS) count = 0 for basepath, _, filenames in os.walk('tmp/json_writer_test'): for filename in filenames: if filename.endswith('.json'): count = count + len(filenames) with open(os.path.join(basepath, filename)) as in_f: doc = json.load(in_f) assert 'id' in doc assert 'name' in doc if doc['id'] == '2': assert 'first' in doc['name'] assert len(doc['name']['first']) == 2 else: assert 'first' in doc['name'] assert 'last' in doc['name'] assert 'age' in doc if doc['id'] == '1': assert 'special tags' in doc assert count == 3 def test_local_json_writing_pre_split_id_path(): """Test writing to the local filesystem.""" rmtree('tmp/json_writer_test', ignore_errors=True) writer = JSONWriter('tmp/json_writer_test', ['id']) writer.write(DOCUMENTS) count = 0 for basepath, _, filenames in os.walk('tmp/json_writer_test'): for filename in filenames: if filename.endswith('.json'): count = count + len(filenames) with open(os.path.join(basepath, filename)) as in_f: doc = json.load(in_f) assert 'id' in doc assert 'name' in doc if doc['id'] == '2': assert 'first' in doc['name'] assert len(doc['name']['first']) == 2 else: assert 'first' in doc['name'] assert 'last' in doc['name'] assert 'age' in doc if doc['id'] == '1': assert 'special tags' in doc assert count == 3
31.688889
68
0.471599
import json import os from shutil import rmtree from polymatheia.data import NavigableDict from polymatheia.data.writer import JSONWriter DOCUMENTS = [NavigableDict(r) for r in [ { 'id': '1', 'name': { 'first': 'A', 'last': 'Person' }, 'age': 32, 'special tags': 'The first' }, { 'id': '2', 'name': { 'first': ['Another', {'abbr': 'Nameless'}], 'last': 'Parrot' }, 'age': 23, }, { 'id': '3', 'name': { 'first': 'The', 'last': 'Last' }, 'age': 65, }, ]] def test_local_json_writing(): rmtree('tmp/json_writer_test', ignore_errors=True) writer = JSONWriter('tmp/json_writer_test', 'id') writer.write(DOCUMENTS) count = 0 for basepath, _, filenames in os.walk('tmp/json_writer_test'): for filename in filenames: if filename.endswith('.json'): count = count + len(filenames) with open(os.path.join(basepath, filename)) as in_f: doc = json.load(in_f) assert 'id' in doc assert 'name' in doc if doc['id'] == '2': assert 'first' in doc['name'] assert len(doc['name']['first']) == 2 else: assert 'first' in doc['name'] assert 'last' in doc['name'] assert 'age' in doc if doc['id'] == '1': assert 'special tags' in doc assert count == 3 def test_local_json_writing_pre_split_id_path(): rmtree('tmp/json_writer_test', ignore_errors=True) writer = JSONWriter('tmp/json_writer_test', ['id']) writer.write(DOCUMENTS) count = 0 for basepath, _, filenames in os.walk('tmp/json_writer_test'): for filename in filenames: if filename.endswith('.json'): count = count + len(filenames) with open(os.path.join(basepath, filename)) as in_f: doc = json.load(in_f) assert 'id' in doc assert 'name' in doc if doc['id'] == '2': assert 'first' in doc['name'] assert len(doc['name']['first']) == 2 else: assert 'first' in doc['name'] assert 'last' in doc['name'] assert 'age' in doc if doc['id'] == '1': assert 'special tags' in doc assert count == 3
true
true
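In json_writer_test.py above, the directory walk increments count by len(filenames) for every matching file; that still adds up to 3 only because JSONWriter appears to place each document in its own id-derived directory, so every directory it visits holds a single file. A more layout-independent way to count the written files is sketched below; count_json_files is our own illustrative helper and is not part of polymatheia.

import os

def count_json_files(root):
    """Count *.json files under `root` without assuming one file per directory."""
    total = 0
    for _, _, filenames in os.walk(root):
        total += sum(1 for name in filenames if name.endswith(".json"))
    return total

# In the tests above this would reduce to: assert count_json_files('tmp/json_writer_test') == 3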
f70293b9def8f86852689095cc552b5b73ffc04b
13,289
py
Python
tests/unit/registry_test.py
TUNE-Archive/freight_forwarder
6ea4a49f474ec04abb8bb81b175c774a16b5312f
[ "MIT" ]
null
null
null
tests/unit/registry_test.py
TUNE-Archive/freight_forwarder
6ea4a49f474ec04abb8bb81b175c774a16b5312f
[ "MIT" ]
null
null
null
tests/unit/registry_test.py
TUNE-Archive/freight_forwarder
6ea4a49f474ec04abb8bb81b175c774a16b5312f
[ "MIT" ]
null
null
null
# -*- coding: utf-8; -*- from __future__ import unicode_literals, absolute_import import json import requests import six from tests import unittest, mock from freight_forwarder.registry import Registry, V1, V2 from freight_forwarder.registry.registry_base import RegistryBase, RegistryException from ..factories.registry_factory import RegistryV1Factory, RegistryV2Factory class RegistryTest(unittest.TestCase): def setUp(self): pass def tearDown(self): pass @mock.patch.object(V1, '_validate_response', autospec=True, return_value=True) @mock.patch('freight_forwarder.registry.registry_base.requests', autospec=True) def test_registry_v1_init(self, mock_requests, mock_v1_validate): test_registry = Registry() self.assertIsInstance(test_registry, RegistryBase) self.assertEquals(test_registry.ping(), True) @mock.patch.object(V1, '_validate_response', name="v1_validate") @mock.patch.object(V2, '_validate_response', name="v2_validate") @mock.patch('freight_forwarder.registry.registry_base.requests', autospec=True) def test_registry_v2_init(self, mock_requests, mock_v2, mock_v1): mock_v1.side_effect = RegistryException("test") mock_v2.return_value = True test_v1_registry = RegistryV1Factory() test_v2_registry = RegistryV2Factory() # This is stated to ensure the test environment is setup correctly # validated v1.ping() returns an exception with self.assertRaises(RegistryException): test_v1_registry.ping() # validated v2.ping() returns an exception self.assertEquals(test_v2_registry.ping(), True) # Validate the logic of the registry class to return a V2 object test_registry = Registry(address="https://v2.dockertest.io") self.assertIsInstance(test_registry, RegistryBase) class RegistryV1Test(unittest.TestCase): def setUp(self): pass def tearDown(self): pass @mock.patch.object(V1, '_validate_response', return_value=True) @mock.patch.object(V1, '_request_builder') @mock.patch('freight_forwarder.registry.registry_base.requests', autospec=True) def test_v1_search(self, mock_requests, mock_request_builder, mock_validate_response): # Defined Search Request a search_response_content = { "num_results": 3, "query": "test", "results": [ {"description": "api test app", "name": "testproject/test-app"}, {"description": "database test app", "name": "testproject/test-db"}, {"description": "cache test app", "name": "testproject/test-cache"} ] } # Define Response Value for content once request has been validated mock_request_builder.return_value = create_response_object( url="https://search.registry.docker.com", status_code=200, content=json.dumps(search_response_content).encode('utf-8') ) # Define Default value for utils _validate_reponse mock_validate_response.return_value = True # Build V1 Factory Registry test_registry = RegistryV1Factory(address='https://search.registry.docker.com') results = test_registry.search("test") self.assertIsInstance(results, dict) @mock.patch.object(V1, '_validate_response', return_value=True) @mock.patch.object(V1, '_request_builder') @mock.patch('freight_forwarder.registry.registry_base.requests', autospec=True) def test_v1_tags(self, mock_requests, mock_request_builder, mock_validate_response): tag_response_content = { "0.1": "3fad19bfa2", "latest": "xxxxxxxxxx", "localtest": "xxxxxxxxxxxxxxae13", "redis123123": "xxxxxxxxxxxxxxae132", "jira1268": "xxxxxxxxxxxxxxae1324987" } formatted_output = [ 'appexample/test-app:0.1', 'appexample/test-app:latest', 'appexample/test-app:us-east-01-dev', 'appexample/test-app:localtest', 'appexample/test-app:redis123123', 'appexample/test-app:jira1268' ] 
mock_request_builder.return_value = create_response_object( url="https://tag.registry.docker.com", status_code=200, content=json.dumps(tag_response_content).encode('utf-8') ) mock_validate_response.return_value = True test_registry = RegistryV1Factory(address='https://tag.registry.docker.com') for tag in test_registry.tags("appexample/test-app"): tag_output = "".join(tag) self.assertIsInstance(tag_output, six.string_types) self.assertIn(tag_output, formatted_output) def test_delete_tag(self): self.skipTest("Implemented but not used") def test_delete(self): self.skipTest("Implemented but not used") def test_get_image_by_id(self): self.skipTest("Implemented but not used") def test_get_image_id_by_tag(self): self.skipTest("Implemented but not used") def set_image_tag(self): self.skipTest("Implemented but not used") class RegistryV2Test(unittest.TestCase): def setUp(self): pass def tearDown(self): pass @mock.patch.object(V2, '_validate_response', name='mock_v2_validate_response', return_value=True) @mock.patch.object(V2, '_request_builder', name='mock_v2_request_builder') @mock.patch('freight_forwarder.registry.registry_base.requests', autospec=True) def test_v2_search(self, mock_requests, mock_request_builder, mock_validate_response): # Defined Search Request search_response_content = json.dumps({"repositories": ["appexample/test-app", "appexample/test-db", "appexample/test-cache"]}).encode('utf-8') response = create_response_object(url="https://v2search.registry.docker.com", status_code=200, content=search_response_content) # Define Response Value for content once request has been validated mock_request_builder.return_value = response # Define Default value for utils _validate_response mock_validate_response.return_value = True # Build V1 Factory Registry test_registry = RegistryV2Factory(address='https://v2search.registry.docker.com') test_registry.search("test") for search in test_registry.search("test"): search_output = "".join(search) self.assertIsInstance(search_output, six.string_types) @mock.patch.object(V2, '_validate_response', name='mock_v2_validate_response', return_value=True) @mock.patch.object(V2, '_request_builder', name='mock_v2_request_builder') @mock.patch('freight_forwarder.registry.registry_base.requests', autospec=True) def test_v2_tags(self, mock_requests, mock_request_builder, mock_validate_response): tag_response_content = json.dumps({"name": "appexample/test-app", "tags": [ "latest", "0.0.15", "asdfasb81"] } ).encode('utf-8') formatted_output = ['appexample/test-app:latest', 'appexample/test-app:0.0.15', 'appexample/test-app:asdfasb81'] response = create_response_object(url="https://v2tags.registry.docker.com", status_code=200, content=tag_response_content) mock_request_builder.return_value = response mock_validate_response.return_value = True test_registry = RegistryV2Factory(address='https://v2tags.registry.docker.com') for tags in test_registry.tags("appexample/test-app"): tag_output = "".join(tags) self.assertIsInstance(tag_output, six.string_types) self.assertIn(tag_output, formatted_output) def test_blobs(self): self.skipTest("Not implemented") def test_catalog(self, count=None, last=None): self.skipTest("Not implemented") def test_manifests(self): self.skipTest("Not implemented") class RegistryBaseTests(unittest.TestCase): def setUp(self): self.patch_requests = mock.patch('freight_forwarder.registry.registry_base.requests', autospec=True) self.patch_requests.start() self.test_registry = RegistryV1Factory(address="https://registrybasetest.docker.com") def 
tearDown(self): self.patch_requests.stop() del self.test_registry def test_ping(self): self.skipTest("Defined as abc method. Override in class") def test_tags(self): self.skipTest("Defined as abc method. Override in class") def test_init(self): self.assertEquals(self.test_registry.scheme, 'https://') self.assertEquals(self.test_registry.location, 'registrybasetest.docker.com') self.assertEquals(self.test_registry.auth, None) self.assertEquals(self.test_registry.__str__(), "https://registrybasetest.docker.com") self.assertIsInstance(self.test_registry, RegistryBase) def test_registry_base_auth_base_functionality(self): self.assertEquals(self.test_registry.auth, None) with self.assertRaises(TypeError): self.test_registry.auth = ["user=test_user", "passwd=password"] def test_registry_base_auth_with_auth(self): pass class RegistryExceptionTest(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_exception_with_status_code_and_url(self): response = create_response_object(url="https://bad.docker.io", status_code=503, content={"test": "data"}) registry_exception = RegistryException(response) self.assertIsInstance(registry_exception, RegistryException) self.assertEquals(registry_exception.response.status_code, 503) def test_exception_with_no_content(self): response = create_response_object(url="https://nocontent.docker.io", status_code=503) registry_exception = RegistryException(response) self.assertIsInstance(registry_exception, RegistryException) self.assertEquals(registry_exception.message, 'There was an issue with the request to the docker registry.') def test_exception_with_error_content(self): # TODO - grab a properly formatted error for testing response = create_response_object(url="https://errorcontent.docker.io", status_code=500, content=json.dumps({'error': 'Docker Registry Error Example'})) registry_exception = RegistryException(response) self.assertIsInstance(registry_exception, RegistryException) self.assertEquals(registry_exception.message, 'Docker Registry Error Example') # Test the class.__str__ MagicMethod self.assertEquals("{0}".format(registry_exception), 'Docker Registry Error Example') def create_response_object(url, status_code, content=None): """ The function generates a mock object that is properly formatted for the RegistryException and validates the input :param url: url to pass through for the mock request object :param status_code: status code to append to the response object :param content: **required** if not provided, this attribute will be blocked :return: Parent Mock: request.Reponse Child Mock: request - requests.PreparedRequest """ if not isinstance(url, six.string_types): raise(TypeError("incorrect type provided for url")) if not isinstance(status_code, six.integer_types): raise(TypeError("incorrect type provided for http status code")) mock_object_request = mock.MagicMock(spec=requests.PreparedRequest, url=url) mock_object_response = mock.MagicMock(spec=requests.Response, request=mock_object_request) mock_object_response.status_code = status_code if content: mock_object_response.content = content else: # this blocks the content attribute from being present del mock_object_response.content return mock_object_response def format_image_results(registry_response_dict): """ Response attribute content is formatted correctly for the Images :param response: response object with content attribute :return: dict of various images """ if not isinstance(registry_response_dict, dict): raise TypeError('registry_response_dict must be a dict.') images = 
{} results = registry_response_dict.get('results') if results: for image in results: images[image.get('name')] = image return images
40.889231
117
0.658063
from __future__ import unicode_literals, absolute_import import json import requests import six from tests import unittest, mock from freight_forwarder.registry import Registry, V1, V2 from freight_forwarder.registry.registry_base import RegistryBase, RegistryException from ..factories.registry_factory import RegistryV1Factory, RegistryV2Factory class RegistryTest(unittest.TestCase): def setUp(self): pass def tearDown(self): pass @mock.patch.object(V1, '_validate_response', autospec=True, return_value=True) @mock.patch('freight_forwarder.registry.registry_base.requests', autospec=True) def test_registry_v1_init(self, mock_requests, mock_v1_validate): test_registry = Registry() self.assertIsInstance(test_registry, RegistryBase) self.assertEquals(test_registry.ping(), True) @mock.patch.object(V1, '_validate_response', name="v1_validate") @mock.patch.object(V2, '_validate_response', name="v2_validate") @mock.patch('freight_forwarder.registry.registry_base.requests', autospec=True) def test_registry_v2_init(self, mock_requests, mock_v2, mock_v1): mock_v1.side_effect = RegistryException("test") mock_v2.return_value = True test_v1_registry = RegistryV1Factory() test_v2_registry = RegistryV2Factory() with self.assertRaises(RegistryException): test_v1_registry.ping() self.assertEquals(test_v2_registry.ping(), True) test_registry = Registry(address="https://v2.dockertest.io") self.assertIsInstance(test_registry, RegistryBase) class RegistryV1Test(unittest.TestCase): def setUp(self): pass def tearDown(self): pass @mock.patch.object(V1, '_validate_response', return_value=True) @mock.patch.object(V1, '_request_builder') @mock.patch('freight_forwarder.registry.registry_base.requests', autospec=True) def test_v1_search(self, mock_requests, mock_request_builder, mock_validate_response): search_response_content = { "num_results": 3, "query": "test", "results": [ {"description": "api test app", "name": "testproject/test-app"}, {"description": "database test app", "name": "testproject/test-db"}, {"description": "cache test app", "name": "testproject/test-cache"} ] } mock_request_builder.return_value = create_response_object( url="https://search.registry.docker.com", status_code=200, content=json.dumps(search_response_content).encode('utf-8') ) mock_validate_response.return_value = True test_registry = RegistryV1Factory(address='https://search.registry.docker.com') results = test_registry.search("test") self.assertIsInstance(results, dict) @mock.patch.object(V1, '_validate_response', return_value=True) @mock.patch.object(V1, '_request_builder') @mock.patch('freight_forwarder.registry.registry_base.requests', autospec=True) def test_v1_tags(self, mock_requests, mock_request_builder, mock_validate_response): tag_response_content = { "0.1": "3fad19bfa2", "latest": "xxxxxxxxxx", "localtest": "xxxxxxxxxxxxxxae13", "redis123123": "xxxxxxxxxxxxxxae132", "jira1268": "xxxxxxxxxxxxxxae1324987" } formatted_output = [ 'appexample/test-app:0.1', 'appexample/test-app:latest', 'appexample/test-app:us-east-01-dev', 'appexample/test-app:localtest', 'appexample/test-app:redis123123', 'appexample/test-app:jira1268' ] mock_request_builder.return_value = create_response_object( url="https://tag.registry.docker.com", status_code=200, content=json.dumps(tag_response_content).encode('utf-8') ) mock_validate_response.return_value = True test_registry = RegistryV1Factory(address='https://tag.registry.docker.com') for tag in test_registry.tags("appexample/test-app"): tag_output = "".join(tag) self.assertIsInstance(tag_output, 
six.string_types) self.assertIn(tag_output, formatted_output) def test_delete_tag(self): self.skipTest("Implemented but not used") def test_delete(self): self.skipTest("Implemented but not used") def test_get_image_by_id(self): self.skipTest("Implemented but not used") def test_get_image_id_by_tag(self): self.skipTest("Implemented but not used") def set_image_tag(self): self.skipTest("Implemented but not used") class RegistryV2Test(unittest.TestCase): def setUp(self): pass def tearDown(self): pass @mock.patch.object(V2, '_validate_response', name='mock_v2_validate_response', return_value=True) @mock.patch.object(V2, '_request_builder', name='mock_v2_request_builder') @mock.patch('freight_forwarder.registry.registry_base.requests', autospec=True) def test_v2_search(self, mock_requests, mock_request_builder, mock_validate_response): search_response_content = json.dumps({"repositories": ["appexample/test-app", "appexample/test-db", "appexample/test-cache"]}).encode('utf-8') response = create_response_object(url="https://v2search.registry.docker.com", status_code=200, content=search_response_content) mock_request_builder.return_value = response mock_validate_response.return_value = True test_registry = RegistryV2Factory(address='https://v2search.registry.docker.com') test_registry.search("test") for search in test_registry.search("test"): search_output = "".join(search) self.assertIsInstance(search_output, six.string_types) @mock.patch.object(V2, '_validate_response', name='mock_v2_validate_response', return_value=True) @mock.patch.object(V2, '_request_builder', name='mock_v2_request_builder') @mock.patch('freight_forwarder.registry.registry_base.requests', autospec=True) def test_v2_tags(self, mock_requests, mock_request_builder, mock_validate_response): tag_response_content = json.dumps({"name": "appexample/test-app", "tags": [ "latest", "0.0.15", "asdfasb81"] } ).encode('utf-8') formatted_output = ['appexample/test-app:latest', 'appexample/test-app:0.0.15', 'appexample/test-app:asdfasb81'] response = create_response_object(url="https://v2tags.registry.docker.com", status_code=200, content=tag_response_content) mock_request_builder.return_value = response mock_validate_response.return_value = True test_registry = RegistryV2Factory(address='https://v2tags.registry.docker.com') for tags in test_registry.tags("appexample/test-app"): tag_output = "".join(tags) self.assertIsInstance(tag_output, six.string_types) self.assertIn(tag_output, formatted_output) def test_blobs(self): self.skipTest("Not implemented") def test_catalog(self, count=None, last=None): self.skipTest("Not implemented") def test_manifests(self): self.skipTest("Not implemented") class RegistryBaseTests(unittest.TestCase): def setUp(self): self.patch_requests = mock.patch('freight_forwarder.registry.registry_base.requests', autospec=True) self.patch_requests.start() self.test_registry = RegistryV1Factory(address="https://registrybasetest.docker.com") def tearDown(self): self.patch_requests.stop() del self.test_registry def test_ping(self): self.skipTest("Defined as abc method. Override in class") def test_tags(self): self.skipTest("Defined as abc method. 
Override in class") def test_init(self): self.assertEquals(self.test_registry.scheme, 'https://') self.assertEquals(self.test_registry.location, 'registrybasetest.docker.com') self.assertEquals(self.test_registry.auth, None) self.assertEquals(self.test_registry.__str__(), "https://registrybasetest.docker.com") self.assertIsInstance(self.test_registry, RegistryBase) def test_registry_base_auth_base_functionality(self): self.assertEquals(self.test_registry.auth, None) with self.assertRaises(TypeError): self.test_registry.auth = ["user=test_user", "passwd=password"] def test_registry_base_auth_with_auth(self): pass class RegistryExceptionTest(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_exception_with_status_code_and_url(self): response = create_response_object(url="https://bad.docker.io", status_code=503, content={"test": "data"}) registry_exception = RegistryException(response) self.assertIsInstance(registry_exception, RegistryException) self.assertEquals(registry_exception.response.status_code, 503) def test_exception_with_no_content(self): response = create_response_object(url="https://nocontent.docker.io", status_code=503) registry_exception = RegistryException(response) self.assertIsInstance(registry_exception, RegistryException) self.assertEquals(registry_exception.message, 'There was an issue with the request to the docker registry.') def test_exception_with_error_content(self): response = create_response_object(url="https://errorcontent.docker.io", status_code=500, content=json.dumps({'error': 'Docker Registry Error Example'})) registry_exception = RegistryException(response) self.assertIsInstance(registry_exception, RegistryException) self.assertEquals(registry_exception.message, 'Docker Registry Error Example') self.assertEquals("{0}".format(registry_exception), 'Docker Registry Error Example') def create_response_object(url, status_code, content=None): if not isinstance(url, six.string_types): raise(TypeError("incorrect type provided for url")) if not isinstance(status_code, six.integer_types): raise(TypeError("incorrect type provided for http status code")) mock_object_request = mock.MagicMock(spec=requests.PreparedRequest, url=url) mock_object_response = mock.MagicMock(spec=requests.Response, request=mock_object_request) mock_object_response.status_code = status_code if content: mock_object_response.content = content else: del mock_object_response.content return mock_object_response def format_image_results(registry_response_dict): if not isinstance(registry_response_dict, dict): raise TypeError('registry_response_dict must be a dict.') images = {} results = registry_response_dict.get('results') if results: for image in results: images[image.get('name')] = image return images
true
true
f7029408ce730e634db3b1bb394319bb0bf2ed6b
127
py
Python
icevision/models/ross/efficientdet/loss_fn.py
ai-fast-track/mantisshrimp
cc6d6a4a048f6ddda2782b6593dcd6b083a673e4
[ "Apache-2.0" ]
580
2020-09-10T06:29:57.000Z
2022-03-29T19:34:54.000Z
icevision/models/ross/efficientdet/loss_fn.py
ai-fast-track/mantisshrimp
cc6d6a4a048f6ddda2782b6593dcd6b083a673e4
[ "Apache-2.0" ]
691
2020-09-05T03:08:34.000Z
2022-03-31T23:47:06.000Z
icevision/models/ross/efficientdet/loss_fn.py
lgvaz/mantisshrimp2
743cb7df0dae7eb1331fc2bb66fc9ca09db496cd
[ "Apache-2.0" ]
105
2020-09-09T10:41:35.000Z
2022-03-25T17:16:49.000Z
__all__ = ["loss_fn"] from icevision.imports import * def loss_fn(preds, targets) -> torch.Tensor: return preds["loss"]
15.875
44
0.692913
__all__ = ["loss_fn"] from icevision.imports import * def loss_fn(preds, targets) -> torch.Tensor: return preds["loss"]
true
true
f702941de4e407f4a88237e388c77641c7894dd9
416
py
Python
blog/migrations/0008_alter_post_content.py
samwel2000/portfolio-backend
a7cb50ffa34b9db64bc4532a9d79df0b082fd000
[ "MIT" ]
null
null
null
blog/migrations/0008_alter_post_content.py
samwel2000/portfolio-backend
a7cb50ffa34b9db64bc4532a9d79df0b082fd000
[ "MIT" ]
null
null
null
blog/migrations/0008_alter_post_content.py
samwel2000/portfolio-backend
a7cb50ffa34b9db64bc4532a9d79df0b082fd000
[ "MIT" ]
null
null
null
# Generated by Django 3.2.4 on 2021-09-11 12:44

import ckeditor_uploader.fields
from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('blog', '0007_subscriber'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='content',
            field=ckeditor_uploader.fields.RichTextUploadingField(),
        ),
    ]
20.8
68
0.625
import ckeditor_uploader.fields
from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('blog', '0007_subscriber'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='content',
            field=ckeditor_uploader.fields.RichTextUploadingField(),
        ),
    ]
true
true
f702949d7e5cbdf335ceb60f0431804430f5686c
2,126
py
Python
tests/test_decorators.py
marwahaha/QSpectra
328a4f78af1473d65c011eb99b903c7f0ef1db32
[ "BSD-2-Clause" ]
14
2018-02-01T03:06:26.000Z
2021-09-16T02:50:31.000Z
tests/test_decorators.py
whaley-group-berkeley/qspectra
f0f971ea26c4595a9c7bd4ee98e31e9ba63451c6
[ "BSD-2-Clause" ]
5
2018-01-02T20:33:14.000Z
2021-11-12T23:59:20.000Z
tests/test_decorators.py
whaley-group-berkeley/qspectra
f0f971ea26c4595a9c7bd4ee98e31e9ba63451c6
[ "BSD-2-Clause" ]
7
2018-04-19T05:08:41.000Z
2021-09-16T01:45:24.000Z
import unittest

from numpy.testing import assert_allclose

from qspectra import polarization
from qspectra.simulate import decorators


class TestGetCallArgs(unittest.TestCase):
    def test(self):
        self.assertEqual(
            decorators._get_call_args(lambda a: None, 1),
            {'a': 1})
        self.assertEqual(
            decorators._get_call_args(lambda a, **b: None, 1),
            {'a': 1})
        self.assertEqual(
            decorators._get_call_args(lambda a, **b: None, a=1, c=2),
            {'a': 1, 'c': 2})
        self.assertEqual(
            decorators._get_call_args(lambda **b: None, a=1, c=2),
            {'a': 1, 'c': 2})
        with self.assertRaises(NotImplementedError):
            decorators._get_call_args(lambda *a: None, 1, 2, 3)


class TestIsotropicAverage(unittest.TestCase):
    def test_optional_2nd_order_isotropic_average(self):
        binary = {'xx': 1, 'yy': 2, 'zz': 4}
        f = decorators.optional_2nd_order_isotropic_average(
            lambda polarization: (0, binary[polarization]))
        assert_allclose(f('xx'), (0, 1))
        assert_allclose(f('xx', exact_isotropic_average=False), (0, 1))
        assert_allclose(f('xx', exact_isotropic_average=True), (0, 7 / 3.0))
        assert_allclose(f('xy', exact_isotropic_average=True), (0, 0))
        with self.assertRaises(ValueError):
            # wrong number of polarizations
            f('xyz', exact_isotropic_average=True)

    def test_optional_4th_order_isotropic_average(self):
        binary = {'xx': 1, 'yy': 2, 'zz': 4}
        f = decorators.optional_4th_order_isotropic_average(
            lambda polarization: (0, binary[polarization[:2]]
                                  + 10 * binary[polarization[2:]]))
        assert_allclose(f('xxxx'), (0, 11))
        ma = polarization.MAGIC_ANGLE
        assert_allclose(f([0, 0, ma, ma], exact_isotropic_average=True),
                        (0, (11 + 12 + 14 + 21 + 22 + 24
                             + 41 + 42 + 44) / 9.0))
        with self.assertRaises(ValueError):
            # wrong number of polarizations
            f('xyz', exact_isotropic_average=True)
41.686275
80
0.601129
import unittest

from numpy.testing import assert_allclose

from qspectra import polarization
from qspectra.simulate import decorators


class TestGetCallArgs(unittest.TestCase):
    def test(self):
        self.assertEqual(
            decorators._get_call_args(lambda a: None, 1),
            {'a': 1})
        self.assertEqual(
            decorators._get_call_args(lambda a, **b: None, 1),
            {'a': 1})
        self.assertEqual(
            decorators._get_call_args(lambda a, **b: None, a=1, c=2),
            {'a': 1, 'c': 2})
        self.assertEqual(
            decorators._get_call_args(lambda **b: None, a=1, c=2),
            {'a': 1, 'c': 2})
        with self.assertRaises(NotImplementedError):
            decorators._get_call_args(lambda *a: None, 1, 2, 3)


class TestIsotropicAverage(unittest.TestCase):
    def test_optional_2nd_order_isotropic_average(self):
        binary = {'xx': 1, 'yy': 2, 'zz': 4}
        f = decorators.optional_2nd_order_isotropic_average(
            lambda polarization: (0, binary[polarization]))
        assert_allclose(f('xx'), (0, 1))
        assert_allclose(f('xx', exact_isotropic_average=False), (0, 1))
        assert_allclose(f('xx', exact_isotropic_average=True), (0, 7 / 3.0))
        assert_allclose(f('xy', exact_isotropic_average=True), (0, 0))
        with self.assertRaises(ValueError):
            f('xyz', exact_isotropic_average=True)

    def test_optional_4th_order_isotropic_average(self):
        binary = {'xx': 1, 'yy': 2, 'zz': 4}
        f = decorators.optional_4th_order_isotropic_average(
            lambda polarization: (0, binary[polarization[:2]]
                                  + 10 * binary[polarization[2:]]))
        assert_allclose(f('xxxx'), (0, 11))
        ma = polarization.MAGIC_ANGLE
        assert_allclose(f([0, 0, ma, ma], exact_isotropic_average=True),
                        (0, (11 + 12 + 14 + 21 + 22 + 24
                             + 41 + 42 + 44) / 9.0))
        with self.assertRaises(ValueError):
            f('xyz', exact_isotropic_average=True)
true
true
f70295f364c9fa7735e9cab7413024d68698092b
64
py
Python
enthought/traits/ui/editors/check_list_editor.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
3
2016-12-09T06:05:18.000Z
2018-03-01T13:00:29.000Z
enthought/traits/ui/editors/check_list_editor.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
1
2020-12-02T00:51:32.000Z
2020-12-02T08:48:55.000Z
enthought/traits/ui/editors/check_list_editor.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
null
null
null
# proxy module
from traitsui.editors.check_list_editor import *
21.333333
48
0.828125
from traitsui.editors.check_list_editor import *
true
true
f70296d97876b7039d6cd67bbecc249889c24636
4,957
py
Python
dataset_generation/ExtractSantini.py
YuHsin1998/EllSeg
91a532650ef809eef081a7ef9af5f1940fb37a37
[ "MIT" ]
1
2021-05-26T05:45:42.000Z
2021-05-26T05:45:42.000Z
dataset_generation/ExtractSantini.py
xiaohuaibaoguigui/EllSeg
ff56b255f8e650856aec9af23792e105897eba5c
[ "MIT" ]
null
null
null
dataset_generation/ExtractSantini.py
xiaohuaibaoguigui/EllSeg
ff56b255f8e650856aec9af23792e105897eba5c
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed May 29 16:16:57 2019 @author: rakshit """ import os import cv2 import argparse import matplotlib import numpy as np import deepdish as dd import scipy.io as scio print('Extracting Santini') parser = argparse.ArgumentParser() parser.add_argument('--noDisp', help='Specify flag to display labelled images', type=int) parser.add_argument('--path2ds', help='Path to dataset', type=str) args = parser.parse_args() if args.noDisp: noDisp = True print('No graphics') else: noDisp = False print('Showing figures') gui_env = ['Qt5Agg','WXAgg','TKAgg','GTKAgg'] for gui in gui_env: try: print("testing: {}".format(gui)) matplotlib.use(gui,warn=False, force=True) from matplotlib import pyplot as plt break except: continue print("Using: {}".format(matplotlib.get_backend())) plt.ion() args.path2ds = '/media/rakshit/tank/Dataset' PATH_DIR = os.path.join(args.path2ds, 'Santini') PATH_DS = os.path.join(args.path2ds, 'All') PATH_MASTER = os.path.join(args.path2ds, 'MasterKey') list_ds = ['1', '2', '3', '4', '5', '6'] sc = (640.0/384.0) Image_counter = 0.0 ds_num = 24 def mypause(interval): backend = plt.rcParams['backend'] if backend in matplotlib.rcsetup.interactive_bk: figManager = matplotlib._pylab_helpers.Gcf.get_active() if figManager is not None: canvas = figManager.canvas if canvas.figure.stale: canvas.draw() canvas.start_event_loop(interval) return def fix_pupil_loc(p, res): # res: [H, W] p[0] = 0.5*p[0] p[1] = res[0] - 0.5*p[1] return p def readFormattedText(path2file, ignoreLines): data = [] count = 0 f = open(path2file, 'r') for line in f: d = [int(d) for d in line.split() if d.isdigit()] count = count + 1 if d and count > ignoreLines: data.append(d) f.close() return data for name in list_ds: # Ignore the first row and column. 
# Columns: [index, p_x, p_y] opts = os.listdir(os.path.join(PATH_DIR, name)) for subdir in opts: PATH_DATA = os.path.join(PATH_DIR, name, subdir) # Read pupil data Path2text = os.path.join(PATH_DATA, 'journal-{:04d}.txt'.format(int(subdir)-1)) Path2vid = os.path.join(PATH_DATA, 'eye-{:04d}-0000.avi'.format(int(subdir)-1)) PupilData = np.array(readFormattedText(Path2text, 2)) VidObj = cv2.VideoCapture(Path2vid) keydict = {k:[] for k in ['pupil_loc', 'archive', 'data_type', 'resolution', 'dataset', 'subset']} # Generate empty dictionaries keydict['data_type'] = 0 # Only pupil center available keydict['resolution'] = [] keydict['dataset'] = 'Santini' keydict['subset'] = '{}-{}'.format(name, subdir) # Create an empty dictionary as per agreed structure Data = {k:[] for k in ['Images', 'Info', 'Masks', 'Masks_noSkin', 'Fits', 'pupil_loc']} Data['Fits'] = {k:[] for k in ['pupil', 'pupil_norm', 'pupil_phi', 'iris', 'iris_norm', 'iris_phi']} if not noDisp: fig, plts = plt.subplots(1,1) fr_num = 0 while(VidObj.isOpened()): ret, I = VidObj.read() if ret == True: I = cv2.cvtColor(I, cv2.COLOR_BGR2GRAY) I = cv2.resize(I, (640, 480), cv2.INTER_LANCZOS4) Data['Images'].append(I) keydict['resolution'].append(I.shape) keydict['archive'].append(ds_num) pupil_loc = fix_pupil_loc(PupilData[fr_num, 10:12]*sc, I.shape) keydict['pupil_loc'].append(pupil_loc) Data['pupil_loc'].append(pupil_loc) Data['Info'].append(str(fr_num)) fr_num+=1 Image_counter+=1 if not noDisp: if fr_num == 1: cI = plts.imshow(I) cX = plts.scatter(pupil_loc[0], pupil_loc[1]) plt.show() plt.pause(.01) else: newLoc = np.array([pupil_loc[0], pupil_loc[1]]) cI.set_data(I) cX.set_offsets(newLoc) mypause(0.01) else: # No more frames to load break Data['Images'] = np.stack(Data['Images'], axis=0) Data['pupil_loc'] = np.stack(Data['pupil_loc'], axis=0) keydict['pupil_loc'] = np.stack(keydict['pupil_loc'], axis=0) keydict['resolution'] = np.stack(keydict['resolution'], axis=0) keydict['archive'] = np.stack(keydict['archive'], axis=0) # Save out data dd.io.save(os.path.join(PATH_DS, str(ds_num)+'.h5'), Data) scio.savemat(os.path.join(PATH_MASTER, str(ds_num)), keydict, appendmat=True) ds_num=ds_num+1
33.046667
108
0.571717
import os import cv2 import argparse import matplotlib import numpy as np import deepdish as dd import scipy.io as scio print('Extracting Santini') parser = argparse.ArgumentParser() parser.add_argument('--noDisp', help='Specify flag to display labelled images', type=int) parser.add_argument('--path2ds', help='Path to dataset', type=str) args = parser.parse_args() if args.noDisp: noDisp = True print('No graphics') else: noDisp = False print('Showing figures') gui_env = ['Qt5Agg','WXAgg','TKAgg','GTKAgg'] for gui in gui_env: try: print("testing: {}".format(gui)) matplotlib.use(gui,warn=False, force=True) from matplotlib import pyplot as plt break except: continue print("Using: {}".format(matplotlib.get_backend())) plt.ion() args.path2ds = '/media/rakshit/tank/Dataset' PATH_DIR = os.path.join(args.path2ds, 'Santini') PATH_DS = os.path.join(args.path2ds, 'All') PATH_MASTER = os.path.join(args.path2ds, 'MasterKey') list_ds = ['1', '2', '3', '4', '5', '6'] sc = (640.0/384.0) Image_counter = 0.0 ds_num = 24 def mypause(interval): backend = plt.rcParams['backend'] if backend in matplotlib.rcsetup.interactive_bk: figManager = matplotlib._pylab_helpers.Gcf.get_active() if figManager is not None: canvas = figManager.canvas if canvas.figure.stale: canvas.draw() canvas.start_event_loop(interval) return def fix_pupil_loc(p, res): p[0] = 0.5*p[0] p[1] = res[0] - 0.5*p[1] return p def readFormattedText(path2file, ignoreLines): data = [] count = 0 f = open(path2file, 'r') for line in f: d = [int(d) for d in line.split() if d.isdigit()] count = count + 1 if d and count > ignoreLines: data.append(d) f.close() return data for name in list_ds: opts = os.listdir(os.path.join(PATH_DIR, name)) for subdir in opts: PATH_DATA = os.path.join(PATH_DIR, name, subdir) Path2text = os.path.join(PATH_DATA, 'journal-{:04d}.txt'.format(int(subdir)-1)) Path2vid = os.path.join(PATH_DATA, 'eye-{:04d}-0000.avi'.format(int(subdir)-1)) PupilData = np.array(readFormattedText(Path2text, 2)) VidObj = cv2.VideoCapture(Path2vid) keydict = {k:[] for k in ['pupil_loc', 'archive', 'data_type', 'resolution', 'dataset', 'subset']} keydict['data_type'] = 0 keydict['resolution'] = [] keydict['dataset'] = 'Santini' keydict['subset'] = '{}-{}'.format(name, subdir) Data = {k:[] for k in ['Images', 'Info', 'Masks', 'Masks_noSkin', 'Fits', 'pupil_loc']} Data['Fits'] = {k:[] for k in ['pupil', 'pupil_norm', 'pupil_phi', 'iris', 'iris_norm', 'iris_phi']} if not noDisp: fig, plts = plt.subplots(1,1) fr_num = 0 while(VidObj.isOpened()): ret, I = VidObj.read() if ret == True: I = cv2.cvtColor(I, cv2.COLOR_BGR2GRAY) I = cv2.resize(I, (640, 480), cv2.INTER_LANCZOS4) Data['Images'].append(I) keydict['resolution'].append(I.shape) keydict['archive'].append(ds_num) pupil_loc = fix_pupil_loc(PupilData[fr_num, 10:12]*sc, I.shape) keydict['pupil_loc'].append(pupil_loc) Data['pupil_loc'].append(pupil_loc) Data['Info'].append(str(fr_num)) fr_num+=1 Image_counter+=1 if not noDisp: if fr_num == 1: cI = plts.imshow(I) cX = plts.scatter(pupil_loc[0], pupil_loc[1]) plt.show() plt.pause(.01) else: newLoc = np.array([pupil_loc[0], pupil_loc[1]]) cI.set_data(I) cX.set_offsets(newLoc) mypause(0.01) else: break Data['Images'] = np.stack(Data['Images'], axis=0) Data['pupil_loc'] = np.stack(Data['pupil_loc'], axis=0) keydict['pupil_loc'] = np.stack(keydict['pupil_loc'], axis=0) keydict['resolution'] = np.stack(keydict['resolution'], axis=0) keydict['archive'] = np.stack(keydict['archive'], axis=0) dd.io.save(os.path.join(PATH_DS, str(ds_num)+'.h5'), Data) 
scio.savemat(os.path.join(PATH_MASTER, str(ds_num)), keydict, appendmat=True) ds_num=ds_num+1
true
true
f70296f7f9d79a7e89f869920b8a89f1d9105870
38,097
py
Python
arkalos/app/arkalos_views/arkalos_common.py
kantale/arkalos
0e8776a2458b6be79ceea8d78c91fe8c067df50a
[ "AFL-3.0" ]
null
null
null
arkalos/app/arkalos_views/arkalos_common.py
kantale/arkalos
0e8776a2458b6be79ceea8d78c91fe8c067df50a
[ "AFL-3.0" ]
1
2017-06-20T13:59:24.000Z
2017-06-20T13:59:24.000Z
arkalos/app/arkalos_views/arkalos_common.py
kantale/arkalos
0e8776a2458b6be79ceea8d78c91fe8c067df50a
[ "AFL-3.0" ]
null
null
null
from django.http import HttpResponse from django.contrib.auth.models import User from django.contrib.auth import authenticate, login, logout from django.shortcuts import redirect from django.core.validators import URLValidator # https://stackoverflow.com/questions/7160737/python-how-to-validate-a-url-in-python-malformed-or-not from django.core.exceptions import ValidationError, ObjectDoesNotExist from django.db.models import Max, Count from app.models import Reference, Tools, Reports, Tasks, TasksStats import io import re import six import uuid import hashlib import simplejson #https://pybtex.org/ from pybtex.database import parse_string as parse_reference_string import pybtex.database.input.bibtex import pybtex.plugin # Globals pybtex_style = pybtex.plugin.find_plugin('pybtex.style.formatting', 'plain')() pybtex_html_backend = pybtex.plugin.find_plugin('pybtex.backends', 'html')() pybtex_parser = pybtex.database.input.bibtex.Parser() sep = '||' sep2 = '@@' format_time_string = '%a, %d %b %Y %H:%M:%S' # RFC 2822 Internet email standard. https://docs.python.org/2/library/time.html#time.strftime # '%Y-%m-%d, %H:%M:%S' url_validator = URLValidator() # https://stackoverflow.com/questions/7160737/python-how-to-validate-a-url-in-python-malformed-or-not class ArkalosException(Exception): pass def get_guid(): ''' Create a new guid ''' return str(uuid.uuid4()) def get_user_id(request): ''' Get id of user ''' is_authenticated = request.user.is_authenticated() if is_authenticated: return request.user.id return None def get_user(request): ''' Get user object ''' is_authenticated = request.user.is_authenticated() if is_authenticated: return request.user return None def fail(error_message=None): ''' Failed AJAX request ''' ret = {'success': False, 'error_message': error_message} json = simplejson.dumps(ret) return HttpResponse(json, content_type='application/json') def success(data={}): ''' success Ajax request ''' data['success'] = True json = simplejson.dumps(data) return HttpResponse(json, content_type='application/json') def has_data(f): ''' Decorator that passes AJAX data to a function parameters ''' def wrapper(*args, **kwargs): request = args[0] if request.method == 'POST': if len(request.POST): for k in request.POST: kwargs[k] = request.POST[k] else: POST = simplejson.loads(request.body) for k in POST: kwargs[k] = POST[k] elif request.method == 'GET': for k in request.GET: kwargs[k] = request.GET[k] print ("GET: {} == {}".format(k, kwargs[k])) return f(*args, **kwargs) return wrapper def has_field(field_names, errors): ''' Check if field names are present field_name: The field to check ''' def decorator(f): def wrapper(*args, **kwargs): for field_index, field_name in enumerate(field_names): if not field_name in kwargs: if callable(errors): kwargs['error'] = errors(field_name) elif type(errors) is list: kwargs['error'] = errors[field_index] elif type(errors) is dict: kwargs['error'] = errors[field_name] elif type(errors) is str: kwargs['error'] = errors else: # This should never happen raise ArkalosException('Unknown error type: {}'.format(type(error).__name__)) return f(*args, **kwargs) return f(*args, **kwargs) return wrapper return decorator def has_error(f): ''' Check if error in kwargs ''' def wrapper(*args, **kwargs): if 'error' in kwargs: return fail(kwargs['error']) return f(*args, **kwargs) return wrapper def username_exists(username): ''' Checks if a username exists ''' return User.objects.filter(username=username).exists() def URL_validate(url): ''' 
https://stackoverflow.com/questions/7160737/python-how-to-validate-a-url-in-python-malformed-or-not ''' try: url_validator(url) except ValidationError as e: return False return True def format_time(t): ''' Universal method to string format time vars ''' return t.strftime(format_time_string) ########################################################################### ##################DATABASE FUNCTIONS####################################### ########################################################################### def bootstrap_table_format_field(entry, value): ''' Formats the field of a bootstrap table. Values are taken from bidings ''' if type(value) is str: if type(entry) is dict: return entry[value] else: return getattr(entry, value) elif callable(value): return value(entry) def serve_boostrap_table2(model, query_f, filters, bindings, **kwargs): ''' count_f = Tools.objects.values('name', 'url').annotate(Count('name')).count() query_f = Tools.objects.values('name', 'url').annotate(Count('name')) IT DOES NOT USE count_f ! ''' #count = count_f() order = kwargs['order'] offset = kwargs['offset'] limit = kwargs['limit'] from_offset = int(offset) to_offset = from_offset + int(limit) if 'filter' in kwargs: # "read" the filter filter_ = kwargs['filter'] filter_ = simplejson.loads(filter_) print ("Filter:") print (filter_) applied_filters = {filters[f][0](): filters[f][1](f_value) for f, f_value in filter_.items() if f in filters} print ("Applied filters:") print (applied_filters) else: applied_filters = {} querySet = query_f(applied_filters) count = querySet.count() querySet = querySet[from_offset:to_offset] ret = {'total': count} ret['rows'] = [ {k: bootstrap_table_format_field(entry, v) for k, v in bindings.items()} for entry in querySet] json = simplejson.dumps(ret) return HttpResponse(json, content_type='application/json') def serve_boostrap_table(model, bindings, order_by, **kwargs): ''' http://bootstrap-table.wenzhixin.net.cn/ ''' count = model.objects.count() order = kwargs['order'] offset = kwargs['offset'] limit = kwargs['limit'] from_offset = int(offset) to_offset = from_offset + int(limit) if 'filter' in kwargs: filter_ = kwargs['filter'] filter_ = simplejson.loads(filter_) filter_ = { bindings[k] + '__icontains':v for k,v in filter_.items()} querySet = model.objects.filter(**filter_) count = querySet.count() querySet = querySet[from_offset:to_offset] else: querySet = model.objects.order_by(order_by)[from_offset:to_offset] ret = {'total': count} ret['rows'] = [ {k: bootstrap_table_format_field(entry, v) for k, v in bindings.items()} for entry in querySet] json = simplejson.dumps(ret) return HttpResponse(json, content_type='application/json') def db_exists(model, filters): ''' Does this entry exist? ''' return model.objects.filter(**filters).exists() def get_maximum_current_version(model, name): ''' Return the next available current_version ''' max_entry = model.objects.filter(name=name).aggregate(Max('current_version')) if max_entry['current_version__max'] is None: return 1 assert type(max_entry) is dict assert len(max_entry) == 1 return max_entry['current_version__max'] + 1 def build_jstree_tool_dependencies(tool, prefix='', include_original=False): ''' Build the dependency jstree of this tool include_original are we including the original tool in the jstree? 
''' def node(t): ret = { 'id': prefix + sep + t.name + sep + str(t.current_version), #Through this id we get info from jstree jandlers 'text': t.name + ' ' + str(t.current_version), 'children': [build_jstree_tool_dependencies(x, prefix, include_original=True) for x in t.dependencies.all()] + \ [{'text': x[0], 'type': 'exposed', 'value': x[1], 'description': x[2], 'id': prefix+sep+x[0]+sep+t.name+sep2+str(t.current_version)} for x in simplejson.loads(t.exposed)], 'current_version': t.current_version, 'name': t.name, 'type': 'tool', } return ret if include_original: return node(tool) else: return [node(dependent_tool) for dependent_tool in tool.dependencies.all()] def build_jstree(model, name, prefix=''): ''' Take an entry that has a previous_version and current_version Build a jstree compatible structure ''' index = {} if prefix: prefix_to_add = prefix + sep else: prefix_to_add = '' def node(o): current_version = o.current_version ret = { 'id': prefix_to_add + o.name + sep + str(o.current_version), 'text': o.name + ' ' + str(o.current_version), 'children': [], 'current_version': o.current_version, 'name': o.name } index[current_version] = ret return ret ret = [] all_objects = model.objects.filter(name=name).order_by("current_version") #ret.append(node(all_objects[0])) for o in all_objects: previous_version = o.previous_version if previous_version is None: ret.append(node(o)) else: this_node = node(o) index[previous_version]['children'].append(this_node) #print (simplejson.dumps(ret)) return ret ########################################################################### ##################END OF DATABASE####################################### ########################################################################### ########################################################################### ################## REGISTER ############################################### ########################################################################### @has_data @has_field(['username', 'password', 'password_confirm', 'email'], lambda x :'{} is required'.format(x)) @has_error def register(request, **kwargs): ''' Register ''' #print (kwargs) username = kwargs['username'] password = kwargs['password'] password_confirm = kwargs['password_confirm'] email = kwargs['email'] #Check if this user exists if username_exists(username): return fail('Username {} exists'.format(username)) #Check if password match if kwargs['password'] != kwargs['password_confirm']: return fail('Passwords do not match') #Create user user = User.objects.create_user(username, email, password) return success({}) @has_data @has_field(['username', 'password'], lambda x :'{} is required'.format(x)) @has_error def loginlocal(request, **kwargs): ''' Function called from login ''' username = kwargs['username'] password = kwargs['password'] user = authenticate(username=username, password=password) if user is None: return fail('Invalid username or password') #if user.is_active: ... 
# https://docs.djangoproject.com/en/1.9/topics/auth/default/ login(request, user) ret = {'username': username} return success(ret) def logoutlocal(request): ''' logout ''' logout(request) return redirect('/') ########################################################################### ################## END OF REGISTER ######################################## ########################################################################### ############################### ####REFERENCES################# ############################### def reference_get_fields(content): ''' Get the code of the bibtex entry ''' p = parse_reference_string(content, 'bibtex') p_len = len(p.entries) if p_len == 0: return False, 'Could not find BIBTEX entry' if p_len > 1: return False, 'More than one BIBTEX entries found' code = p.entries.keys()[0] if not 'title' in p.entries[code].fields: return False, 'Could not find title information' title = p.entries[code].fields['title'] if not hasattr(p.entries[code], 'persons'): return False, 'Could not find author information' if not 'author' in p.entries[code].persons: return False, 'Could not find author information' if len(p.entries[code].persons['author']) == 0: return False, 'Could not find author information' authors = sep.join([str(x) for x in p.entries[code].persons['author']]) return True, {'code': code, 'title': title, 'authors': authors} def bibtex_to_html(content): ''' Convert bibtex to html Adapted from: http://pybtex-docutils.readthedocs.io/en/latest/quickstart.html#overview ''' data = pybtex_parser.parse_stream(six.StringIO(content)) data_formatted = pybtex_style.format_entries(six.itervalues(data.entries)) output = io.StringIO() pybtex_html_backend.write_to_stream(data_formatted, output) html = output.getvalue() html_s = html.split('\n') html_s = html_s[9:-2] new_html = '\n'.join(html_s).replace('<dd>', '').replace('</dd>', '') return new_html @has_data @has_field(['content'], 'BIBTEX content is required') @has_error def add_reference(request, **kwargs): ''' Add reference ''' content = kwargs['content'] s, fields = reference_get_fields(content) if not s: return fail(fiels) if db_exists(Reference, {'code': fields['code']}): return fail('BIBTEX entry with code {} already exists'.format(code)) html = bibtex_to_html(content) r = Reference( user=get_user(request), code=fields['code'], title=fields['title'], authors=fields['authors'], content=content, reference_type='BIBTEX', html = html, ) r.save() return success() @has_data def get_references(request, **kwargs): ''' Serve GET Request for References bootstrap table ''' bindings = { 'id': 'code', 'content': 'html', } return serve_boostrap_table(Reference, bindings, 'id', **kwargs) @has_data @has_error def get_reference(request, **kwargs): ''' Get reference ''' codes = kwargs['codes'] ret = {'data': {}, 'html': []} c = 0 for code in codes: try: ref = Reference.objects.get(code=code) c += 1 ret['data'][code] = {'counter': c} ret['html'].append({'html': ref.html}) except ObjectDoesNotExist: pass ret['total'] = c return success(ret) @has_data def reference_suggestions(request, **kwargs): ''' Get called from tagas input ''' query = kwargs['query'] querySet = Reference.objects.filter(content__icontains = query)[:10] ret = [ {'value' : entry.code, 'html': entry.html} for entry in querySet] # We have a html representation for each Reference json = simplejson.dumps(ret) return HttpResponse(json, content_type='application/json') def get_references_from_text(text): ''' Get all reference objects from a text. 
This is useful for the report ''' ret = [] all_brackets = re.findall(r'\[[\w]+\]', text) for bracket in all_brackets: #Remove brackets code = bracket[1:-1] #Check if this a real reference try: ref = Reference.objects.get(code=code) except ObjectDoesNotExist: pass else: ret += [ref] return ret ############################### ######END OF REFERENCES######## ############################### ################################# #### REPORTS #################### ################################# @has_data def get_reports(request, **kwargs): ''' Serve bootstrap table for reports ''' bindings = { 'name': 'name', #'total_edits': lambda entry: entry['name__count'], 'content': lambda entry : '' } #return serve_boostrap_table(Reports, bindings, 'id', **kwargs) return serve_boostrap_table2( model = Reports, #count_f = lambda : Reports.objects.values('name').annotate(Count('name')).count(), query_f = lambda x : Reports.objects.filter(**x).values('name').distinct(), bindings = bindings, filters = { 'name': (lambda : 'name__icontains', lambda x : x) # name_contains = x }, **kwargs ) @has_data @has_error def get_reports_ui(request, **kwargs): name = kwargs['name'] current_version = kwargs['current_version'] report = Reports.objects.get(name=name, current_version=current_version) username = report.user.username ret = { 'name': name, 'current_version': current_version, 'username': username, 'created_at': format_time(report.created_at), 'markdown': report.markdown, 'summary': report.summary, } return success(ret) @has_data @has_error def add_report(request, **kwargs): name = kwargs['name'] previous_version = kwargs['previous_version'] markdown = kwargs['markdown'] references = kwargs['references'] user = get_user(request) #print (name) #print (previous_version) #print (markdown) #print (references) current_version = get_maximum_current_version(Reports, name) previous_version = kwargs["previous_version"] if previous_version == 'N/A': previous_version = None if current_version == 1: previous_version = None report = Reports( name=name, user=user, current_version=current_version, previous_version=previous_version, markdown=markdown, ) report.save() fetched_references = [Reference.objects.get(name=x) for x in references] report.references.add(*fetched_references) report.save() ret = { 'created_at' : format_time(report.created_at), 'current_version': current_version, 'jstree': build_jstree(Reports, report.name) } #print (ret) return success(ret) ################################# #### END OF REPORTS ############# ################################# ################################# ####TOOLS / DATA################# ################################# @has_data def get_tools(request, **kwargs): ''' Serve GET Request for Tools bootstrap table def serve_boostrap_table2(model, count_f, query_f, bindings, **kwargs): count_f = Tools.objects.values('name', 'url').annotate(Count('name')).count() query_f = Tools.objects.values('name', 'url').annotate(Count('name') ''' bindings = { 'name' : 'name', 'url': lambda entry : '<a href="{}" target="_blank">{}</a>'.format(entry['url'], entry['url']), #'total_edits': lambda entry: entry['name__count'], 'description': lambda entry: '' #'current_version': lambda entry: '{} -- {}'.format(entry.current_version, entry.previous_version), #'current_version': 'current_version', #'description': 'description', #'description': lambda entry: '{} {} -- {}'.format(entry.description, entry.current_version, entry.previous_version), } #return serve_boostrap_table(Tools, bindings, 'name', **kwargs) return 
serve_boostrap_table2( model = Tools, #count_f = lambda : Tools.objects.values('name', 'url').annotate(Count('name')).count(), query_f = lambda x : Tools.objects.values('name', 'url').annotate(Count('name')), filters = { }, bindings = bindings, **kwargs ) @has_data @has_error def get_tools_ui(request, **kwargs): ''' Called when we want an explicit tool from the UI ''' name = kwargs['name'] current_version = kwargs['current_version'] tool = Tools.objects.get(name=name, current_version=current_version) #print ('System: {}'.format(tool.system)) exposed = simplejson.loads(tool.exposed) if not len(exposed): exposed = [['', '', '']] jstree = build_jstree(Tools, tool.name) dependencies = build_jstree_tool_dependencies(tool, prefix='3', include_original=False) #print ('DEPENDENCIES:') #print (dependencies) ret = { 'name': tool.name, 'current_version': current_version, 'version' : tool.version, 'system' : simplejson.loads(tool.system), 'username': tool.user.username, 'created_at': format_time(tool.created_at), 'url': tool.url, 'description': tool.description, 'installation': tool.installation, 'validate_installation': tool.validate_installation, 'exposed': exposed, 'jstree': jstree, 'references': [x.code for x in tool.references.all()], 'summary': tool.summary, 'dependencies': dependencies } return success(ret) @has_data @has_field( ['name', 'version', 'url', 'description', 'installation'], ['Name cannot be empty', 'Version cannot be empty', 'Link cannot be empty', 'Description cannot be empty', 'Installation cannot be empty']) @has_error def add_tool(request, **kwargs): ''' Attempt to add a new Tool ''' system = kwargs['system'] system_p = simplejson.loads(system) if not len(system_p): return fail('Please select one or more systems') url = kwargs['url'] if not URL_validate(url): return fail('URL: {} does not seem to be valid'.format(url)) references = kwargs['references'] references = simplejson.loads(references) references = [Reference.objects.get(code=r) for r in references] name = kwargs['name'] current_version = get_maximum_current_version(Tools, name) previous_version = kwargs["previous_version"] if previous_version == 'N/A': previous_version = None # else: # print ('Previous version: {}'.format(previous_version)) # print ('Current version: {}'.format(current_version)) # a=1/0 # Throw exception deliberately print ('Current version: {}'.format(current_version)) user = get_user(request) version = kwargs['version'] description = kwargs['description'] installation=kwargs['installation'] validate_installation = kwargs['validate_installation'] exposed = kwargs['exposed'] #print ('Exposed: {} {}'.format(exposed, type(exposed).__name__)) # This is a list exposed = [e for e in exposed if any(e)] # Remove empty exposed = simplejson.dumps(exposed) # Serialize summary = kwargs['summary'] new_tool = Tools( user=user, name=name, version=version, system=system, current_version=current_version, previous_version=previous_version, url = url, description = description, installation = installation, validate_installation = validate_installation, exposed = exposed, summary = summary, ); new_tool.save() #Add references new_tool.references.add(*references) new_tool.save() jstree = build_jstree(Tools, new_tool.name) #Add dependencies dependencies = kwargs['dependencies'] dependencies_objects = [Tools.objects.get(name=dependency['name'], current_version=dependency['current_version']) for dependency in dependencies] new_tool.dependencies.add(*dependencies_objects) new_tool.save() #Get created at created_at = 
format_time(new_tool.created_at) #print ('Created at: {}'.format(created_at)) ret = { 'created_at': created_at, 'current_version': current_version, 'jstree': jstree } return success(ret) @has_data @has_error def jstree_tool(request, **kwargs): ''' AJAX backend to get the version jstree for a tool ''' name = kwargs['name'] prefix = kwargs['prefix'] ret = { 'jstree' : build_jstree(Tools, name, prefix=prefix), } return success(ret) @has_data @has_error def jstree_report(request, **kwargs): ''' AJAX backend to get the version jstree for a tool ''' name = kwargs['name'] prefix = kwargs['prefix'] ret = { 'jstree' : build_jstree(Reports, name, prefix=prefix), } return success(ret) @has_data @has_error def jstree_wf(request, **kwargs): ''' AJAX backend to get the version jstree for a tool ''' name = kwargs['name'] prefix = kwargs['prefix'] ret = { 'jstree' : build_jstree(Tasks, name, prefix=prefix), } return success(ret) @has_data @has_error def jstree_tool_dependencies(request, **kwargs): ''' AJAX backend to get the dependency jstree for a tool ''' name = kwargs['name'] current_version = int(kwargs['current_version']) if 'prefix' in kwargs: prefix=kwargs['prefix'] else: prefix = '3' tool = Tools.objects.get(name=name, current_version=current_version) ret = { 'jstree': build_jstree_tool_dependencies(tool, prefix=prefix, include_original=True) } #print(ret) return success(ret) @has_data @has_error def get_tool_dependencies(request, **kwargs): ''' Return ONE LEVEL dependencies of this tool ''' name = kwargs['name'] current_version = int(kwargs['current_version']) tool = Tools.objects.get(name=name, current_version=current_version) ret = { 'dependencies': [{'name': x.name, 'current_version': x.current_version} for x in tool.dependencies.all()] } return success(ret) @has_data @has_error def get_tool_variables(request, **kwargs): ''' Return the variables of this tool ''' name = kwargs['name'] current_version = int(kwargs['current_version']) tool = Tools.objects.get(name=name, current_version=current_version) ret = { 'variables': simplejson.loads(tool.exposed) } return success(ret) ######################################## ####END OF TOOLS / DATA################# ######################################## ######################################## ######### WORKFLOWS #################### ######################################## def jason_or_django(f): ''' getattr and iterate methods for JSON or DJANGO objects ''' def dec(*args, **kwargs): if type(args[0]) is dict: attr = lambda x,y : x[y] iterate = lambda x,y : (k for k in x[y]) elif type(args[0]) is Tasks: attr = lambda x,y : getattr(x,y) iterate = lambda x,y : (k for k in getattr(x,y).all()) else: raise ArkalosException('This should never happen: {}'.format(type(task))) kwargs['attr'] = attr kwargs['iterate'] = iterate return f(*args, **kwargs) return dec @jason_or_django def task_hash(task, **kwargs): ''' Creates a unique hash for this task attr: Get attribute iterate: Iterator ''' attr = kwargs['attr'] iterate = kwargs['iterate'] # Dictionary version # to_hash = [ # task['name'], # task['bash'], # task['documentation'], # '@@'.join(['&&'.join((x['name'], str(x['current_version']))) for x in task['dependencies'] if x['type'] == 'tool']), # '!!'.join(['**'.join((x['name'], str(x['current_version']) if x['is_workflow'] else 'None')) for x in task['calls']]), # '##'.join(task['inputs']), # '$$'.join(task['outputs']) # ] # This works with both dictionary and django database objects to_hash = [ attr(task, 'name'), attr(task, 'bash'), attr(task, 
'documentation'), '@@'.join(['&&'.join((attr(x, 'name'), str(attr(x, 'current_version')))) for x in iterate(task, 'dependencies')]), '!!'.join(['**'.join((attr(x, 'name'), str(attr(x, 'current_version')) if attr(x, 'current_version') else 'None')) for x in iterate(task, 'calls')]), '##'.join(attr(task, 'inputs')), '$$'.join(attr(task, 'outputs')), ] to_hash = '^^'.join(to_hash) to_hash_b = bytearray(to_hash, encoding="utf-8") return hashlib.sha256(to_hash_b).hexdigest() def save_task_or_workflow(request, workflow_or_task): ''' Saves a workflow or task ''' if workflow_or_task['is_workflow']: # This is worflow is_workflow = True if workflow_or_task['current_version'] is None: # This workflow is not saved # Get the previous_version previous_version = workflow_or_task['previous_version'] # Get the current number current_version = get_maximum_current_version(Tasks, workflow_or_task['name']) else: # This workflow is saved. Find it and return it worklfow = Tasks.objects.get(name=workflow_or_task['name'], current_version=workflow_or_task['current_version']) return worklfow else: # This is a task is_workflow = False current_version = None previous_version = None #Check if it exists in the database try: task = Tasks.objects.get(hash_field=workflow_or_task['hash_value']) except ObjectDoesNotExist: pass else: return task # It does not exist. Create it! task = Tasks( user=get_user(request), name=workflow_or_task['name'], current_version=current_version, previous_version=previous_version, bash=workflow_or_task['bash'], documentation=workflow_or_task['documentation'], hash_field=workflow_or_task['hash_value'], is_workflow=is_workflow, inputs=simplejson.dumps(workflow_or_task['inputs']), outputs=simplejson.dumps(workflow_or_task['outputs']), ) task.save() # Add dependencies tools = [] for dependency in workflow_or_task['dependencies']: if dependency['type'] != 'tool': continue tools += [Tools.objects.get(name=dependency['name'], current_version=dependency['current_version'])] task.dependencies.add(*tools) task.save() # Add references refs = get_references_from_text(workflow_or_task['documentation']) task.references.add(*refs) task.save() return task def update_TasksStats(task): ''' Update the stats of this task ''' name = task.name try: taskStat = TasksStats.objects.get(name=name) except ObjectDoesNotExist: taskStat = TasksStats( name=name, edits=1, users=1, last_edit=task, ) else: taskStat.edits += 1 taskStat.users = Tasks.objects.filter(name=name).values('user').count() taskStat.last_edit=task finally: taskStat.save() @has_data @has_error def add_workflow(request, **kwargs): ''' Add a new workflow ''' graph = kwargs['graph'] main_guid = kwargs['main_guid'] #Fix is_workflow for node in graph: node['is_workflow'] = node['type'] == 'workflow' #Take main node main_node = None for node in graph: if node['guid'] == main_guid: main_node = node break assert not (main_node is None) assert main_node['is_workflow'] # Check if there is another workflow with the same name if main_node['previous_version'] is None: # It is a new workflow! if db_exists(Tasks, {'name': main_node['name']}): return fail('Another workflow with this name exists. Please choose another name') # Check if this workflow calls another workflow which is unsaved (this is not allowed) for node in graph: if not node['is_workflow']: # It is not a workflow continue if node['guid'] == main_guid: # It is not the main workflow continue if node['current_version'] is None: # It is not saved return fail('Could not save. 
Workflow: {} calls an UNSAVED workflow: {}'.format(main_node['name'], node['name'])) #Fix the "calls" guids_to_graph = {node['guid']:node for node in graph} for node in graph: node['calls'] = [{'name': guids_to_graph[callee_guid]['name'], 'current_version': guids_to_graph[callee_guid]['current_version']} for callee_guid in node['serial_calls']] #Do the following three things: #1. Add hash_value information #2. Take the hash of the main workflow #3. Create a mapping from GUIDs to hash_values from_guid_to_hash = {} main_hash = None guids_to_hashes = {} for node in graph: #print ('======') #print(node) node['hash_value'] = task_hash(node) if node['guid'] == main_guid: main_hash = node['hash_value'] guids_to_hashes[node['guid']] = node['hash_value'] assert not (main_hash is None) # Save the graph and create a new dictionary with the saved objects hash_objects_dict = { node['hash_value']: save_task_or_workflow(request, node) for node in graph } #Add the who calls whom information for node in graph: this_node_called =[hash_objects_dict[guids_to_hashes[callee_guid]] for callee_guid in node['serial_calls']] if this_node_called: hash_objects_dict[node['hash_value']].calls.add(*this_node_called) hash_objects_dict[node['hash_value']].save() #Update TaskStats. Perhaps can be done better with signals update_TasksStats(hash_objects_dict[main_hash]) ret = { 'current_version': hash_objects_dict[main_hash].current_version, 'created_at': format_time(hash_objects_dict[main_hash].created_at), } return success(ret) def workflow_graph(workflow_or_task): ''' Create a caller--callee graph identical to the one sent from angular for a workflow ''' ret = [] all_hashes = [] def create_node(node): ret = { 'bash': node.bash, 'current_version': node.current_version, 'previous_version': node.previous_version, 'documentation': node.documentation, 'tools_jstree_data': [build_jstree_tool_dependencies(x, prefix='5', include_original=True) for x in node.dependencies.all()], 'inputs': simplejson.loads(node.inputs), 'outputs': simplejson.loads(node.outputs), 'type': 'workflow' if node.is_workflow else 'task', 'hash_value': node.hash_field, 'children': [] } if node.is_workflow: ret['name'] = node.name + '_' + str(node.current_version) ret['workflow_name'] = node.name ret['created_at'] = format_time(node.created_at) ret['username'] = node.user.username else: ret['name'] = node.name return ret def workflow_graph_rec(node): if node.hash_field in all_hashes: return all_hashes.append(node.hash_field) ret_json = create_node(node) ret_json['serial_calls'] = [] for callee in node.calls.all(): ret_json['serial_calls'].append(callee.hash_field) workflow_graph_rec(callee) ret.append(ret_json) workflow_graph_rec(workflow_or_task) return ret @has_data def get_workflow(request, **kwargs): ''' Creates a json object EXACTLY the same as the one saved return { "name": node.type == 'workflow' ? 
node.workflow_name : node.name, "bash": node.bash, "current_version": node.current_version, // This is always null "previous_version": node.previous_version, "documentation": node.documentation, "dependencies": node.tools_jstree_data, "serial_calls" : node.serial_calls, "inputs": node.inputs, "outputs": node.outputs, "type": node.type, "guid": node.guid }; ''' name = kwargs['name'] current_version = kwargs['current_version'] wf = Tasks.objects.get(name=name, current_version=current_version) graph = workflow_graph(wf) # print ('ret:') # print (ret) ret = { 'graph': graph, 'main_hash': wf.hash_field } return success(ret) @has_data def get_workflows(request, **kwargs): ''' Serve bootstrap table for workflows ''' def description(entry): ret = '<p>Edits: <strong>%i</strong> Users: <strong>%i</strong> Last Edit: <strong>%s</strong><br />Last documentation: %s</p>' % (entry.edits, entry.users, format_time(entry.last_edit.created_at), entry.last_edit.documentation) return ret bindings = { 'name' : 'name', 'description': description, } #return serve_boostrap_table(Tools, bindings, 'name', **kwargs) return serve_boostrap_table2( model = TasksStats, #count_f = lambda : Tasks.objects.values('name').count(), # COUNT ALL query_f = lambda x : TasksStats.objects.filter(**x), # Query function filters = { 'name': (lambda : 'name__icontains', lambda x : x) # name_contains = x }, bindings = bindings, **kwargs ) ######################################## ####### END OF WORKFLOWS ############### ########################################
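The task_hash / save_task_or_workflow pair above makes tasks content-addressed: the SHA-256 digest over name, bash, documentation, tool dependencies, callees, inputs and outputs is stored in hash_field, and add_workflow reuses an existing Tasks row whenever that digest already exists. A minimal sketch (all names and values hypothetical, not taken from the original file) of the dictionary form of a node as task_hash sees it inside add_workflow; only the fields task_hash actually reads are shown:

example_node = {
    'name': 'count_lines',                      # hypothetical task name
    'bash': 'wc -l ${input} > ${output}',
    'documentation': 'Counts the lines of the input file.',
    # one tool dependency; task_hash only reads 'name' and 'current_version'
    'dependencies': [{'type': 'tool', 'name': 'coreutils', 'current_version': 1}],
    # inside add_workflow this list is derived from 'serial_calls' before hashing
    'calls': [],
    'inputs': ['input'],
    'outputs': ['output'],
}
# Because task_hash is wrapped by jason_or_django, the same call also accepts
# a saved Tasks instance; identical content yields the same digest, which is
# what save_task_or_workflow uses to de-duplicate rows via hash_field.
# digest = task_hash(example_node)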
28.579895
236
0.590729
from django.http import HttpResponse from django.contrib.auth.models import User from django.contrib.auth import authenticate, login, logout from django.shortcuts import redirect from django.core.validators import URLValidator from django.core.exceptions import ValidationError, ObjectDoesNotExist from django.db.models import Max, Count from app.models import Reference, Tools, Reports, Tasks, TasksStats import io import re import six import uuid import hashlib import simplejson from pybtex.database import parse_string as parse_reference_string import pybtex.database.input.bibtex import pybtex.plugin pybtex_style = pybtex.plugin.find_plugin('pybtex.style.formatting', 'plain')() pybtex_html_backend = pybtex.plugin.find_plugin('pybtex.backends', 'html')() pybtex_parser = pybtex.database.input.bibtex.Parser() sep = '||' sep2 = '@@' format_time_string = '%a, %d %b %Y %H:%M:%S' ption): pass def get_guid(): return str(uuid.uuid4()) def get_user_id(request): is_authenticated = request.user.is_authenticated() if is_authenticated: return request.user.id return None def get_user(request): is_authenticated = request.user.is_authenticated() if is_authenticated: return request.user return None def fail(error_message=None): ret = {'success': False, 'error_message': error_message} json = simplejson.dumps(ret) return HttpResponse(json, content_type='application/json') def success(data={}): data['success'] = True json = simplejson.dumps(data) return HttpResponse(json, content_type='application/json') def has_data(f): def wrapper(*args, **kwargs): request = args[0] if request.method == 'POST': if len(request.POST): for k in request.POST: kwargs[k] = request.POST[k] else: POST = simplejson.loads(request.body) for k in POST: kwargs[k] = POST[k] elif request.method == 'GET': for k in request.GET: kwargs[k] = request.GET[k] print ("GET: {} == {}".format(k, kwargs[k])) return f(*args, **kwargs) return wrapper def has_field(field_names, errors): def decorator(f): def wrapper(*args, **kwargs): for field_index, field_name in enumerate(field_names): if not field_name in kwargs: if callable(errors): kwargs['error'] = errors(field_name) elif type(errors) is list: kwargs['error'] = errors[field_index] elif type(errors) is dict: kwargs['error'] = errors[field_name] elif type(errors) is str: kwargs['error'] = errors else: raise ArkalosException('Unknown error type: {}'.format(type(error).__name__)) return f(*args, **kwargs) return f(*args, **kwargs) return wrapper return decorator def has_error(f): def wrapper(*args, **kwargs): if 'error' in kwargs: return fail(kwargs['error']) return f(*args, **kwargs) return wrapper def username_exists(username): return User.objects.filter(username=username).exists() def URL_validate(url): try: url_validator(url) except ValidationError as e: return False return True def format_time(t): return t.strftime(format_time_string)
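The has_data, has_field and has_error decorators defined above are the request-handling convention of these views: has_data flattens GET, form POST or JSON-body parameters into keyword arguments, has_field injects an 'error' entry when a required parameter is missing, and has_error short-circuits with fail() before the view body runs. A minimal sketch of how a new endpoint would compose them (the endpoint and field names are hypothetical and not part of the original file):

@has_data
@has_field(['name'], 'The "name" parameter is required')
@has_error
def echo_name(request, **kwargs):
    # Reaching this body means 'name' was present; otherwise has_field set
    # kwargs['error'] and has_error already returned fail(...).
    return success({'echo': kwargs['name']})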
true
true
f70297f48478ba961bd1e24c7eaa558a44b1e6f1
52,230
py
Python
app/venv/lib/python2.7/site-packages/numpy/lib/arraypad.py
anaheino/Ufo-sightings-map
64af02093f97737cbbdfd8af9e1aeb4d8aa8fcdc
[ "MIT" ]
652
2015-07-26T00:00:17.000Z
2022-02-24T18:30:04.000Z
app/venv/lib/python2.7/site-packages/numpy/lib/arraypad.py
anaheino/Ufo-sightings-map
64af02093f97737cbbdfd8af9e1aeb4d8aa8fcdc
[ "MIT" ]
75
2016-01-14T16:03:02.000Z
2020-04-29T22:51:53.000Z
app/venv/lib/python2.7/site-packages/numpy/lib/arraypad.py
anaheino/Ufo-sightings-map
64af02093f97737cbbdfd8af9e1aeb4d8aa8fcdc
[ "MIT" ]
40
2015-07-24T19:45:08.000Z
2021-11-01T14:54:56.000Z
""" The arraypad module contains a group of functions to pad values onto the edges of an n-dimensional array. """ from __future__ import division, absolute_import, print_function import numpy as np __all__ = ['pad'] ############################################################################### # Private utility functions. def _arange_ndarray(arr, shape, axis, reverse=False): """ Create an ndarray of `shape` with increments along specified `axis` Parameters ---------- arr : ndarray Input array of arbitrary shape. shape : tuple of ints Shape of desired array. Should be equivalent to `arr.shape` except `shape[axis]` which may have any positive value. axis : int Axis to increment along. reverse : bool If False, increment in a positive fashion from 1 to `shape[axis]`, inclusive. If True, the bounds are the same but the order reversed. Returns ------- padarr : ndarray Output array sized to pad `arr` along `axis`, with linear range from 1 to `shape[axis]` along specified `axis`. Notes ----- The range is deliberately 1-indexed for this specific use case. Think of this algorithm as broadcasting `np.arange` to a single `axis` of an arbitrarily shaped ndarray. """ initshape = tuple(1 if i != axis else shape[axis] for (i, x) in enumerate(arr.shape)) if not reverse: padarr = np.arange(1, shape[axis] + 1) else: padarr = np.arange(shape[axis], 0, -1) padarr = padarr.reshape(initshape) for i, dim in enumerate(shape): if padarr.shape[i] != dim: padarr = padarr.repeat(dim, axis=i) return padarr def _round_ifneeded(arr, dtype): """ Rounds arr inplace if destination dtype is integer. Parameters ---------- arr : ndarray Input array. dtype : dtype The dtype of the destination array. """ if np.issubdtype(dtype, np.integer): arr.round(out=arr) def _prepend_const(arr, pad_amt, val, axis=-1): """ Prepend constant `val` along `axis` of `arr`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to prepend. val : scalar Constant value to use. For best results should be of type `arr.dtype`; if not `arr.dtype` will be cast to `arr.dtype`. axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` constant `val` prepended along `axis`. """ if pad_amt == 0: return arr padshape = tuple(x if i != axis else pad_amt for (i, x) in enumerate(arr.shape)) if val == 0: return np.concatenate((np.zeros(padshape, dtype=arr.dtype), arr), axis=axis) else: return np.concatenate(((np.zeros(padshape) + val).astype(arr.dtype), arr), axis=axis) def _append_const(arr, pad_amt, val, axis=-1): """ Append constant `val` along `axis` of `arr`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to append. val : scalar Constant value to use. For best results should be of type `arr.dtype`; if not `arr.dtype` will be cast to `arr.dtype`. axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` constant `val` appended along `axis`. """ if pad_amt == 0: return arr padshape = tuple(x if i != axis else pad_amt for (i, x) in enumerate(arr.shape)) if val == 0: return np.concatenate((arr, np.zeros(padshape, dtype=arr.dtype)), axis=axis) else: return np.concatenate( (arr, (np.zeros(padshape) + val).astype(arr.dtype)), axis=axis) def _prepend_edge(arr, pad_amt, axis=-1): """ Prepend `pad_amt` to `arr` along `axis` by extending edge values. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to prepend. 
axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, extended by `pad_amt` edge values appended along `axis`. """ if pad_amt == 0: return arr edge_slice = tuple(slice(None) if i != axis else 0 for (i, x) in enumerate(arr.shape)) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) edge_arr = arr[edge_slice].reshape(pad_singleton) return np.concatenate((edge_arr.repeat(pad_amt, axis=axis), arr), axis=axis) def _append_edge(arr, pad_amt, axis=-1): """ Append `pad_amt` to `arr` along `axis` by extending edge values. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to append. axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, extended by `pad_amt` edge values prepended along `axis`. """ if pad_amt == 0: return arr edge_slice = tuple(slice(None) if i != axis else arr.shape[axis] - 1 for (i, x) in enumerate(arr.shape)) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) edge_arr = arr[edge_slice].reshape(pad_singleton) return np.concatenate((arr, edge_arr.repeat(pad_amt, axis=axis)), axis=axis) def _prepend_ramp(arr, pad_amt, end, axis=-1): """ Prepend linear ramp along `axis`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to prepend. end : scalar Constal value to use. For best results should be of type `arr.dtype`; if not `arr.dtype` will be cast to `arr.dtype`. axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` values prepended along `axis`. The prepended region ramps linearly from the edge value to `end`. """ if pad_amt == 0: return arr # Generate shape for final concatenated array padshape = tuple(x if i != axis else pad_amt for (i, x) in enumerate(arr.shape)) # Generate an n-dimensional array incrementing along `axis` ramp_arr = _arange_ndarray(arr, padshape, axis, reverse=True).astype(np.float64) # Appropriate slicing to extract n-dimensional edge along `axis` edge_slice = tuple(slice(None) if i != axis else 0 for (i, x) in enumerate(arr.shape)) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) # Extract edge, reshape to original rank, and extend along `axis` edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis) # Linear ramp slope = (end - edge_pad) / float(pad_amt) ramp_arr = ramp_arr * slope ramp_arr += edge_pad _round_ifneeded(ramp_arr, arr.dtype) # Ramp values will most likely be float, cast them to the same type as arr return np.concatenate((ramp_arr.astype(arr.dtype), arr), axis=axis) def _append_ramp(arr, pad_amt, end, axis=-1): """ Append linear ramp along `axis`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to append. end : scalar Constal value to use. For best results should be of type `arr.dtype`; if not `arr.dtype` will be cast to `arr.dtype`. axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` values appended along `axis`. The appended region ramps linearly from the edge value to `end`. 
""" if pad_amt == 0: return arr # Generate shape for final concatenated array padshape = tuple(x if i != axis else pad_amt for (i, x) in enumerate(arr.shape)) # Generate an n-dimensional array incrementing along `axis` ramp_arr = _arange_ndarray(arr, padshape, axis, reverse=False).astype(np.float64) # Slice a chunk from the edge to calculate stats on edge_slice = tuple(slice(None) if i != axis else -1 for (i, x) in enumerate(arr.shape)) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) # Extract edge, reshape to original rank, and extend along `axis` edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis) # Linear ramp slope = (end - edge_pad) / float(pad_amt) ramp_arr = ramp_arr * slope ramp_arr += edge_pad _round_ifneeded(ramp_arr, arr.dtype) # Ramp values will most likely be float, cast them to the same type as arr return np.concatenate((arr, ramp_arr.astype(arr.dtype)), axis=axis) def _prepend_max(arr, pad_amt, num, axis=-1): """ Prepend `pad_amt` maximum values along `axis`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to prepend. num : int Depth into `arr` along `axis` to calculate maximum. Range: [1, `arr.shape[axis]`] or None (entire axis) axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` values appended along `axis`. The prepended region is the maximum of the first `num` values along `axis`. """ if pad_amt == 0: return arr # Equivalent to edge padding for single value, so do that instead if num == 1: return _prepend_edge(arr, pad_amt, axis) # Use entire array if `num` is too large if num is not None: if num >= arr.shape[axis]: num = None # Slice a chunk from the edge to calculate stats on max_slice = tuple(slice(None) if i != axis else slice(num) for (i, x) in enumerate(arr.shape)) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) # Extract slice, calculate max, reshape to add singleton dimension back max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton) # Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt` return np.concatenate((max_chunk.repeat(pad_amt, axis=axis), arr), axis=axis) def _append_max(arr, pad_amt, num, axis=-1): """ Pad one `axis` of `arr` with the maximum of the last `num` elements. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to append. num : int Depth into `arr` along `axis` to calculate maximum. Range: [1, `arr.shape[axis]`] or None (entire axis) axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` values appended along `axis`. The appended region is the maximum of the final `num` values along `axis`. 
""" if pad_amt == 0: return arr # Equivalent to edge padding for single value, so do that instead if num == 1: return _append_edge(arr, pad_amt, axis) # Use entire array if `num` is too large if num is not None: if num >= arr.shape[axis]: num = None # Slice a chunk from the edge to calculate stats on end = arr.shape[axis] - 1 if num is not None: max_slice = tuple( slice(None) if i != axis else slice(end, end - num, -1) for (i, x) in enumerate(arr.shape)) else: max_slice = tuple(slice(None) for x in arr.shape) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) # Extract slice, calculate max, reshape to add singleton dimension back max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton) # Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt` return np.concatenate((arr, max_chunk.repeat(pad_amt, axis=axis)), axis=axis) def _prepend_mean(arr, pad_amt, num, axis=-1): """ Prepend `pad_amt` mean values along `axis`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to prepend. num : int Depth into `arr` along `axis` to calculate mean. Range: [1, `arr.shape[axis]`] or None (entire axis) axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` values prepended along `axis`. The prepended region is the mean of the first `num` values along `axis`. """ if pad_amt == 0: return arr # Equivalent to edge padding for single value, so do that instead if num == 1: return _prepend_edge(arr, pad_amt, axis) # Use entire array if `num` is too large if num is not None: if num >= arr.shape[axis]: num = None # Slice a chunk from the edge to calculate stats on mean_slice = tuple(slice(None) if i != axis else slice(num) for (i, x) in enumerate(arr.shape)) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) # Extract slice, calculate mean, reshape to add singleton dimension back mean_chunk = arr[mean_slice].mean(axis).reshape(pad_singleton) _round_ifneeded(mean_chunk, arr.dtype) # Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt` return np.concatenate((mean_chunk.repeat(pad_amt, axis).astype(arr.dtype), arr), axis=axis) def _append_mean(arr, pad_amt, num, axis=-1): """ Append `pad_amt` mean values along `axis`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to append. num : int Depth into `arr` along `axis` to calculate mean. Range: [1, `arr.shape[axis]`] or None (entire axis) axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` values appended along `axis`. The appended region is the maximum of the final `num` values along `axis`. 
""" if pad_amt == 0: return arr # Equivalent to edge padding for single value, so do that instead if num == 1: return _append_edge(arr, pad_amt, axis) # Use entire array if `num` is too large if num is not None: if num >= arr.shape[axis]: num = None # Slice a chunk from the edge to calculate stats on end = arr.shape[axis] - 1 if num is not None: mean_slice = tuple( slice(None) if i != axis else slice(end, end - num, -1) for (i, x) in enumerate(arr.shape)) else: mean_slice = tuple(slice(None) for x in arr.shape) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) # Extract slice, calculate mean, reshape to add singleton dimension back mean_chunk = arr[mean_slice].mean(axis=axis).reshape(pad_singleton) _round_ifneeded(mean_chunk, arr.dtype) # Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt` return np.concatenate( (arr, mean_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis) def _prepend_med(arr, pad_amt, num, axis=-1): """ Prepend `pad_amt` median values along `axis`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to prepend. num : int Depth into `arr` along `axis` to calculate median. Range: [1, `arr.shape[axis]`] or None (entire axis) axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` values prepended along `axis`. The prepended region is the median of the first `num` values along `axis`. """ if pad_amt == 0: return arr # Equivalent to edge padding for single value, so do that instead if num == 1: return _prepend_edge(arr, pad_amt, axis) # Use entire array if `num` is too large if num is not None: if num >= arr.shape[axis]: num = None # Slice a chunk from the edge to calculate stats on med_slice = tuple(slice(None) if i != axis else slice(num) for (i, x) in enumerate(arr.shape)) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) # Extract slice, calculate median, reshape to add singleton dimension back med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton) _round_ifneeded(med_chunk, arr.dtype) # Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt` return np.concatenate( (med_chunk.repeat(pad_amt, axis).astype(arr.dtype), arr), axis=axis) def _append_med(arr, pad_amt, num, axis=-1): """ Append `pad_amt` median values along `axis`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to append. num : int Depth into `arr` along `axis` to calculate median. Range: [1, `arr.shape[axis]`] or None (entire axis) axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` values appended along `axis`. The appended region is the median of the final `num` values along `axis`. 
""" if pad_amt == 0: return arr # Equivalent to edge padding for single value, so do that instead if num == 1: return _append_edge(arr, pad_amt, axis) # Use entire array if `num` is too large if num is not None: if num >= arr.shape[axis]: num = None # Slice a chunk from the edge to calculate stats on end = arr.shape[axis] - 1 if num is not None: med_slice = tuple( slice(None) if i != axis else slice(end, end - num, -1) for (i, x) in enumerate(arr.shape)) else: med_slice = tuple(slice(None) for x in arr.shape) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) # Extract slice, calculate median, reshape to add singleton dimension back med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton) _round_ifneeded(med_chunk, arr.dtype) # Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt` return np.concatenate( (arr, med_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis) def _prepend_min(arr, pad_amt, num, axis=-1): """ Prepend `pad_amt` minimum values along `axis`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to prepend. num : int Depth into `arr` along `axis` to calculate minimum. Range: [1, `arr.shape[axis]`] or None (entire axis) axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` values prepended along `axis`. The prepended region is the minimum of the first `num` values along `axis`. """ if pad_amt == 0: return arr # Equivalent to edge padding for single value, so do that instead if num == 1: return _prepend_edge(arr, pad_amt, axis) # Use entire array if `num` is too large if num is not None: if num >= arr.shape[axis]: num = None # Slice a chunk from the edge to calculate stats on min_slice = tuple(slice(None) if i != axis else slice(num) for (i, x) in enumerate(arr.shape)) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) # Extract slice, calculate min, reshape to add singleton dimension back min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton) # Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt` return np.concatenate((min_chunk.repeat(pad_amt, axis=axis), arr), axis=axis) def _append_min(arr, pad_amt, num, axis=-1): """ Append `pad_amt` median values along `axis`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to append. num : int Depth into `arr` along `axis` to calculate minimum. Range: [1, `arr.shape[axis]`] or None (entire axis) axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` values appended along `axis`. The appended region is the minimum of the final `num` values along `axis`. 
""" if pad_amt == 0: return arr # Equivalent to edge padding for single value, so do that instead if num == 1: return _append_edge(arr, pad_amt, axis) # Use entire array if `num` is too large if num is not None: if num >= arr.shape[axis]: num = None # Slice a chunk from the edge to calculate stats on end = arr.shape[axis] - 1 if num is not None: min_slice = tuple( slice(None) if i != axis else slice(end, end - num, -1) for (i, x) in enumerate(arr.shape)) else: min_slice = tuple(slice(None) for x in arr.shape) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) # Extract slice, calculate min, reshape to add singleton dimension back min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton) # Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt` return np.concatenate((arr, min_chunk.repeat(pad_amt, axis=axis)), axis=axis) def _pad_ref(arr, pad_amt, method, axis=-1): """ Pad `axis` of `arr` by reflection. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : tuple of ints, length 2 Padding to (prepend, append) along `axis`. method : str Controls method of reflection; options are 'even' or 'odd'. axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt[0]` values prepended and `pad_amt[1]` values appended along `axis`. Both regions are padded with reflected values from the original array. Notes ----- This algorithm does not pad with repetition, i.e. the edges are not repeated in the reflection. For that behavior, use `mode='symmetric'`. The modes 'reflect', 'symmetric', and 'wrap' must be padded with a single function, lest the indexing tricks in non-integer multiples of the original shape would violate repetition in the final iteration. 
""" # Implicit booleanness to test for zero (or None) in any scalar type if pad_amt[0] == 0 and pad_amt[1] == 0: return arr ########################################################################## # Prepended region # Slice off a reverse indexed chunk from near edge to pad `arr` before ref_slice = tuple(slice(None) if i != axis else slice(pad_amt[0], 0, -1) for (i, x) in enumerate(arr.shape)) ref_chunk1 = arr[ref_slice] # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) if pad_amt[0] == 1: ref_chunk1 = ref_chunk1.reshape(pad_singleton) # Memory/computationally more expensive, only do this if `method='odd'` if 'odd' in method and pad_amt[0] > 0: edge_slice1 = tuple(slice(None) if i != axis else 0 for (i, x) in enumerate(arr.shape)) edge_chunk = arr[edge_slice1].reshape(pad_singleton) ref_chunk1 = 2 * edge_chunk - ref_chunk1 del edge_chunk ########################################################################## # Appended region # Slice off a reverse indexed chunk from far edge to pad `arr` after start = arr.shape[axis] - pad_amt[1] - 1 end = arr.shape[axis] - 1 ref_slice = tuple(slice(None) if i != axis else slice(start, end) for (i, x) in enumerate(arr.shape)) rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1) for (i, x) in enumerate(arr.shape)) ref_chunk2 = arr[ref_slice][rev_idx] if pad_amt[1] == 1: ref_chunk2 = ref_chunk2.reshape(pad_singleton) if 'odd' in method: edge_slice2 = tuple(slice(None) if i != axis else -1 for (i, x) in enumerate(arr.shape)) edge_chunk = arr[edge_slice2].reshape(pad_singleton) ref_chunk2 = 2 * edge_chunk - ref_chunk2 del edge_chunk # Concatenate `arr` with both chunks, extending along `axis` return np.concatenate((ref_chunk1, arr, ref_chunk2), axis=axis) def _pad_sym(arr, pad_amt, method, axis=-1): """ Pad `axis` of `arr` by symmetry. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : tuple of ints, length 2 Padding to (prepend, append) along `axis`. method : str Controls method of symmetry; options are 'even' or 'odd'. axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt[0]` values prepended and `pad_amt[1]` values appended along `axis`. Both regions are padded with symmetric values from the original array. Notes ----- This algorithm DOES pad with repetition, i.e. the edges are repeated. For padding without repeated edges, use `mode='reflect'`. The modes 'reflect', 'symmetric', and 'wrap' must be padded with a single function, lest the indexing tricks in non-integer multiples of the original shape would violate repetition in the final iteration. 
""" # Implicit booleanness to test for zero (or None) in any scalar type if pad_amt[0] == 0 and pad_amt[1] == 0: return arr ########################################################################## # Prepended region # Slice off a reverse indexed chunk from near edge to pad `arr` before sym_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[0]) for (i, x) in enumerate(arr.shape)) rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1) for (i, x) in enumerate(arr.shape)) sym_chunk1 = arr[sym_slice][rev_idx] # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) if pad_amt[0] == 1: sym_chunk1 = sym_chunk1.reshape(pad_singleton) # Memory/computationally more expensive, only do this if `method='odd'` if 'odd' in method and pad_amt[0] > 0: edge_slice1 = tuple(slice(None) if i != axis else 0 for (i, x) in enumerate(arr.shape)) edge_chunk = arr[edge_slice1].reshape(pad_singleton) sym_chunk1 = 2 * edge_chunk - sym_chunk1 del edge_chunk ########################################################################## # Appended region # Slice off a reverse indexed chunk from far edge to pad `arr` after start = arr.shape[axis] - pad_amt[1] end = arr.shape[axis] sym_slice = tuple(slice(None) if i != axis else slice(start, end) for (i, x) in enumerate(arr.shape)) sym_chunk2 = arr[sym_slice][rev_idx] if pad_amt[1] == 1: sym_chunk2 = sym_chunk2.reshape(pad_singleton) if 'odd' in method: edge_slice2 = tuple(slice(None) if i != axis else -1 for (i, x) in enumerate(arr.shape)) edge_chunk = arr[edge_slice2].reshape(pad_singleton) sym_chunk2 = 2 * edge_chunk - sym_chunk2 del edge_chunk # Concatenate `arr` with both chunks, extending along `axis` return np.concatenate((sym_chunk1, arr, sym_chunk2), axis=axis) def _pad_wrap(arr, pad_amt, axis=-1): """ Pad `axis` of `arr` via wrapping. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : tuple of ints, length 2 Padding to (prepend, append) along `axis`. axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt[0]` values prepended and `pad_amt[1]` values appended along `axis`. Both regions are padded wrapped values from the opposite end of `axis`. Notes ----- This method of padding is also known as 'tile' or 'tiling'. The modes 'reflect', 'symmetric', and 'wrap' must be padded with a single function, lest the indexing tricks in non-integer multiples of the original shape would violate repetition in the final iteration. 
""" # Implicit booleanness to test for zero (or None) in any scalar type if pad_amt[0] == 0 and pad_amt[1] == 0: return arr ########################################################################## # Prepended region # Slice off a reverse indexed chunk from near edge to pad `arr` before start = arr.shape[axis] - pad_amt[0] end = arr.shape[axis] wrap_slice = tuple(slice(None) if i != axis else slice(start, end) for (i, x) in enumerate(arr.shape)) wrap_chunk1 = arr[wrap_slice] # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) if pad_amt[0] == 1: wrap_chunk1 = wrap_chunk1.reshape(pad_singleton) ########################################################################## # Appended region # Slice off a reverse indexed chunk from far edge to pad `arr` after wrap_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[1]) for (i, x) in enumerate(arr.shape)) wrap_chunk2 = arr[wrap_slice] if pad_amt[1] == 1: wrap_chunk2 = wrap_chunk2.reshape(pad_singleton) # Concatenate `arr` with both chunks, extending along `axis` return np.concatenate((wrap_chunk1, arr, wrap_chunk2), axis=axis) def _normalize_shape(ndarray, shape, cast_to_int=True): """ Private function which does some checks and normalizes the possibly much simpler representations of 'pad_width', 'stat_length', 'constant_values', 'end_values'. Parameters ---------- narray : ndarray Input ndarray shape : {sequence, array_like, float, int}, optional The width of padding (pad_width), the number of elements on the edge of the narray used for statistics (stat_length), the constant value(s) to use when filling padded regions (constant_values), or the endpoint target(s) for linear ramps (end_values). ((before_1, after_1), ... (before_N, after_N)) unique number of elements for each axis where `N` is rank of `narray`. ((before, after),) yields same before and after constants for each axis. (constant,) or val is a shortcut for before = after = constant for all axes. cast_to_int : bool, optional Controls if values in ``shape`` will be rounded and cast to int before being returned. Returns ------- normalized_shape : tuple of tuples val => ((val, val), (val, val), ...) [[val1, val2], [val3, val4], ...] => ((val1, val2), (val3, val4), ...) ((val1, val2), (val3, val4), ...) => no change [[val1, val2], ] => ((val1, val2), (val1, val2), ...) ((val1, val2), ) => ((val1, val2), (val1, val2), ...) [[val , ], ] => ((val, val), (val, val), ...) ((val , ), ) => ((val, val), (val, val), ...) 
""" ndims = ndarray.ndim # Shortcut shape=None if shape is None: return ((None, None), ) * ndims # Convert any input `info` to a NumPy array arr = np.asarray(shape) # Switch based on what input looks like if arr.ndim <= 1: if arr.shape == () or arr.shape == (1,): # Single scalar input # Create new array of ones, multiply by the scalar arr = np.ones((ndims, 2), dtype=ndarray.dtype) * arr elif arr.shape == (2,): # Apply padding (before, after) each axis # Create new axis 0, repeat along it for every axis arr = arr[np.newaxis, :].repeat(ndims, axis=0) else: fmt = "Unable to create correctly shaped tuple from %s" raise ValueError(fmt % (shape,)) elif arr.ndim == 2: if arr.shape[1] == 1 and arr.shape[0] == ndims: # Padded before and after by the same amount arr = arr.repeat(2, axis=1) elif arr.shape[0] == ndims: # Input correctly formatted, pass it on as `arr` arr = shape else: fmt = "Unable to create correctly shaped tuple from %s" raise ValueError(fmt % (shape,)) else: fmt = "Unable to create correctly shaped tuple from %s" raise ValueError(fmt % (shape,)) # Cast if necessary if cast_to_int is True: arr = np.round(arr).astype(int) # Convert list of lists to tuple of tuples return tuple(tuple(axis) for axis in arr.tolist()) def _validate_lengths(narray, number_elements): """ Private function which does some checks and reformats pad_width and stat_length using _normalize_shape. Parameters ---------- narray : ndarray Input ndarray number_elements : {sequence, int}, optional The width of padding (pad_width) or the number of elements on the edge of the narray used for statistics (stat_length). ((before_1, after_1), ... (before_N, after_N)) unique number of elements for each axis. ((before, after),) yields same before and after constants for each axis. (constant,) or int is a shortcut for before = after = constant for all axes. Returns ------- _validate_lengths : tuple of tuples int => ((int, int), (int, int), ...) [[int1, int2], [int3, int4], ...] => ((int1, int2), (int3, int4), ...) ((int1, int2), (int3, int4), ...) => no change [[int1, int2], ] => ((int1, int2), (int1, int2), ...) ((int1, int2), ) => ((int1, int2), (int1, int2), ...) [[int , ], ] => ((int, int), (int, int), ...) ((int , ), ) => ((int, int), (int, int), ...) """ normshp = _normalize_shape(narray, number_elements) for i in normshp: chk = [1 if x is None else x for x in i] chk = [1 if x >= 0 else -1 for x in chk] if (chk[0] < 0) or (chk[1] < 0): fmt = "%s cannot contain negative values." raise ValueError(fmt % (number_elements,)) return normshp ############################################################################### # Public functions def pad(array, pad_width, mode=None, **kwargs): """ Pads an array. Parameters ---------- array : array_like of rank N Input array pad_width : {sequence, array_like, int} Number of values padded to the edges of each axis. ((before_1, after_1), ... (before_N, after_N)) unique pad widths for each axis. ((before, after),) yields same before and after pad for each axis. (pad,) or int is a shortcut for before = after = pad width for all axes. mode : str or function One of the following string values or a user supplied function. 'constant' Pads with a constant value. 'edge' Pads with the edge values of array. 'linear_ramp' Pads with the linear ramp between end_value and the array edge value. 'maximum' Pads with the maximum value of all or part of the vector along each axis. 'mean' Pads with the mean value of all or part of the vector along each axis. 
'median' Pads with the median value of all or part of the vector along each axis. 'minimum' Pads with the minimum value of all or part of the vector along each axis. 'reflect' Pads with the reflection of the vector mirrored on the first and last values of the vector along each axis. 'symmetric' Pads with the reflection of the vector mirrored along the edge of the array. 'wrap' Pads with the wrap of the vector along the axis. The first values are used to pad the end and the end values are used to pad the beginning. <function> Padding function, see Notes. stat_length : sequence or int, optional Used in 'maximum', 'mean', 'median', and 'minimum'. Number of values at edge of each axis used to calculate the statistic value. ((before_1, after_1), ... (before_N, after_N)) unique statistic lengths for each axis. ((before, after),) yields same before and after statistic lengths for each axis. (stat_length,) or int is a shortcut for before = after = statistic length for all axes. Default is ``None``, to use the entire axis. constant_values : sequence or int, optional Used in 'constant'. The values to set the padded values for each axis. ((before_1, after_1), ... (before_N, after_N)) unique pad constants for each axis. ((before, after),) yields same before and after constants for each axis. (constant,) or int is a shortcut for before = after = constant for all axes. Default is 0. end_values : sequence or int, optional Used in 'linear_ramp'. The values used for the ending value of the linear_ramp and that will form the edge of the padded array. ((before_1, after_1), ... (before_N, after_N)) unique end values for each axis. ((before, after),) yields same before and after end values for each axis. (constant,) or int is a shortcut for before = after = end value for all axes. Default is 0. reflect_type : {'even', 'odd'}, optional Used in 'reflect', and 'symmetric'. The 'even' style is the default with an unaltered reflection around the edge value. For the 'odd' style, the extented part of the array is created by subtracting the reflected values from two times the edge value. Returns ------- pad : ndarray Padded array of rank equal to `array` with shape increased according to `pad_width`. Notes ----- .. versionadded:: 1.7.0 For an array with rank greater than 1, some of the padding of later axes is calculated from padding of previous axes. This is easiest to think about with a rank 2 array where the corners of the padded array are calculated by using padded values from the first axis. The padding function, if used, should return a rank 1 array equal in length to the vector argument with padded values replaced. It has the following signature:: padding_func(vector, iaxis_pad_width, iaxis, **kwargs) where vector : ndarray A rank 1 array already padded with zeros. Padded values are vector[:pad_tuple[0]] and vector[-pad_tuple[1]:]. iaxis_pad_width : tuple A 2-tuple of ints, iaxis_pad_width[0] represents the number of values padded at the beginning of vector where iaxis_pad_width[1] represents the number of values padded at the end of vector. iaxis : int The axis currently being calculated. kwargs : misc Any keyword arguments the function requires. 
Examples -------- >>> a = [1, 2, 3, 4, 5] >>> np.lib.pad(a, (2,3), 'constant', constant_values=(4, 6)) array([4, 4, 1, 2, 3, 4, 5, 6, 6, 6]) >>> np.lib.pad(a, (2, 3), 'edge') array([1, 1, 1, 2, 3, 4, 5, 5, 5, 5]) >>> np.lib.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4)) array([ 5, 3, 1, 2, 3, 4, 5, 2, -1, -4]) >>> np.lib.pad(a, (2,), 'maximum') array([5, 5, 1, 2, 3, 4, 5, 5, 5]) >>> np.lib.pad(a, (2,), 'mean') array([3, 3, 1, 2, 3, 4, 5, 3, 3]) >>> np.lib.pad(a, (2,), 'median') array([3, 3, 1, 2, 3, 4, 5, 3, 3]) >>> a = [[1, 2], [3, 4]] >>> np.lib.pad(a, ((3, 2), (2, 3)), 'minimum') array([[1, 1, 1, 2, 1, 1, 1], [1, 1, 1, 2, 1, 1, 1], [1, 1, 1, 2, 1, 1, 1], [1, 1, 1, 2, 1, 1, 1], [3, 3, 3, 4, 3, 3, 3], [1, 1, 1, 2, 1, 1, 1], [1, 1, 1, 2, 1, 1, 1]]) >>> a = [1, 2, 3, 4, 5] >>> np.lib.pad(a, (2, 3), 'reflect') array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2]) >>> np.lib.pad(a, (2, 3), 'reflect', reflect_type='odd') array([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8]) >>> np.lib.pad(a, (2, 3), 'symmetric') array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3]) >>> np.lib.pad(a, (2, 3), 'symmetric', reflect_type='odd') array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7]) >>> np.lib.pad(a, (2, 3), 'wrap') array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3]) >>> def padwithtens(vector, pad_width, iaxis, kwargs): ... vector[:pad_width[0]] = 10 ... vector[-pad_width[1]:] = 10 ... return vector >>> a = np.arange(6) >>> a = a.reshape((2, 3)) >>> np.lib.pad(a, 2, padwithtens) array([[10, 10, 10, 10, 10, 10, 10], [10, 10, 10, 10, 10, 10, 10], [10, 10, 0, 1, 2, 10, 10], [10, 10, 3, 4, 5, 10, 10], [10, 10, 10, 10, 10, 10, 10], [10, 10, 10, 10, 10, 10, 10]]) """ if not np.asarray(pad_width).dtype.kind == 'i': raise TypeError('`pad_width` must be of integral type.') narray = np.array(array) pad_width = _validate_lengths(narray, pad_width) allowedkwargs = { 'constant': ['constant_values'], 'edge': [], 'linear_ramp': ['end_values'], 'maximum': ['stat_length'], 'mean': ['stat_length'], 'median': ['stat_length'], 'minimum': ['stat_length'], 'reflect': ['reflect_type'], 'symmetric': ['reflect_type'], 'wrap': [], } kwdefaults = { 'stat_length': None, 'constant_values': 0, 'end_values': 0, 'reflect_type': 'even', } if isinstance(mode, str): # Make sure have allowed kwargs appropriate for mode for key in kwargs: if key not in allowedkwargs[mode]: raise ValueError('%s keyword not in allowed keywords %s' % (key, allowedkwargs[mode])) # Set kwarg defaults for kw in allowedkwargs[mode]: kwargs.setdefault(kw, kwdefaults[kw]) # Need to only normalize particular keywords. for i in kwargs: if i == 'stat_length': kwargs[i] = _validate_lengths(narray, kwargs[i]) if i in ['end_values', 'constant_values']: kwargs[i] = _normalize_shape(narray, kwargs[i], cast_to_int=False) elif mode is None: raise ValueError('Keyword "mode" must be a function or one of %s.' % (list(allowedkwargs.keys()),)) else: # Drop back to old, slower np.apply_along_axis mode for user-supplied # vector function function = mode # Create a new padded array rank = list(range(len(narray.shape))) total_dim_increase = [np.sum(pad_width[i]) for i in rank] offset_slices = [slice(pad_width[i][0], pad_width[i][0] + narray.shape[i]) for i in rank] new_shape = np.array(narray.shape) + total_dim_increase newmat = np.zeros(new_shape, narray.dtype) # Insert the original array into the padded array newmat[offset_slices] = narray # This is the core of pad ... 
for iaxis in rank: np.apply_along_axis(function, iaxis, newmat, pad_width[iaxis], iaxis, kwargs) return newmat # If we get here, use new padding method newmat = narray.copy() # API preserved, but completely new algorithm which pads by building the # entire block to pad before/after `arr` with in one step, for each axis. if mode == 'constant': for axis, ((pad_before, pad_after), (before_val, after_val)) \ in enumerate(zip(pad_width, kwargs['constant_values'])): newmat = _prepend_const(newmat, pad_before, before_val, axis) newmat = _append_const(newmat, pad_after, after_val, axis) elif mode == 'edge': for axis, (pad_before, pad_after) in enumerate(pad_width): newmat = _prepend_edge(newmat, pad_before, axis) newmat = _append_edge(newmat, pad_after, axis) elif mode == 'linear_ramp': for axis, ((pad_before, pad_after), (before_val, after_val)) \ in enumerate(zip(pad_width, kwargs['end_values'])): newmat = _prepend_ramp(newmat, pad_before, before_val, axis) newmat = _append_ramp(newmat, pad_after, after_val, axis) elif mode == 'maximum': for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \ in enumerate(zip(pad_width, kwargs['stat_length'])): newmat = _prepend_max(newmat, pad_before, chunk_before, axis) newmat = _append_max(newmat, pad_after, chunk_after, axis) elif mode == 'mean': for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \ in enumerate(zip(pad_width, kwargs['stat_length'])): newmat = _prepend_mean(newmat, pad_before, chunk_before, axis) newmat = _append_mean(newmat, pad_after, chunk_after, axis) elif mode == 'median': for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \ in enumerate(zip(pad_width, kwargs['stat_length'])): newmat = _prepend_med(newmat, pad_before, chunk_before, axis) newmat = _append_med(newmat, pad_after, chunk_after, axis) elif mode == 'minimum': for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \ in enumerate(zip(pad_width, kwargs['stat_length'])): newmat = _prepend_min(newmat, pad_before, chunk_before, axis) newmat = _append_min(newmat, pad_after, chunk_after, axis) elif mode == 'reflect': for axis, (pad_before, pad_after) in enumerate(pad_width): # Recursive padding along any axis where `pad_amt` is too large # for indexing tricks. We can only safely pad the original axis # length, to keep the period of the reflections consistent. if ((pad_before > 0) or (pad_after > 0)) and newmat.shape[axis] == 1: # Extending singleton dimension for 'reflect' is legacy # behavior; it really should raise an error. newmat = _prepend_edge(newmat, pad_before, axis) newmat = _append_edge(newmat, pad_after, axis) continue method = kwargs['reflect_type'] safe_pad = newmat.shape[axis] - 1 while ((pad_before > safe_pad) or (pad_after > safe_pad)): pad_iter_b = min(safe_pad, safe_pad * (pad_before // safe_pad)) pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad)) newmat = _pad_ref(newmat, (pad_iter_b, pad_iter_a), method, axis) pad_before -= pad_iter_b pad_after -= pad_iter_a safe_pad += pad_iter_b + pad_iter_a newmat = _pad_ref(newmat, (pad_before, pad_after), method, axis) elif mode == 'symmetric': for axis, (pad_before, pad_after) in enumerate(pad_width): # Recursive padding along any axis where `pad_amt` is too large # for indexing tricks. We can only safely pad the original axis # length, to keep the period of the reflections consistent. 
method = kwargs['reflect_type'] safe_pad = newmat.shape[axis] while ((pad_before > safe_pad) or (pad_after > safe_pad)): pad_iter_b = min(safe_pad, safe_pad * (pad_before // safe_pad)) pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad)) newmat = _pad_sym(newmat, (pad_iter_b, pad_iter_a), method, axis) pad_before -= pad_iter_b pad_after -= pad_iter_a safe_pad += pad_iter_b + pad_iter_a newmat = _pad_sym(newmat, (pad_before, pad_after), method, axis) elif mode == 'wrap': for axis, (pad_before, pad_after) in enumerate(pad_width): # Recursive padding along any axis where `pad_amt` is too large # for indexing tricks. We can only safely pad the original axis # length, to keep the period of the reflections consistent. safe_pad = newmat.shape[axis] while ((pad_before > safe_pad) or (pad_after > safe_pad)): pad_iter_b = min(safe_pad, safe_pad * (pad_before // safe_pad)) pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad)) newmat = _pad_wrap(newmat, (pad_iter_b, pad_iter_a), axis) pad_before -= pad_iter_b pad_after -= pad_iter_a safe_pad += pad_iter_b + pad_iter_a newmat = _pad_wrap(newmat, (pad_before, pad_after), axis) return newmat
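The 'reflect', 'symmetric' and 'wrap' branches at the end of pad() can only safely pad by roughly one array length per pass, so they loop, growing safe_pad until the requested width fits. A small usage sketch illustrating that a pad width larger than the array itself is handled by that loop; it calls np.pad, the public alias of this module's pad function, and the commented outputs are worked out by hand from the algorithm above:

import numpy as np

a = np.array([1, 2, 3])

# 7 > len(a), so the 'wrap' branch applies _pad_wrap repeatedly until the
# requested width is reached; the result is the periodic extension of `a`.
print(np.pad(a, 7, 'wrap'))
# expected: [3 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3 1]

# 'reflect' with reflect_type='odd' extrapolates through the edge values
# (2 * edge - reflected value), matching the docstring example above.
print(np.pad(a, 2, 'reflect', reflect_type='odd'))
# expected: [-1  0  1  2  3  4  5]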
34.866489
79
0.579629
from __future__ import division, absolute_import, print_function import numpy as np __all__ = ['pad'] t, axis) slope = (end - edge_pad) / float(pad_amt) ramp_arr = ramp_arr * slope ramp_arr += edge_pad _round_ifneeded(ramp_arr, arr.dtype) return np.concatenate((ramp_arr.astype(arr.dtype), arr), axis=axis) def _append_ramp(arr, pad_amt, end, axis=-1): if pad_amt == 0: return arr padshape = tuple(x if i != axis else pad_amt for (i, x) in enumerate(arr.shape)) ramp_arr = _arange_ndarray(arr, padshape, axis, reverse=False).astype(np.float64) edge_slice = tuple(slice(None) if i != axis else -1 for (i, x) in enumerate(arr.shape)) pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis) slope = (end - edge_pad) / float(pad_amt) ramp_arr = ramp_arr * slope ramp_arr += edge_pad _round_ifneeded(ramp_arr, arr.dtype) return np.concatenate((arr, ramp_arr.astype(arr.dtype)), axis=axis) def _prepend_max(arr, pad_amt, num, axis=-1): if pad_amt == 0: return arr if num == 1: return _prepend_edge(arr, pad_amt, axis) if num is not None: if num >= arr.shape[axis]: num = None max_slice = tuple(slice(None) if i != axis else slice(num) for (i, x) in enumerate(arr.shape)) pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton) return np.concatenate((max_chunk.repeat(pad_amt, axis=axis), arr), axis=axis) def _append_max(arr, pad_amt, num, axis=-1): if pad_amt == 0: return arr if num == 1: return _append_edge(arr, pad_amt, axis) if num is not None: if num >= arr.shape[axis]: num = None end = arr.shape[axis] - 1 if num is not None: max_slice = tuple( slice(None) if i != axis else slice(end, end - num, -1) for (i, x) in enumerate(arr.shape)) else: max_slice = tuple(slice(None) for x in arr.shape) pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton) return np.concatenate((arr, max_chunk.repeat(pad_amt, axis=axis)), axis=axis) def _prepend_mean(arr, pad_amt, num, axis=-1): if pad_amt == 0: return arr if num == 1: return _prepend_edge(arr, pad_amt, axis) if num is not None: if num >= arr.shape[axis]: num = None mean_slice = tuple(slice(None) if i != axis else slice(num) for (i, x) in enumerate(arr.shape)) pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) mean_chunk = arr[mean_slice].mean(axis).reshape(pad_singleton) _round_ifneeded(mean_chunk, arr.dtype) return np.concatenate((mean_chunk.repeat(pad_amt, axis).astype(arr.dtype), arr), axis=axis) def _append_mean(arr, pad_amt, num, axis=-1): if pad_amt == 0: return arr if num == 1: return _append_edge(arr, pad_amt, axis) if num is not None: if num >= arr.shape[axis]: num = None end = arr.shape[axis] - 1 if num is not None: mean_slice = tuple( slice(None) if i != axis else slice(end, end - num, -1) for (i, x) in enumerate(arr.shape)) else: mean_slice = tuple(slice(None) for x in arr.shape) pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) mean_chunk = arr[mean_slice].mean(axis=axis).reshape(pad_singleton) _round_ifneeded(mean_chunk, arr.dtype) return np.concatenate( (arr, mean_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis) def _prepend_med(arr, pad_amt, num, axis=-1): if pad_amt == 0: return arr if num == 1: return _prepend_edge(arr, pad_amt, axis) if num is not None: if num >= arr.shape[axis]: num = None med_slice = 
tuple(slice(None) if i != axis else slice(num) for (i, x) in enumerate(arr.shape)) pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton) _round_ifneeded(med_chunk, arr.dtype) return np.concatenate( (med_chunk.repeat(pad_amt, axis).astype(arr.dtype), arr), axis=axis) def _append_med(arr, pad_amt, num, axis=-1): if pad_amt == 0: return arr if num == 1: return _append_edge(arr, pad_amt, axis) if num is not None: if num >= arr.shape[axis]: num = None end = arr.shape[axis] - 1 if num is not None: med_slice = tuple( slice(None) if i != axis else slice(end, end - num, -1) for (i, x) in enumerate(arr.shape)) else: med_slice = tuple(slice(None) for x in arr.shape) pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton) _round_ifneeded(med_chunk, arr.dtype) return np.concatenate( (arr, med_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis) def _prepend_min(arr, pad_amt, num, axis=-1): if pad_amt == 0: return arr if num == 1: return _prepend_edge(arr, pad_amt, axis) if num is not None: if num >= arr.shape[axis]: num = None min_slice = tuple(slice(None) if i != axis else slice(num) for (i, x) in enumerate(arr.shape)) pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton) return np.concatenate((min_chunk.repeat(pad_amt, axis=axis), arr), axis=axis) def _append_min(arr, pad_amt, num, axis=-1): if pad_amt == 0: return arr if num == 1: return _append_edge(arr, pad_amt, axis) if num is not None: if num >= arr.shape[axis]: num = None end = arr.shape[axis] - 1 if num is not None: min_slice = tuple( slice(None) if i != axis else slice(end, end - num, -1) for (i, x) in enumerate(arr.shape)) else: min_slice = tuple(slice(None) for x in arr.shape) pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton) return np.concatenate((arr, min_chunk.repeat(pad_amt, axis=axis)), axis=axis) def _pad_ref(arr, pad_amt, method, axis=-1): if pad_amt[0] == 0 and pad_amt[1] == 0: return arr
true
true
f70298a7b694786f2a04821421d50d8e82091c26
5,488
py
Python
test/integration/src/py/test_bulk_transfers.py
emeseight/sifnode
cf52c0cf622fd5113c0a1a8f704e5f62ef3ae4c3
[ "Apache-2.0" ]
85
2020-10-24T07:35:55.000Z
2022-03-27T10:27:55.000Z
test/integration/src/py/test_bulk_transfers.py
emeseight/sifnode
cf52c0cf622fd5113c0a1a8f704e5f62ef3ae4c3
[ "Apache-2.0" ]
1,760
2020-10-24T07:37:17.000Z
2022-03-31T17:59:44.000Z
test/integration/src/py/test_bulk_transfers.py
emeseight/sifnode
cf52c0cf622fd5113c0a1a8f704e5f62ef3ae4c3
[ "Apache-2.0" ]
115
2020-10-31T07:32:38.000Z
2022-03-29T21:12:25.000Z
import copy import json import logging import pytest import burn_lock_functions import test_utilities from integration_env_credentials import sifchain_cli_credentials_for_test from pytest_utilities import generate_minimal_test_account from test_utilities import EthereumToSifchainTransferRequest, SifchaincliCredentials def create_new_sifaddr(): new_account_key = test_utilities.get_shell_output("uuidgen") credentials = sifchain_cli_credentials_for_test(new_account_key) new_addr = burn_lock_functions.create_new_sifaddr(credentials=credentials, keyname=new_account_key) return new_addr["address"] def create_new_sifaddr_and_key(): new_account_key = test_utilities.get_shell_output("uuidgen") credentials = sifchain_cli_credentials_for_test(new_account_key) new_addr = burn_lock_functions.create_new_sifaddr(credentials=credentials, keyname=new_account_key) return new_addr["address"], new_addr["name"] @pytest.mark.skip(reason="run manually") def test_bulk_transfers( basic_transfer_request: EthereumToSifchainTransferRequest, smart_contracts_dir, source_ethereum_address, bridgebank_address, bridgetoken_address, ethereum_network, ): n_transfers = int(test_utilities.get_optional_env_var("NTRANSFERS", 2)) ganache_delay = test_utilities.get_optional_env_var("GANACHE_DELAY", 1) # test_utilities.get_shell_output(f"{integration_dir}/ganache_start.sh {ganache_delay}") amount = "{:d}".format(5 * test_utilities.highest_gas_cost) new_addresses_and_keys = list(map(lambda x: create_new_sifaddr_and_key(), range(n_transfers))) logging.info(f"aandk: {new_addresses_and_keys}") new_addresses = list(map(lambda a: a[0], new_addresses_and_keys)) logging.debug(f"new_addresses: {new_addresses}") new_eth_addrs = test_utilities.create_ethereum_addresses(smart_contracts_dir, basic_transfer_request.ethereum_network, len(new_addresses)) logging.info(f"new eth addrs: {new_eth_addrs}") request: EthereumToSifchainTransferRequest = copy.deepcopy(basic_transfer_request) requests = list(map(lambda addr: { "amount": amount, "symbol": test_utilities.NULL_ADDRESS, "sifchain_address": addr }, new_addresses)) json_requests = json.dumps(requests) test_utilities.run_yarn_command( " ".join([ f"yarn --cwd {smart_contracts_dir}", "integrationtest:sendBulkLockTx", f"--amount {amount}", f"--symbol eth", f"--json_path {request.solidity_json_path}", f"--sifchain_address {new_addresses[0]}", f"--transactions \'{json_requests}\'", f"--ethereum_address {source_ethereum_address}", f"--bridgebank_address {bridgebank_address}", f"--ethereum_network {ethereum_network}", ]) ) requests = list(map(lambda addr: { "amount": amount, "symbol": bridgetoken_address, "sifchain_address": addr }, new_addresses)) json_requests = json.dumps(requests) yarn_result = test_utilities.run_yarn_command( " ".join([ f"yarn --cwd {smart_contracts_dir}", "integrationtest:sendBulkLockTx", f"--amount {amount}", "--lock_or_burn burn", f"--symbol {bridgetoken_address}", f"--json_path {request.solidity_json_path}", f"--sifchain_address {new_addresses[0]}", f"--transactions \'{json_requests}\'", f"--ethereum_address {source_ethereum_address}", f"--bridgebank_address {bridgebank_address}", f"--ethereum_network {ethereum_network}", ]) ) logging.info(f"bulk result: {yarn_result}") manual_advance = False if manual_advance: test_utilities.advance_n_ethereum_blocks(test_utilities.n_wait_blocks, smart_contracts_dir) test_utilities.wait_for_ethereum_block_number(yarn_result["blockNumber"] + test_utilities.n_wait_blocks, basic_transfer_request); for a in new_addresses: 
test_utilities.wait_for_sif_account(a, basic_transfer_request.sifnoded_node, 90) test_utilities.wait_for_sifchain_addr_balance(a, "ceth", amount, basic_transfer_request.sifnoded_node, 180) test_utilities.wait_for_sifchain_addr_balance(a, "rowan", amount, basic_transfer_request.sifnoded_node, 180) text_file = open("pfile.cmds", "w") simple_credentials = SifchaincliCredentials( keyring_passphrase=None, keyring_backend="test", from_key=None, sifnoded_homedir=None ) logging.info(f"all accounts are on sifchain and have the correct balance") for sifaddr, ethaddr in zip(new_addresses_and_keys, new_eth_addrs): r = copy.deepcopy(basic_transfer_request) r.sifchain_address = sifaddr[0] r.ethereum_address = ethaddr["address"] r.amount = 100 simple_credentials.from_key = sifaddr[1] c = test_utilities.send_from_sifchain_to_ethereum_cmd(r, simple_credentials) text_file.write(f"{c}\n") text_file.close() # test_utilities.get_shell_output("cat pfile.cmds | parallel --trim lr -v {}") test_utilities.get_shell_output("bash -x pfile.cmds") for sifaddr, ethaddr in zip(new_addresses_and_keys, new_eth_addrs): r = copy.deepcopy(basic_transfer_request) r.ethereum_address = ethaddr["address"] r.amount = 100 test_utilities.wait_for_eth_balance(r, 100, 300)
44.983607
142
0.714103
import copy import json import logging import pytest import burn_lock_functions import test_utilities from integration_env_credentials import sifchain_cli_credentials_for_test from pytest_utilities import generate_minimal_test_account from test_utilities import EthereumToSifchainTransferRequest, SifchaincliCredentials def create_new_sifaddr(): new_account_key = test_utilities.get_shell_output("uuidgen") credentials = sifchain_cli_credentials_for_test(new_account_key) new_addr = burn_lock_functions.create_new_sifaddr(credentials=credentials, keyname=new_account_key) return new_addr["address"] def create_new_sifaddr_and_key(): new_account_key = test_utilities.get_shell_output("uuidgen") credentials = sifchain_cli_credentials_for_test(new_account_key) new_addr = burn_lock_functions.create_new_sifaddr(credentials=credentials, keyname=new_account_key) return new_addr["address"], new_addr["name"] @pytest.mark.skip(reason="run manually") def test_bulk_transfers( basic_transfer_request: EthereumToSifchainTransferRequest, smart_contracts_dir, source_ethereum_address, bridgebank_address, bridgetoken_address, ethereum_network, ): n_transfers = int(test_utilities.get_optional_env_var("NTRANSFERS", 2)) ganache_delay = test_utilities.get_optional_env_var("GANACHE_DELAY", 1) amount = "{:d}".format(5 * test_utilities.highest_gas_cost) new_addresses_and_keys = list(map(lambda x: create_new_sifaddr_and_key(), range(n_transfers))) logging.info(f"aandk: {new_addresses_and_keys}") new_addresses = list(map(lambda a: a[0], new_addresses_and_keys)) logging.debug(f"new_addresses: {new_addresses}") new_eth_addrs = test_utilities.create_ethereum_addresses(smart_contracts_dir, basic_transfer_request.ethereum_network, len(new_addresses)) logging.info(f"new eth addrs: {new_eth_addrs}") request: EthereumToSifchainTransferRequest = copy.deepcopy(basic_transfer_request) requests = list(map(lambda addr: { "amount": amount, "symbol": test_utilities.NULL_ADDRESS, "sifchain_address": addr }, new_addresses)) json_requests = json.dumps(requests) test_utilities.run_yarn_command( " ".join([ f"yarn --cwd {smart_contracts_dir}", "integrationtest:sendBulkLockTx", f"--amount {amount}", f"--symbol eth", f"--json_path {request.solidity_json_path}", f"--sifchain_address {new_addresses[0]}", f"--transactions \'{json_requests}\'", f"--ethereum_address {source_ethereum_address}", f"--bridgebank_address {bridgebank_address}", f"--ethereum_network {ethereum_network}", ]) ) requests = list(map(lambda addr: { "amount": amount, "symbol": bridgetoken_address, "sifchain_address": addr }, new_addresses)) json_requests = json.dumps(requests) yarn_result = test_utilities.run_yarn_command( " ".join([ f"yarn --cwd {smart_contracts_dir}", "integrationtest:sendBulkLockTx", f"--amount {amount}", "--lock_or_burn burn", f"--symbol {bridgetoken_address}", f"--json_path {request.solidity_json_path}", f"--sifchain_address {new_addresses[0]}", f"--transactions \'{json_requests}\'", f"--ethereum_address {source_ethereum_address}", f"--bridgebank_address {bridgebank_address}", f"--ethereum_network {ethereum_network}", ]) ) logging.info(f"bulk result: {yarn_result}") manual_advance = False if manual_advance: test_utilities.advance_n_ethereum_blocks(test_utilities.n_wait_blocks, smart_contracts_dir) test_utilities.wait_for_ethereum_block_number(yarn_result["blockNumber"] + test_utilities.n_wait_blocks, basic_transfer_request); for a in new_addresses: test_utilities.wait_for_sif_account(a, basic_transfer_request.sifnoded_node, 90) 
test_utilities.wait_for_sifchain_addr_balance(a, "ceth", amount, basic_transfer_request.sifnoded_node, 180) test_utilities.wait_for_sifchain_addr_balance(a, "rowan", amount, basic_transfer_request.sifnoded_node, 180) text_file = open("pfile.cmds", "w") simple_credentials = SifchaincliCredentials( keyring_passphrase=None, keyring_backend="test", from_key=None, sifnoded_homedir=None ) logging.info(f"all accounts are on sifchain and have the correct balance") for sifaddr, ethaddr in zip(new_addresses_and_keys, new_eth_addrs): r = copy.deepcopy(basic_transfer_request) r.sifchain_address = sifaddr[0] r.ethereum_address = ethaddr["address"] r.amount = 100 simple_credentials.from_key = sifaddr[1] c = test_utilities.send_from_sifchain_to_ethereum_cmd(r, simple_credentials) text_file.write(f"{c}\n") text_file.close() test_utilities.get_shell_output("bash -x pfile.cmds") for sifaddr, ethaddr in zip(new_addresses_and_keys, new_eth_addrs): r = copy.deepcopy(basic_transfer_request) r.ethereum_address = ethaddr["address"] r.amount = 100 test_utilities.wait_for_eth_balance(r, 100, 300)
true
true
f702992dc148b3f53c8d0340cf67d9897004131c
5,174
py
Python
src/pip/_internal/build_env.py
atse/pip
b47b2fa8e6a70f2d8c93b14a1292bf6eb82b6355
[ "MIT" ]
null
null
null
src/pip/_internal/build_env.py
atse/pip
b47b2fa8e6a70f2d8c93b14a1292bf6eb82b6355
[ "MIT" ]
null
null
null
src/pip/_internal/build_env.py
atse/pip
b47b2fa8e6a70f2d8c93b14a1292bf6eb82b6355
[ "MIT" ]
null
null
null
"""Build Environment used for isolation during sdist building """ import logging import os import sys import textwrap from distutils.sysconfig import get_python_lib from sysconfig import get_paths from pip._vendor.pkg_resources import Requirement, VersionConflict, WorkingSet from pip import __file__ as pip_location from pip._internal.utils.misc import call_subprocess from pip._internal.utils.temp_dir import TempDirectory from pip._internal.utils.ui import open_spinner logger = logging.getLogger(__name__) class BuildEnvironment(object): """Creates and manages an isolated environment to install build deps """ def __init__(self): self._temp_dir = TempDirectory(kind="build-env") self._temp_dir.create() @property def path(self): return self._temp_dir.path def __enter__(self): self.save_path = os.environ.get('PATH', None) self.save_pythonpath = os.environ.get('PYTHONPATH', None) self.save_nousersite = os.environ.get('PYTHONNOUSERSITE', None) install_scheme = 'nt' if (os.name == 'nt') else 'posix_prefix' install_dirs = get_paths(install_scheme, vars={ 'base': self.path, 'platbase': self.path, }) scripts = install_dirs['scripts'] if self.save_path: os.environ['PATH'] = scripts + os.pathsep + self.save_path else: os.environ['PATH'] = scripts + os.pathsep + os.defpath # Note: prefer distutils' sysconfig to get the # library paths so PyPy is correctly supported. purelib = get_python_lib(plat_specific=0, prefix=self.path) platlib = get_python_lib(plat_specific=1, prefix=self.path) if purelib == platlib: lib_dirs = purelib else: lib_dirs = purelib + os.pathsep + platlib if self.save_pythonpath: os.environ['PYTHONPATH'] = lib_dirs + os.pathsep + \ self.save_pythonpath else: os.environ['PYTHONPATH'] = lib_dirs os.environ['PYTHONNOUSERSITE'] = '1' # Ensure .pth files are honored. 
with open(os.path.join(purelib, 'sitecustomize.py'), 'w') as fp: fp.write(textwrap.dedent( ''' import site site.addsitedir({!r}) ''' ).format(purelib)) return self.path def __exit__(self, exc_type, exc_val, exc_tb): def restore_var(varname, old_value): if old_value is None: os.environ.pop(varname, None) else: os.environ[varname] = old_value restore_var('PATH', self.save_path) restore_var('PYTHONPATH', self.save_pythonpath) restore_var('PYTHONNOUSERSITE', self.save_nousersite) def cleanup(self): self._temp_dir.cleanup() def missing_requirements(self, reqs): """Return a list of the requirements from reqs that are not present """ missing = [] with self: ws = WorkingSet(os.environ["PYTHONPATH"].split(os.pathsep)) for req in reqs: try: if ws.find(Requirement.parse(req)) is None: missing.append(req) except VersionConflict: missing.append(req) return missing def install_requirements(self, finder, requirements, message): args = [ sys.executable, os.path.dirname(pip_location), 'install', '--ignore-installed', '--no-user', '--prefix', self.path, '--no-warn-script-location', ] if logger.getEffectiveLevel() <= logging.DEBUG: args.append('-v') for format_control in ('no_binary', 'only_binary'): formats = getattr(finder.format_control, format_control) args.extend(('--' + format_control.replace('_', '-'), ','.join(sorted(formats or {':none:'})))) if finder.index_urls: args.extend(['-i', finder.index_urls[0]]) for extra_index in finder.index_urls[1:]: args.extend(['--extra-index-url', extra_index]) else: args.append('--no-index') for link in finder.find_links: args.extend(['--find-links', link]) for _, host, _ in finder.secure_origins: args.extend(['--trusted-host', host]) if finder.allow_all_prereleases: args.append('--pre') if finder.process_dependency_links: args.append('--process-dependency-links') args.append('--') args.extend(requirements) with open_spinner(message) as spinner: call_subprocess(args, show_stdout=False, spinner=spinner) class NoOpBuildEnvironment(BuildEnvironment): """A no-op drop-in replacement for BuildEnvironment """ def __init__(self): pass def __enter__(self): pass def __exit__(self, exc_type, exc_val, exc_tb): pass def cleanup(self): pass def install_requirements(self, finder, requirements, message): raise NotImplementedError()
33.380645
78
0.603402
import logging import os import sys import textwrap from distutils.sysconfig import get_python_lib from sysconfig import get_paths from pip._vendor.pkg_resources import Requirement, VersionConflict, WorkingSet from pip import __file__ as pip_location from pip._internal.utils.misc import call_subprocess from pip._internal.utils.temp_dir import TempDirectory from pip._internal.utils.ui import open_spinner logger = logging.getLogger(__name__) class BuildEnvironment(object): def __init__(self): self._temp_dir = TempDirectory(kind="build-env") self._temp_dir.create() @property def path(self): return self._temp_dir.path def __enter__(self): self.save_path = os.environ.get('PATH', None) self.save_pythonpath = os.environ.get('PYTHONPATH', None) self.save_nousersite = os.environ.get('PYTHONNOUSERSITE', None) install_scheme = 'nt' if (os.name == 'nt') else 'posix_prefix' install_dirs = get_paths(install_scheme, vars={ 'base': self.path, 'platbase': self.path, }) scripts = install_dirs['scripts'] if self.save_path: os.environ['PATH'] = scripts + os.pathsep + self.save_path else: os.environ['PATH'] = scripts + os.pathsep + os.defpath # library paths so PyPy is correctly supported. purelib = get_python_lib(plat_specific=0, prefix=self.path) platlib = get_python_lib(plat_specific=1, prefix=self.path) if purelib == platlib: lib_dirs = purelib else: lib_dirs = purelib + os.pathsep + platlib if self.save_pythonpath: os.environ['PYTHONPATH'] = lib_dirs + os.pathsep + \ self.save_pythonpath else: os.environ['PYTHONPATH'] = lib_dirs os.environ['PYTHONNOUSERSITE'] = '1' # Ensure .pth files are honored. with open(os.path.join(purelib, 'sitecustomize.py'), 'w') as fp: fp.write(textwrap.dedent( ''' import site site.addsitedir({!r}) ''' ).format(purelib)) return self.path def __exit__(self, exc_type, exc_val, exc_tb): def restore_var(varname, old_value): if old_value is None: os.environ.pop(varname, None) else: os.environ[varname] = old_value restore_var('PATH', self.save_path) restore_var('PYTHONPATH', self.save_pythonpath) restore_var('PYTHONNOUSERSITE', self.save_nousersite) def cleanup(self): self._temp_dir.cleanup() def missing_requirements(self, reqs): missing = [] with self: ws = WorkingSet(os.environ["PYTHONPATH"].split(os.pathsep)) for req in reqs: try: if ws.find(Requirement.parse(req)) is None: missing.append(req) except VersionConflict: missing.append(req) return missing def install_requirements(self, finder, requirements, message): args = [ sys.executable, os.path.dirname(pip_location), 'install', '--ignore-installed', '--no-user', '--prefix', self.path, '--no-warn-script-location', ] if logger.getEffectiveLevel() <= logging.DEBUG: args.append('-v') for format_control in ('no_binary', 'only_binary'): formats = getattr(finder.format_control, format_control) args.extend(('--' + format_control.replace('_', '-'), ','.join(sorted(formats or {':none:'})))) if finder.index_urls: args.extend(['-i', finder.index_urls[0]]) for extra_index in finder.index_urls[1:]: args.extend(['--extra-index-url', extra_index]) else: args.append('--no-index') for link in finder.find_links: args.extend(['--find-links', link]) for _, host, _ in finder.secure_origins: args.extend(['--trusted-host', host]) if finder.allow_all_prereleases: args.append('--pre') if finder.process_dependency_links: args.append('--process-dependency-links') args.append('--') args.extend(requirements) with open_spinner(message) as spinner: call_subprocess(args, show_stdout=False, spinner=spinner) class NoOpBuildEnvironment(BuildEnvironment): def 
__init__(self): pass def __enter__(self): pass def __exit__(self, exc_type, exc_val, exc_tb): pass def cleanup(self): pass def install_requirements(self, finder, requirements, message): raise NotImplementedError()
true
true
f70299a27cf778ccd0e0029d544c3610372d7746
5,483
py
Python
src/negotiating_agent/test_agent/TU-Delft-Collaborative-AI-Negotiation/agents/random_agent/random_agent.py
HahaBill/CollaborativeAI
f771cd2f34774c74c58e49a7e983d6244ea35eff
[ "MIT" ]
1
2022-02-17T19:14:46.000Z
2022-02-17T19:14:46.000Z
src/negotiating_agent/test_agent/TU-Delft-Collaborative-AI-Negotiation/agents/random_agent/random_agent.py
HahaBill/CollaborativeAI
f771cd2f34774c74c58e49a7e983d6244ea35eff
[ "MIT" ]
null
null
null
src/negotiating_agent/test_agent/TU-Delft-Collaborative-AI-Negotiation/agents/random_agent/random_agent.py
HahaBill/CollaborativeAI
f771cd2f34774c74c58e49a7e983d6244ea35eff
[ "MIT" ]
null
null
null
import logging from random import randint import traceback from typing import cast, Dict, List, Set, Collection from geniusweb.actions.Accept import Accept from geniusweb.actions.Action import Action from geniusweb.actions.LearningDone import LearningDone from geniusweb.actions.Offer import Offer from geniusweb.actions.PartyId import PartyId from geniusweb.actions.Vote import Vote from geniusweb.actions.Votes import Votes from geniusweb.bidspace.AllBidsList import AllBidsList from geniusweb.inform.ActionDone import ActionDone from geniusweb.inform.Finished import Finished from geniusweb.inform.Inform import Inform from geniusweb.inform.OptIn import OptIn from geniusweb.inform.Settings import Settings from geniusweb.inform.Voting import Voting from geniusweb.inform.YourTurn import YourTurn from geniusweb.issuevalue.Bid import Bid from geniusweb.issuevalue.Domain import Domain from geniusweb.issuevalue.Value import Value from geniusweb.issuevalue.ValueSet import ValueSet from geniusweb.party.Capabilities import Capabilities from geniusweb.party.DefaultParty import DefaultParty from geniusweb.profile.utilityspace.UtilitySpace import UtilitySpace from geniusweb.profileconnection.ProfileConnectionFactory import ( ProfileConnectionFactory, ) from geniusweb.progress.ProgressRounds import ProgressRounds from geniusweb.utils import val class RandomAgent(DefaultParty): """ Offers random bids until a bid with sufficient utility is offered. """ def __init__(self): super().__init__() self.getReporter().log(logging.INFO, "party is initialized") self._profile = None self._lastReceivedBid: Bid = None # Override def notifyChange(self, info: Inform): # self.getReporter().log(logging.INFO,"received info:"+str(info)) if isinstance(info, Settings): self._settings: Settings = cast(Settings, info) self._me = self._settings.getID() self._protocol: str = str(self._settings.getProtocol().getURI()) self._progress = self._settings.getProgress() if "Learn" == self._protocol: self.getConnection().send(LearningDone(self._me)) # type:ignore else: self._profile = ProfileConnectionFactory.create( info.getProfile().getURI(), self.getReporter() ) elif isinstance(info, ActionDone): action: Action = cast(ActionDone, info).getAction() if isinstance(action, Offer): self._lastReceivedBid = cast(Offer, action).getBid() elif isinstance(info, YourTurn): self._myTurn() if isinstance(self._progress, ProgressRounds): self._progress = self._progress.advance() elif isinstance(info, Finished): self.terminate() elif isinstance(info, Voting): # MOPAC protocol self._lastvotes = self._vote(cast(Voting, info)) val(self.getConnection()).send(self._lastvotes) elif isinstance(info, OptIn): val(self.getConnection()).send(self._lastvotes) else: self.getReporter().log( logging.WARNING, "Ignoring unknown info " + str(info) ) # Override def getCapabilities(self) -> Capabilities: return Capabilities( set(["SAOP", "Learn", "MOPAC"]), set(["geniusweb.profile.utilityspace.LinearAdditive"]), ) # Override def getDescription(self) -> str: return "Offers random bids until a bid with sufficient utility is offered. Parameters minPower and maxPower can be used to control voting behaviour." 
# Override def terminate(self): self.getReporter().log(logging.INFO, "party is terminating:") super().terminate() if self._profile != None: self._profile.close() self._profile = None def _myTurn(self): if self._isGood(self._lastReceivedBid): action = Accept(self._me, self._lastReceivedBid) else: for _attempt in range(20): bid = self._getRandomBid(self._profile.getProfile().getDomain()) if self._isGood(bid): break action = Offer(self._me, bid) self.getConnection().send(action) def _isGood(self, bid: Bid) -> bool: if bid == None: return False profile = self._profile.getProfile() if isinstance(profile, UtilitySpace): return profile.getUtility(bid) > 0.6 raise Exception("Can not handle this type of profile") def _getRandomBid(self, domain: Domain) -> Bid: allBids = AllBidsList(domain) return allBids.get(randint(0, allBids.size() - 1)) def _vote(self, voting: Voting) -> Votes: """ @param voting the {@link Voting} object containing the options @return our next Votes. """ val = self._settings.getParameters().get("minPower") minpower: int = val if isinstance(val, int) else 2 val = self._settings.getParameters().get("maxPower") maxpower: int = val if isinstance(val, int) else 9999999 votes: Set[Vote] = set( [ Vote(self._me, offer.getBid(), minpower, maxpower) for offer in voting.getOffers() if self._isGood(offer.getBid()) ] ) return Votes(self._me, votes)
38.612676
157
0.655116
import logging from random import randint import traceback from typing import cast, Dict, List, Set, Collection from geniusweb.actions.Accept import Accept from geniusweb.actions.Action import Action from geniusweb.actions.LearningDone import LearningDone from geniusweb.actions.Offer import Offer from geniusweb.actions.PartyId import PartyId from geniusweb.actions.Vote import Vote from geniusweb.actions.Votes import Votes from geniusweb.bidspace.AllBidsList import AllBidsList from geniusweb.inform.ActionDone import ActionDone from geniusweb.inform.Finished import Finished from geniusweb.inform.Inform import Inform from geniusweb.inform.OptIn import OptIn from geniusweb.inform.Settings import Settings from geniusweb.inform.Voting import Voting from geniusweb.inform.YourTurn import YourTurn from geniusweb.issuevalue.Bid import Bid from geniusweb.issuevalue.Domain import Domain from geniusweb.issuevalue.Value import Value from geniusweb.issuevalue.ValueSet import ValueSet from geniusweb.party.Capabilities import Capabilities from geniusweb.party.DefaultParty import DefaultParty from geniusweb.profile.utilityspace.UtilitySpace import UtilitySpace from geniusweb.profileconnection.ProfileConnectionFactory import ( ProfileConnectionFactory, ) from geniusweb.progress.ProgressRounds import ProgressRounds from geniusweb.utils import val class RandomAgent(DefaultParty): def __init__(self): super().__init__() self.getReporter().log(logging.INFO, "party is initialized") self._profile = None self._lastReceivedBid: Bid = None def notifyChange(self, info: Inform): if isinstance(info, Settings): self._settings: Settings = cast(Settings, info) self._me = self._settings.getID() self._protocol: str = str(self._settings.getProtocol().getURI()) self._progress = self._settings.getProgress() if "Learn" == self._protocol: self.getConnection().send(LearningDone(self._me)) else: self._profile = ProfileConnectionFactory.create( info.getProfile().getURI(), self.getReporter() ) elif isinstance(info, ActionDone): action: Action = cast(ActionDone, info).getAction() if isinstance(action, Offer): self._lastReceivedBid = cast(Offer, action).getBid() elif isinstance(info, YourTurn): self._myTurn() if isinstance(self._progress, ProgressRounds): self._progress = self._progress.advance() elif isinstance(info, Finished): self.terminate() elif isinstance(info, Voting): self._lastvotes = self._vote(cast(Voting, info)) val(self.getConnection()).send(self._lastvotes) elif isinstance(info, OptIn): val(self.getConnection()).send(self._lastvotes) else: self.getReporter().log( logging.WARNING, "Ignoring unknown info " + str(info) ) def getCapabilities(self) -> Capabilities: return Capabilities( set(["SAOP", "Learn", "MOPAC"]), set(["geniusweb.profile.utilityspace.LinearAdditive"]), ) def getDescription(self) -> str: return "Offers random bids until a bid with sufficient utility is offered. Parameters minPower and maxPower can be used to control voting behaviour." 
def terminate(self): self.getReporter().log(logging.INFO, "party is terminating:") super().terminate() if self._profile != None: self._profile.close() self._profile = None def _myTurn(self): if self._isGood(self._lastReceivedBid): action = Accept(self._me, self._lastReceivedBid) else: for _attempt in range(20): bid = self._getRandomBid(self._profile.getProfile().getDomain()) if self._isGood(bid): break action = Offer(self._me, bid) self.getConnection().send(action) def _isGood(self, bid: Bid) -> bool: if bid == None: return False profile = self._profile.getProfile() if isinstance(profile, UtilitySpace): return profile.getUtility(bid) > 0.6 raise Exception("Can not handle this type of profile") def _getRandomBid(self, domain: Domain) -> Bid: allBids = AllBidsList(domain) return allBids.get(randint(0, allBids.size() - 1)) def _vote(self, voting: Voting) -> Votes: val = self._settings.getParameters().get("minPower") minpower: int = val if isinstance(val, int) else 2 val = self._settings.getParameters().get("maxPower") maxpower: int = val if isinstance(val, int) else 9999999 votes: Set[Vote] = set( [ Vote(self._me, offer.getBid(), minpower, maxpower) for offer in voting.getOffers() if self._isGood(offer.getBid()) ] ) return Votes(self._me, votes)
true
true
f70299a88cbcaeb559e7429ab61fc8c936bbdd40
475
py
Python
src/constants.py
inwe-boku/windpower-decomposition-usa
e7b11c9fffc4864d2e4f3cb7e4c6f3cfaf469ff7
[ "MIT" ]
null
null
null
src/constants.py
inwe-boku/windpower-decomposition-usa
e7b11c9fffc4864d2e4f3cb7e4c6f3cfaf469ff7
[ "MIT" ]
null
null
null
src/constants.py
inwe-boku/windpower-decomposition-usa
e7b11c9fffc4864d2e4f3cb7e4c6f3cfaf469ff7
[ "MIT" ]
null
null
null
METER_TO_KM = 1e-3
ONE_TO_KILO = 1e3
KM_TO_METER = 1e3
KILO_TO_ONE = 1e3

# Average earth radius, see https://en.wikipedia.org/wiki/Earth_radius
EARTH_RADIUS_KM = 6371.0088

# in reality air density varies between 1.14 and 1.42 in kg/m^3
AIR_DENSITY_RHO = 1.225

# of course this introduces a small mistake due to leap years, but in average it's quite ok
# Warning: in most cases it might be better to use mean() instead of sum()/HOURS_PER_YEAR
HOURS_PER_YEAR = 8765.812536
29.6875
91
0.766316
METER_TO_KM = 1e-3
ONE_TO_KILO = 1e3
KM_TO_METER = 1e3
KILO_TO_ONE = 1e3
EARTH_RADIUS_KM = 6371.0088
AIR_DENSITY_RHO = 1.225
# Warning: in most cases it might be better to use mean() instead of sum()/HOURS_PER_YEAR
HOURS_PER_YEAR = 8765.812536
true
true
f70299afa9fb9cd0c528109e36d8380ed2dbcbe6
1,064
py
Python
Python/add-binary.py
sm2774us/leetcode_interview_prep_2021
33b41bea66c266b733372d9a8b9d2965cd88bf8c
[ "Fair" ]
null
null
null
Python/add-binary.py
sm2774us/leetcode_interview_prep_2021
33b41bea66c266b733372d9a8b9d2965cd88bf8c
[ "Fair" ]
null
null
null
Python/add-binary.py
sm2774us/leetcode_interview_prep_2021
33b41bea66c266b733372d9a8b9d2965cd88bf8c
[ "Fair" ]
null
null
null
# Time: O(n)
# Space: O(1)

class Solution(object):
    # @param a, a string
    # @param b, a string
    # @return a string
    def addBinary(self, a, b):
        result, carry, val = "", 0, 0
        for i in range(max(len(a), len(b))):
            val = carry
            if i < len(a):
                val += int(a[-(i + 1)])
            if i < len(b):
                val += int(b[-(i + 1)])
            carry, val = divmod(val, 2)
            result += str(val)
        if carry:
            result += str(carry)
        return result[::-1]


# Time: O(n)
# Space: O(1)
from itertools import zip_longest


class Solution2(object):
    def addBinary(self, a, b):
        """
        :type a: str
        :type b: str
        :rtype: str
        """
        result = ""
        carry = 0
        for x, y in zip_longest(reversed(a), reversed(b), fillvalue="0"):
            carry, remainder = divmod(int(x)+int(y)+carry, 2)
            result += str(remainder)
        if carry:
            result += str(carry)
        return result[::-1]
23.644444
73
0.451128
class Solution(object):
    def addBinary(self, a, b):
        result, carry, val = "", 0, 0
        for i in range(max(len(a), len(b))):
            val = carry
            if i < len(a):
                val += int(a[-(i + 1)])
            if i < len(b):
                val += int(b[-(i + 1)])
            carry, val = divmod(val, 2)
            result += str(val)
        if carry:
            result += str(carry)
        return result[::-1]


from itertools import zip_longest


class Solution2(object):
    def addBinary(self, a, b):
        result = ""
        carry = 0
        for x, y in zip_longest(reversed(a), reversed(b), fillvalue="0"):
            carry, remainder = divmod(int(x)+int(y)+carry, 2)
            result += str(remainder)
        if carry:
            result += str(carry)
        return result[::-1]
true
true
f7029ae637a41736ba37b57a855102b48c280817
111,937
py
Python
sympy/core/tests/test_args.py
nishithshah2211/sympy
edc620ca662f7163637c7fb5823f22523b7f2fe9
[ "BSD-3-Clause" ]
1
2019-06-27T13:40:28.000Z
2019-06-27T13:40:28.000Z
sympy/core/tests/test_args.py
amitsaha/sympy
43ddfc644fd604a3dc0d4cac0aebfecd051917c1
[ "BSD-3-Clause" ]
null
null
null
sympy/core/tests/test_args.py
amitsaha/sympy
43ddfc644fd604a3dc0d4cac0aebfecd051917c1
[ "BSD-3-Clause" ]
null
null
null
"""Test whether all elements of cls.args are instances of Basic. """ # NOTE: keep tests sorted by (module, class name) key. If a class can't # be instantiated, add it here anyway with @SKIP("abstract class) (see # e.g. Function). import os import re import warnings import io from sympy import Basic, S, symbols, sqrt, sin, oo, Interval, exp from sympy.core.compatibility import range from sympy.utilities.pytest import XFAIL, SKIP from sympy.utilities.exceptions import SymPyDeprecationWarning x, y, z = symbols('x,y,z') def test_all_classes_are_tested(): this = os.path.split(__file__)[0] path = os.path.join(this, os.pardir, os.pardir) sympy_path = os.path.abspath(path) prefix = os.path.split(sympy_path)[0] + os.sep re_cls = re.compile("^class ([A-Za-z][A-Za-z0-9_]*)\s*\(", re.MULTILINE) modules = {} for root, dirs, files in os.walk(sympy_path): module = root.replace(prefix, "").replace(os.sep, ".") for file in files: if file.startswith(("_", "test_", "bench_")): continue if not file.endswith(".py"): continue with io.open(os.path.join(root, file), "r", encoding='utf-8') as f: text = f.read() submodule = module + '.' + file[:-3] names = re_cls.findall(text) if not names: continue try: mod = __import__(submodule, fromlist=names) except ImportError: continue def is_Basic(name): cls = getattr(mod, name) return issubclass(cls, Basic) names = list(filter(is_Basic, names)) if names: modules[submodule] = names ns = globals() failed = [] for module, names in modules.items(): mod = module.replace('.', '__') for name in names: test = 'test_' + mod + '__' + name if test not in ns: failed.append(module + '.' + name) # reset all SymPyDeprecationWarning into errors warnings.simplefilter("error", category=SymPyDeprecationWarning) assert not failed, "Missing classes: %s. Please add tests for these to sympy/core/tests/test_args.py." 
% ", ".join(failed) def _test_args(obj): return all(isinstance(arg, Basic) for arg in obj.args) def test_sympy__assumptions__assume__AppliedPredicate(): from sympy.assumptions.assume import AppliedPredicate, Predicate assert _test_args(AppliedPredicate(Predicate("test"), 2)) def test_sympy__assumptions__assume__Predicate(): from sympy.assumptions.assume import Predicate assert _test_args(Predicate("test")) @XFAIL def test_sympy__combinatorics__graycode__GrayCode(): from sympy.combinatorics.graycode import GrayCode # an integer is given and returned from GrayCode as the arg assert _test_args(GrayCode(3, start='100')) assert _test_args(GrayCode(3, rank=1)) def test_sympy__combinatorics__subsets__Subset(): from sympy.combinatorics.subsets import Subset assert _test_args(Subset([0, 1], [0, 1, 2, 3])) assert _test_args(Subset(['c', 'd'], ['a', 'b', 'c', 'd'])) @XFAIL def test_sympy__combinatorics__permutations__Permutation(): from sympy.combinatorics.permutations import Permutation assert _test_args(Permutation([0, 1, 2, 3])) def test_sympy__combinatorics__perm_groups__PermutationGroup(): from sympy.combinatorics.permutations import Permutation from sympy.combinatorics.perm_groups import PermutationGroup assert _test_args(PermutationGroup([Permutation([0, 1])])) def test_sympy__combinatorics__polyhedron__Polyhedron(): from sympy.combinatorics.permutations import Permutation from sympy.combinatorics.polyhedron import Polyhedron from sympy.abc import w, x, y, z pgroup = [Permutation([[0, 1, 2], [3]]), Permutation([[0, 1, 3], [2]]), Permutation([[0, 2, 3], [1]]), Permutation([[1, 2, 3], [0]]), Permutation([[0, 1], [2, 3]]), Permutation([[0, 2], [1, 3]]), Permutation([[0, 3], [1, 2]]), Permutation([[0, 1, 2, 3]])] corners = [w, x, y, z] faces = [(w, x, y), (w, y, z), (w, z, x), (x, y, z)] assert _test_args(Polyhedron(corners, faces, pgroup)) @XFAIL def test_sympy__combinatorics__prufer__Prufer(): from sympy.combinatorics.prufer import Prufer assert _test_args(Prufer([[0, 1], [0, 2], [0, 3]], 4)) def test_sympy__combinatorics__partitions__Partition(): from sympy.combinatorics.partitions import Partition assert _test_args(Partition([1])) @XFAIL def test_sympy__combinatorics__partitions__IntegerPartition(): from sympy.combinatorics.partitions import IntegerPartition assert _test_args(IntegerPartition([1])) def test_sympy__concrete__products__Product(): from sympy.concrete.products import Product assert _test_args(Product(x, (x, 0, 10))) assert _test_args(Product(x, (x, 0, y), (y, 0, 10))) @SKIP("abstract Class") def test_sympy__concrete__expr_with_limits__ExprWithLimits(): from sympy.concrete.expr_with_limits import ExprWithLimits assert _test_args(ExprWithLimits(x, (x, 0, 10))) assert _test_args(ExprWithLimits(x*y, (x, 0, 10.),(y,1.,3))) @SKIP("abstract Class") def test_sympy__concrete__expr_with_limits__AddWithLimits(): from sympy.concrete.expr_with_limits import AddWithLimits assert _test_args(AddWithLimits(x, (x, 0, 10))) assert _test_args(AddWithLimits(x*y, (x, 0, 10),(y,1,3))) @SKIP("abstract Class") def test_sympy__concrete__expr_with_intlimits__ExprWithIntLimits(): from sympy.concrete.expr_with_intlimits import ExprWithIntLimits assert _test_args(ExprWithIntLimits(x, (x, 0, 10))) assert _test_args(ExprWithIntLimits(x*y, (x, 0, 10),(y,1,3))) def test_sympy__concrete__summations__Sum(): from sympy.concrete.summations import Sum assert _test_args(Sum(x, (x, 0, 10))) assert _test_args(Sum(x, (x, 0, y), (y, 0, 10))) def test_sympy__core__add__Add(): from sympy.core.add import Add assert 
_test_args(Add(x, y, z, 2)) def test_sympy__core__basic__Atom(): from sympy.core.basic import Atom assert _test_args(Atom()) def test_sympy__core__basic__Basic(): from sympy.core.basic import Basic assert _test_args(Basic()) def test_sympy__core__containers__Dict(): from sympy.core.containers import Dict assert _test_args(Dict({x: y, y: z})) def test_sympy__core__containers__Tuple(): from sympy.core.containers import Tuple assert _test_args(Tuple(x, y, z, 2)) def test_sympy__core__expr__AtomicExpr(): from sympy.core.expr import AtomicExpr assert _test_args(AtomicExpr()) def test_sympy__core__expr__Expr(): from sympy.core.expr import Expr assert _test_args(Expr()) def test_sympy__core__function__Application(): from sympy.core.function import Application assert _test_args(Application(1, 2, 3)) def test_sympy__core__function__AppliedUndef(): from sympy.core.function import AppliedUndef assert _test_args(AppliedUndef(1, 2, 3)) def test_sympy__core__function__Derivative(): from sympy.core.function import Derivative assert _test_args(Derivative(2, x, y, 3)) @SKIP("abstract class") def test_sympy__core__function__Function(): pass def test_sympy__core__function__Lambda(): from sympy.core.function import Lambda assert _test_args(Lambda((x, y), x + y + z)) def test_sympy__core__function__Subs(): from sympy.core.function import Subs assert _test_args(Subs(x + y, x, 2)) def test_sympy__core__function__WildFunction(): from sympy.core.function import WildFunction assert _test_args(WildFunction('f')) def test_sympy__core__mod__Mod(): from sympy.core.mod import Mod assert _test_args(Mod(x, 2)) def test_sympy__core__mul__Mul(): from sympy.core.mul import Mul assert _test_args(Mul(2, x, y, z)) def test_sympy__core__numbers__Catalan(): from sympy.core.numbers import Catalan assert _test_args(Catalan()) def test_sympy__core__numbers__ComplexInfinity(): from sympy.core.numbers import ComplexInfinity assert _test_args(ComplexInfinity()) def test_sympy__core__numbers__EulerGamma(): from sympy.core.numbers import EulerGamma assert _test_args(EulerGamma()) def test_sympy__core__numbers__Exp1(): from sympy.core.numbers import Exp1 assert _test_args(Exp1()) def test_sympy__core__numbers__Float(): from sympy.core.numbers import Float assert _test_args(Float(1.23)) def test_sympy__core__numbers__GoldenRatio(): from sympy.core.numbers import GoldenRatio assert _test_args(GoldenRatio()) def test_sympy__core__numbers__Half(): from sympy.core.numbers import Half assert _test_args(Half()) def test_sympy__core__numbers__ImaginaryUnit(): from sympy.core.numbers import ImaginaryUnit assert _test_args(ImaginaryUnit()) def test_sympy__core__numbers__Infinity(): from sympy.core.numbers import Infinity assert _test_args(Infinity()) def test_sympy__core__numbers__Integer(): from sympy.core.numbers import Integer assert _test_args(Integer(7)) @SKIP("abstract class") def test_sympy__core__numbers__IntegerConstant(): pass def test_sympy__core__numbers__NaN(): from sympy.core.numbers import NaN assert _test_args(NaN()) def test_sympy__core__numbers__NegativeInfinity(): from sympy.core.numbers import NegativeInfinity assert _test_args(NegativeInfinity()) def test_sympy__core__numbers__NegativeOne(): from sympy.core.numbers import NegativeOne assert _test_args(NegativeOne()) def test_sympy__core__numbers__Number(): from sympy.core.numbers import Number assert _test_args(Number(1, 7)) def test_sympy__core__numbers__NumberSymbol(): from sympy.core.numbers import NumberSymbol assert _test_args(NumberSymbol()) def 
test_sympy__core__numbers__One(): from sympy.core.numbers import One assert _test_args(One()) def test_sympy__core__numbers__Pi(): from sympy.core.numbers import Pi assert _test_args(Pi()) def test_sympy__core__numbers__Rational(): from sympy.core.numbers import Rational assert _test_args(Rational(1, 7)) @SKIP("abstract class") def test_sympy__core__numbers__RationalConstant(): pass def test_sympy__core__numbers__Zero(): from sympy.core.numbers import Zero assert _test_args(Zero()) @SKIP("abstract class") def test_sympy__core__operations__AssocOp(): pass @SKIP("abstract class") def test_sympy__core__operations__LatticeOp(): pass def test_sympy__core__power__Pow(): from sympy.core.power import Pow assert _test_args(Pow(x, 2)) def test_sympy__core__relational__Equality(): from sympy.core.relational import Equality assert _test_args(Equality(x, 2)) def test_sympy__core__relational__GreaterThan(): from sympy.core.relational import GreaterThan assert _test_args(GreaterThan(x, 2)) def test_sympy__core__relational__LessThan(): from sympy.core.relational import LessThan assert _test_args(LessThan(x, 2)) @SKIP("abstract class") def test_sympy__core__relational__Relational(): pass def test_sympy__core__relational__StrictGreaterThan(): from sympy.core.relational import StrictGreaterThan assert _test_args(StrictGreaterThan(x, 2)) def test_sympy__core__relational__StrictLessThan(): from sympy.core.relational import StrictLessThan assert _test_args(StrictLessThan(x, 2)) def test_sympy__core__relational__Unequality(): from sympy.core.relational import Unequality assert _test_args(Unequality(x, 2)) def test_sympy__sets__sets__EmptySet(): from sympy.sets.sets import EmptySet assert _test_args(EmptySet()) def test_sympy__sets__sets__UniversalSet(): from sympy.sets.sets import UniversalSet assert _test_args(UniversalSet()) def test_sympy__sets__sets__FiniteSet(): from sympy.sets.sets import FiniteSet assert _test_args(FiniteSet(x, y, z)) def test_sympy__sets__sets__Interval(): from sympy.sets.sets import Interval assert _test_args(Interval(0, 1)) def test_sympy__sets__sets__ProductSet(): from sympy.sets.sets import ProductSet, Interval assert _test_args(ProductSet(Interval(0, 1), Interval(0, 1))) @SKIP("does it make sense to test this?") def test_sympy__sets__sets__Set(): from sympy.sets.sets import Set assert _test_args(Set()) def test_sympy__sets__sets__Intersection(): from sympy.sets.sets import Intersection, Interval assert _test_args(Intersection(Interval(0, 3), Interval(2, 4), evaluate=False)) def test_sympy__sets__sets__Union(): from sympy.sets.sets import Union, Interval assert _test_args(Union(Interval(0, 1), Interval(2, 3))) def test_sympy__sets__sets__Complement(): from sympy.sets.sets import Complement assert _test_args(Complement(Interval(0, 2), Interval(0, 1))) def test_sympy__sets__sets__SymmetricDifference(): from sympy.sets.sets import FiniteSet, SymmetricDifference assert _test_args(SymmetricDifference(FiniteSet(1, 2, 3), \ FiniteSet(2, 3, 4))) def test_sympy__core__trace__Tr(): from sympy.core.trace import Tr a, b = symbols('a b') assert _test_args(Tr(a + b)) def test_sympy__sets__fancysets__Naturals(): from sympy.sets.fancysets import Naturals assert _test_args(Naturals()) def test_sympy__sets__fancysets__Naturals0(): from sympy.sets.fancysets import Naturals0 assert _test_args(Naturals0()) def test_sympy__sets__fancysets__Integers(): from sympy.sets.fancysets import Integers assert _test_args(Integers()) def test_sympy__sets__fancysets__Reals(): from sympy.sets.fancysets import Reals 
assert _test_args(Reals()) def test_sympy__sets__fancysets__ImageSet(): from sympy.sets.fancysets import ImageSet from sympy import S, Lambda, Symbol x = Symbol('x') assert _test_args(ImageSet(Lambda(x, x**2), S.Naturals)) def test_sympy__sets__fancysets__Range(): from sympy.sets.fancysets import Range assert _test_args(Range(1, 5, 1)) def test_sympy__sets__contains__Contains(): from sympy.sets.fancysets import Range from sympy.sets.contains import Contains assert _test_args(Contains(x, Range(0, 10, 2))) # STATS from sympy.stats.crv_types import NormalDistribution nd = NormalDistribution(0, 1) from sympy.stats.frv_types import DieDistribution die = DieDistribution(6) def test_sympy__stats__crv__ContinuousDomain(): from sympy.stats.crv import ContinuousDomain assert _test_args(ContinuousDomain(set([x]), Interval(-oo, oo))) def test_sympy__stats__crv__SingleContinuousDomain(): from sympy.stats.crv import SingleContinuousDomain assert _test_args(SingleContinuousDomain(x, Interval(-oo, oo))) def test_sympy__stats__crv__ProductContinuousDomain(): from sympy.stats.crv import SingleContinuousDomain, ProductContinuousDomain D = SingleContinuousDomain(x, Interval(-oo, oo)) E = SingleContinuousDomain(y, Interval(0, oo)) assert _test_args(ProductContinuousDomain(D, E)) def test_sympy__stats__crv__ConditionalContinuousDomain(): from sympy.stats.crv import (SingleContinuousDomain, ConditionalContinuousDomain) D = SingleContinuousDomain(x, Interval(-oo, oo)) assert _test_args(ConditionalContinuousDomain(D, x > 0)) def test_sympy__stats__crv__ContinuousPSpace(): from sympy.stats.crv import ContinuousPSpace, SingleContinuousDomain D = SingleContinuousDomain(x, Interval(-oo, oo)) assert _test_args(ContinuousPSpace(D, nd)) def test_sympy__stats__crv__SingleContinuousPSpace(): from sympy.stats.crv import SingleContinuousPSpace assert _test_args(SingleContinuousPSpace(x, nd)) def test_sympy__stats__crv__ProductContinuousPSpace(): from sympy.stats.crv import ProductContinuousPSpace, SingleContinuousPSpace A = SingleContinuousPSpace(x, nd) B = SingleContinuousPSpace(y, nd) assert _test_args(ProductContinuousPSpace(A, B)) @SKIP("abstract class") def test_sympy__stats__crv__SingleContinuousDistribution(): pass def test_sympy__stats__drv__SingleDiscreteDomain(): from sympy.stats.drv import SingleDiscreteDomain assert _test_args(SingleDiscreteDomain(x, S.Naturals)) def test_sympy__stats__drv__SingleDiscretePSpace(): from sympy.stats.drv import SingleDiscretePSpace from sympy.stats.drv_types import PoissonDistribution assert _test_args(SingleDiscretePSpace(x, PoissonDistribution(1))) @SKIP("abstract class") def test_sympy__stats__drv__SingleDiscreteDistribution(): pass def test_sympy__stats__rv__RandomDomain(): from sympy.stats.rv import RandomDomain from sympy.sets.sets import FiniteSet assert _test_args(RandomDomain(FiniteSet(x), FiniteSet(1, 2, 3))) def test_sympy__stats__rv__SingleDomain(): from sympy.stats.rv import SingleDomain from sympy.sets.sets import FiniteSet assert _test_args(SingleDomain(x, FiniteSet(1, 2, 3))) def test_sympy__stats__rv__ConditionalDomain(): from sympy.stats.rv import ConditionalDomain, RandomDomain from sympy.sets.sets import FiniteSet D = RandomDomain(FiniteSet(x), FiniteSet(1, 2)) assert _test_args(ConditionalDomain(D, x > 1)) def test_sympy__stats__rv__PSpace(): from sympy.stats.rv import PSpace, RandomDomain from sympy import FiniteSet D = RandomDomain(FiniteSet(x), FiniteSet(1, 2, 3, 4, 5, 6)) assert _test_args(PSpace(D, die)) @SKIP("abstract Class") def 
test_sympy__stats__rv__SinglePSpace(): pass def test_sympy__stats__rv__RandomSymbol(): from sympy.stats.rv import RandomSymbol from sympy.stats.crv import SingleContinuousPSpace A = SingleContinuousPSpace(x, nd) assert _test_args(RandomSymbol(A, x)) def test_sympy__stats__rv__ProductPSpace(): from sympy.stats.rv import ProductPSpace from sympy.stats.crv import SingleContinuousPSpace A = SingleContinuousPSpace(x, nd) B = SingleContinuousPSpace(y, nd) assert _test_args(ProductPSpace(A, B)) def test_sympy__stats__rv__ProductDomain(): from sympy.stats.rv import ProductDomain, SingleDomain D = SingleDomain(x, Interval(-oo, oo)) E = SingleDomain(y, Interval(0, oo)) assert _test_args(ProductDomain(D, E)) def test_sympy__stats__frv_types__DiscreteUniformDistribution(): from sympy.stats.frv_types import DiscreteUniformDistribution from sympy.core.containers import Tuple assert _test_args(DiscreteUniformDistribution(Tuple(*list(range(6))))) def test_sympy__stats__frv_types__DieDistribution(): from sympy.stats.frv_types import DieDistribution assert _test_args(DieDistribution(6)) def test_sympy__stats__frv_types__BernoulliDistribution(): from sympy.stats.frv_types import BernoulliDistribution assert _test_args(BernoulliDistribution(S.Half, 0, 1)) def test_sympy__stats__frv_types__BinomialDistribution(): from sympy.stats.frv_types import BinomialDistribution assert _test_args(BinomialDistribution(5, S.Half, 1, 0)) def test_sympy__stats__frv_types__HypergeometricDistribution(): from sympy.stats.frv_types import HypergeometricDistribution assert _test_args(HypergeometricDistribution(10, 5, 3)) def test_sympy__stats__frv_types__RademacherDistribution(): from sympy.stats.frv_types import RademacherDistribution assert _test_args(RademacherDistribution()) def test_sympy__stats__frv__FiniteDomain(): from sympy.stats.frv import FiniteDomain assert _test_args(FiniteDomain(set([(x, 1), (x, 2)]))) # x can be 1 or 2 def test_sympy__stats__frv__SingleFiniteDomain(): from sympy.stats.frv import SingleFiniteDomain assert _test_args(SingleFiniteDomain(x, set([1, 2]))) # x can be 1 or 2 def test_sympy__stats__frv__ProductFiniteDomain(): from sympy.stats.frv import SingleFiniteDomain, ProductFiniteDomain xd = SingleFiniteDomain(x, set([1, 2])) yd = SingleFiniteDomain(y, set([1, 2])) assert _test_args(ProductFiniteDomain(xd, yd)) def test_sympy__stats__frv__ConditionalFiniteDomain(): from sympy.stats.frv import SingleFiniteDomain, ConditionalFiniteDomain xd = SingleFiniteDomain(x, set([1, 2])) assert _test_args(ConditionalFiniteDomain(xd, x > 1)) def test_sympy__stats__frv__FinitePSpace(): from sympy.stats.frv import FinitePSpace, SingleFiniteDomain xd = SingleFiniteDomain(x, set([1, 2, 3, 4, 5, 6])) p = 1.0/6 xd = SingleFiniteDomain(x, set([1, 2])) assert _test_args(FinitePSpace(xd, {(x, 1): S.Half, (x, 2): S.Half})) def test_sympy__stats__frv__SingleFinitePSpace(): from sympy.stats.frv import SingleFinitePSpace from sympy import Symbol assert _test_args(SingleFinitePSpace(Symbol('x'), die)) def test_sympy__stats__frv__ProductFinitePSpace(): from sympy.stats.frv import SingleFinitePSpace, ProductFinitePSpace from sympy import Symbol xp = SingleFinitePSpace(Symbol('x'), die) yp = SingleFinitePSpace(Symbol('y'), die) assert _test_args(ProductFinitePSpace(xp, yp)) @SKIP("abstract class") def test_sympy__stats__frv__SingleFiniteDistribution(): pass @SKIP("abstract class") def test_sympy__stats__crv__ContinuousDistribution(): pass def test_sympy__stats__frv_types__FiniteDistributionHandmade(): from sympy.stats.frv_types 
import FiniteDistributionHandmade
    assert _test_args(FiniteDistributionHandmade({1: 1}))


def test_sympy__stats__crv__ContinuousDistributionHandmade():
    from sympy.stats.crv import ContinuousDistributionHandmade
    from sympy import Symbol, Interval
    assert _test_args(ContinuousDistributionHandmade(Symbol('x'), Interval(0, 2)))


def test_sympy__stats__rv__Density():
    from sympy.stats.rv import Density
    from sympy.stats.crv_types import Normal
    assert _test_args(Density(Normal('x', 0, 1)))


def test_sympy__stats__crv_types__ArcsinDistribution():
    from sympy.stats.crv_types import ArcsinDistribution
    assert _test_args(ArcsinDistribution(0, 1))


def test_sympy__stats__crv_types__BeniniDistribution():
    from sympy.stats.crv_types import BeniniDistribution
    assert _test_args(BeniniDistribution(1, 1, 1))


def test_sympy__stats__crv_types__BetaDistribution():
    from sympy.stats.crv_types import BetaDistribution
    assert _test_args(BetaDistribution(1, 1))


def test_sympy__stats__crv_types__BetaPrimeDistribution():
    from sympy.stats.crv_types import BetaPrimeDistribution
    assert _test_args(BetaPrimeDistribution(1, 1))


def test_sympy__stats__crv_types__CauchyDistribution():
    from sympy.stats.crv_types import CauchyDistribution
    assert _test_args(CauchyDistribution(0, 1))


def test_sympy__stats__crv_types__ChiDistribution():
    from sympy.stats.crv_types import ChiDistribution
    assert _test_args(ChiDistribution(1))


def test_sympy__stats__crv_types__ChiNoncentralDistribution():
    from sympy.stats.crv_types import ChiNoncentralDistribution
    assert _test_args(ChiNoncentralDistribution(1, 1))


def test_sympy__stats__crv_types__ChiSquaredDistribution():
    from sympy.stats.crv_types import ChiSquaredDistribution
    assert _test_args(ChiSquaredDistribution(1))


def test_sympy__stats__crv_types__DagumDistribution():
    from sympy.stats.crv_types import DagumDistribution
    assert _test_args(DagumDistribution(1, 1, 1))


def test_sympy__stats__crv_types__ExponentialDistribution():
    from sympy.stats.crv_types import ExponentialDistribution
    assert _test_args(ExponentialDistribution(1))


def test_sympy__stats__crv_types__FDistributionDistribution():
    from sympy.stats.crv_types import FDistributionDistribution
    assert _test_args(FDistributionDistribution(1, 1))


def test_sympy__stats__crv_types__FisherZDistribution():
    from sympy.stats.crv_types import FisherZDistribution
    assert _test_args(FisherZDistribution(1, 1))


def test_sympy__stats__crv_types__FrechetDistribution():
    from sympy.stats.crv_types import FrechetDistribution
    assert _test_args(FrechetDistribution(1, 1, 1))


def test_sympy__stats__crv_types__GammaInverseDistribution():
    from sympy.stats.crv_types import GammaInverseDistribution
    assert _test_args(GammaInverseDistribution(1, 1))


def test_sympy__stats__crv_types__GammaDistribution():
    from sympy.stats.crv_types import GammaDistribution
    assert _test_args(GammaDistribution(1, 1))


def test_sympy__stats__crv_types__KumaraswamyDistribution():
    from sympy.stats.crv_types import KumaraswamyDistribution
    assert _test_args(KumaraswamyDistribution(1, 1))


def test_sympy__stats__crv_types__LaplaceDistribution():
    from sympy.stats.crv_types import LaplaceDistribution
    assert _test_args(LaplaceDistribution(0, 1))


def test_sympy__stats__crv_types__LogisticDistribution():
    from sympy.stats.crv_types import LogisticDistribution
    assert _test_args(LogisticDistribution(0, 1))


def test_sympy__stats__crv_types__LogNormalDistribution():
    from sympy.stats.crv_types import LogNormalDistribution
    assert _test_args(LogNormalDistribution(0, 1))


def test_sympy__stats__crv_types__MaxwellDistribution():
    from sympy.stats.crv_types import MaxwellDistribution
    assert _test_args(MaxwellDistribution(1))


def test_sympy__stats__crv_types__NakagamiDistribution():
    from sympy.stats.crv_types import NakagamiDistribution
    assert _test_args(NakagamiDistribution(1, 1))


def test_sympy__stats__crv_types__NormalDistribution():
    from sympy.stats.crv_types import NormalDistribution
    assert _test_args(NormalDistribution(0, 1))


def test_sympy__stats__crv_types__ParetoDistribution():
    from sympy.stats.crv_types import ParetoDistribution
    assert _test_args(ParetoDistribution(1, 1))


def test_sympy__stats__crv_types__QuadraticUDistribution():
    from sympy.stats.crv_types import QuadraticUDistribution
    assert _test_args(QuadraticUDistribution(1, 2))


def test_sympy__stats__crv_types__RaisedCosineDistribution():
    from sympy.stats.crv_types import RaisedCosineDistribution
    assert _test_args(RaisedCosineDistribution(1, 1))


def test_sympy__stats__crv_types__RayleighDistribution():
    from sympy.stats.crv_types import RayleighDistribution
    assert _test_args(RayleighDistribution(1))


def test_sympy__stats__crv_types__StudentTDistribution():
    from sympy.stats.crv_types import StudentTDistribution
    assert _test_args(StudentTDistribution(1))


def test_sympy__stats__crv_types__TriangularDistribution():
    from sympy.stats.crv_types import TriangularDistribution
    assert _test_args(TriangularDistribution(-1, 0, 1))


def test_sympy__stats__crv_types__UniformDistribution():
    from sympy.stats.crv_types import UniformDistribution
    assert _test_args(UniformDistribution(0, 1))


def test_sympy__stats__crv_types__UniformSumDistribution():
    from sympy.stats.crv_types import UniformSumDistribution
    assert _test_args(UniformSumDistribution(1))


def test_sympy__stats__crv_types__VonMisesDistribution():
    from sympy.stats.crv_types import VonMisesDistribution
    assert _test_args(VonMisesDistribution(1, 1))


def test_sympy__stats__crv_types__WeibullDistribution():
    from sympy.stats.crv_types import WeibullDistribution
    assert _test_args(WeibullDistribution(1, 1))


def test_sympy__stats__crv_types__WignerSemicircleDistribution():
    from sympy.stats.crv_types import WignerSemicircleDistribution
    assert _test_args(WignerSemicircleDistribution(1))


def test_sympy__stats__drv_types__PoissonDistribution():
    from sympy.stats.drv_types import PoissonDistribution
    assert _test_args(PoissonDistribution(1))


def test_sympy__stats__drv_types__GeometricDistribution():
    from sympy.stats.drv_types import GeometricDistribution
    assert _test_args(GeometricDistribution(.5))


def test_sympy__core__symbol__Dummy():
    from sympy.core.symbol import Dummy
    assert _test_args(Dummy('t'))


def test_sympy__core__symbol__Symbol():
    from sympy.core.symbol import Symbol
    assert _test_args(Symbol('t'))


def test_sympy__core__symbol__Wild():
    from sympy.core.symbol import Wild
    assert _test_args(Wild('x', exclude=[x]))


@SKIP("abstract class")
def test_sympy__functions__combinatorial__factorials__CombinatorialFunction():
    pass


def test_sympy__functions__combinatorial__factorials__FallingFactorial():
    from sympy.functions.combinatorial.factorials import FallingFactorial
    assert _test_args(FallingFactorial(2, x))


def test_sympy__functions__combinatorial__factorials__MultiFactorial():
    from sympy.functions.combinatorial.factorials import MultiFactorial
    assert _test_args(MultiFactorial(x))


def test_sympy__functions__combinatorial__factorials__RisingFactorial():
    from sympy.functions.combinatorial.factorials import RisingFactorial
    assert _test_args(RisingFactorial(2, x))


def test_sympy__functions__combinatorial__factorials__binomial():
    from sympy.functions.combinatorial.factorials import binomial
    assert _test_args(binomial(2, x))


def test_sympy__functions__combinatorial__factorials__subfactorial():
    from sympy.functions.combinatorial.factorials import subfactorial
    assert _test_args(subfactorial(1))


def test_sympy__functions__combinatorial__factorials__factorial():
    from sympy.functions.combinatorial.factorials import factorial
    assert _test_args(factorial(x))


def test_sympy__functions__combinatorial__factorials__factorial2():
    from sympy.functions.combinatorial.factorials import factorial2
    assert _test_args(factorial2(x))


def test_sympy__functions__combinatorial__numbers__bell():
    from sympy.functions.combinatorial.numbers import bell
    assert _test_args(bell(x, y))


def test_sympy__functions__combinatorial__numbers__bernoulli():
    from sympy.functions.combinatorial.numbers import bernoulli
    assert _test_args(bernoulli(x))


def test_sympy__functions__combinatorial__numbers__catalan():
    from sympy.functions.combinatorial.numbers import catalan
    assert _test_args(catalan(x))


def test_sympy__functions__combinatorial__numbers__genocchi():
    from sympy.functions.combinatorial.numbers import genocchi
    assert _test_args(genocchi(x))


def test_sympy__functions__combinatorial__numbers__euler():
    from sympy.functions.combinatorial.numbers import euler
    assert _test_args(euler(x))


def test_sympy__functions__combinatorial__numbers__fibonacci():
    from sympy.functions.combinatorial.numbers import fibonacci
    assert _test_args(fibonacci(x))


def test_sympy__functions__combinatorial__numbers__harmonic():
    from sympy.functions.combinatorial.numbers import harmonic
    assert _test_args(harmonic(x, 2))


def test_sympy__functions__combinatorial__numbers__lucas():
    from sympy.functions.combinatorial.numbers import lucas
    assert _test_args(lucas(x))


def test_sympy__functions__elementary__complexes__Abs():
    from sympy.functions.elementary.complexes import Abs
    assert _test_args(Abs(x))


def test_sympy__functions__elementary__complexes__adjoint():
    from sympy.functions.elementary.complexes import adjoint
    assert _test_args(adjoint(x))


def test_sympy__functions__elementary__complexes__arg():
    from sympy.functions.elementary.complexes import arg
    assert _test_args(arg(x))


def test_sympy__functions__elementary__complexes__conjugate():
    from sympy.functions.elementary.complexes import conjugate
    assert _test_args(conjugate(x))


def test_sympy__functions__elementary__complexes__im():
    from sympy.functions.elementary.complexes import im
    assert _test_args(im(x))


def test_sympy__functions__elementary__complexes__re():
    from sympy.functions.elementary.complexes import re
    assert _test_args(re(x))


def test_sympy__functions__elementary__complexes__sign():
    from sympy.functions.elementary.complexes import sign
    assert _test_args(sign(x))


def test_sympy__functions__elementary__complexes__polar_lift():
    from sympy.functions.elementary.complexes import polar_lift
    assert _test_args(polar_lift(x))


def test_sympy__functions__elementary__complexes__periodic_argument():
    from sympy.functions.elementary.complexes import periodic_argument
    assert _test_args(periodic_argument(x, y))


def test_sympy__functions__elementary__complexes__principal_branch():
    from sympy.functions.elementary.complexes import principal_branch
    assert _test_args(principal_branch(x, y))


def test_sympy__functions__elementary__complexes__transpose():
    from sympy.functions.elementary.complexes import transpose
    assert _test_args(transpose(x))


def
test_sympy__functions__elementary__exponential__LambertW(): from sympy.functions.elementary.exponential import LambertW assert _test_args(LambertW(2)) @SKIP("abstract class") def test_sympy__functions__elementary__exponential__ExpBase(): pass def test_sympy__functions__elementary__exponential__exp(): from sympy.functions.elementary.exponential import exp assert _test_args(exp(2)) def test_sympy__functions__elementary__exponential__exp_polar(): from sympy.functions.elementary.exponential import exp_polar assert _test_args(exp_polar(2)) def test_sympy__functions__elementary__exponential__log(): from sympy.functions.elementary.exponential import log assert _test_args(log(2)) @SKIP("abstract class") def test_sympy__functions__elementary__hyperbolic__HyperbolicFunction(): pass @SKIP("abstract class") def test_sympy__functions__elementary__hyperbolic__ReciprocalHyperbolicFunction(): pass def test_sympy__functions__elementary__hyperbolic__acosh(): from sympy.functions.elementary.hyperbolic import acosh assert _test_args(acosh(2)) def test_sympy__functions__elementary__hyperbolic__acoth(): from sympy.functions.elementary.hyperbolic import acoth assert _test_args(acoth(2)) def test_sympy__functions__elementary__hyperbolic__asinh(): from sympy.functions.elementary.hyperbolic import asinh assert _test_args(asinh(2)) def test_sympy__functions__elementary__hyperbolic__atanh(): from sympy.functions.elementary.hyperbolic import atanh assert _test_args(atanh(2)) def test_sympy__functions__elementary__hyperbolic__cosh(): from sympy.functions.elementary.hyperbolic import cosh assert _test_args(cosh(2)) def test_sympy__functions__elementary__hyperbolic__coth(): from sympy.functions.elementary.hyperbolic import coth assert _test_args(coth(2)) def test_sympy__functions__elementary__hyperbolic__csch(): from sympy.functions.elementary.hyperbolic import csch assert _test_args(csch(2)) def test_sympy__functions__elementary__hyperbolic__sech(): from sympy.functions.elementary.hyperbolic import sech assert _test_args(sech(2)) def test_sympy__functions__elementary__hyperbolic__sinh(): from sympy.functions.elementary.hyperbolic import sinh assert _test_args(sinh(2)) def test_sympy__functions__elementary__hyperbolic__tanh(): from sympy.functions.elementary.hyperbolic import tanh assert _test_args(tanh(2)) @SKIP("does this work at all?") def test_sympy__functions__elementary__integers__RoundFunction(): from sympy.functions.elementary.integers import RoundFunction assert _test_args(RoundFunction()) def test_sympy__functions__elementary__integers__ceiling(): from sympy.functions.elementary.integers import ceiling assert _test_args(ceiling(x)) def test_sympy__functions__elementary__integers__floor(): from sympy.functions.elementary.integers import floor assert _test_args(floor(x)) def test_sympy__functions__elementary__miscellaneous__IdentityFunction(): from sympy.functions.elementary.miscellaneous import IdentityFunction assert _test_args(IdentityFunction()) def test_sympy__functions__elementary__miscellaneous__Max(): from sympy.functions.elementary.miscellaneous import Max assert _test_args(Max(x, 2)) def test_sympy__functions__elementary__miscellaneous__Min(): from sympy.functions.elementary.miscellaneous import Min assert _test_args(Min(x, 2)) @SKIP("abstract class") def test_sympy__functions__elementary__miscellaneous__MinMaxBase(): pass def test_sympy__functions__elementary__piecewise__ExprCondPair(): from sympy.functions.elementary.piecewise import ExprCondPair assert _test_args(ExprCondPair(1, True)) def 
test_sympy__functions__elementary__piecewise__Piecewise(): from sympy.functions.elementary.piecewise import Piecewise assert _test_args(Piecewise((1, x >= 0), (0, True))) @SKIP("abstract class") def test_sympy__functions__elementary__trigonometric__TrigonometricFunction(): pass @SKIP("abstract class") def test_sympy__functions__elementary__trigonometric__ReciprocalTrigonometricFunction(): pass @SKIP("abstract class") def test_sympy__functions__elementary__trigonometric__InverseTrigonometricFunction(): pass def test_sympy__functions__elementary__trigonometric__acos(): from sympy.functions.elementary.trigonometric import acos assert _test_args(acos(2)) def test_sympy__functions__elementary__trigonometric__acot(): from sympy.functions.elementary.trigonometric import acot assert _test_args(acot(2)) def test_sympy__functions__elementary__trigonometric__asin(): from sympy.functions.elementary.trigonometric import asin assert _test_args(asin(2)) def test_sympy__functions__elementary__trigonometric__asec(): from sympy.functions.elementary.trigonometric import asec assert _test_args(asec(2)) def test_sympy__functions__elementary__trigonometric__acsc(): from sympy.functions.elementary.trigonometric import acsc assert _test_args(acsc(2)) def test_sympy__functions__elementary__trigonometric__atan(): from sympy.functions.elementary.trigonometric import atan assert _test_args(atan(2)) def test_sympy__functions__elementary__trigonometric__atan2(): from sympy.functions.elementary.trigonometric import atan2 assert _test_args(atan2(2, 3)) def test_sympy__functions__elementary__trigonometric__cos(): from sympy.functions.elementary.trigonometric import cos assert _test_args(cos(2)) def test_sympy__functions__elementary__trigonometric__csc(): from sympy.functions.elementary.trigonometric import csc assert _test_args(csc(2)) def test_sympy__functions__elementary__trigonometric__cot(): from sympy.functions.elementary.trigonometric import cot assert _test_args(cot(2)) def test_sympy__functions__elementary__trigonometric__sin(): assert _test_args(sin(2)) def test_sympy__functions__elementary__trigonometric__sec(): from sympy.functions.elementary.trigonometric import sec assert _test_args(sec(2)) def test_sympy__functions__elementary__trigonometric__tan(): from sympy.functions.elementary.trigonometric import tan assert _test_args(tan(2)) @SKIP("abstract class") def test_sympy__functions__special__bessel__BesselBase(): pass @SKIP("abstract class") def test_sympy__functions__special__bessel__SphericalBesselBase(): pass def test_sympy__functions__special__bessel__besseli(): from sympy.functions.special.bessel import besseli assert _test_args(besseli(x, 1)) def test_sympy__functions__special__bessel__besselj(): from sympy.functions.special.bessel import besselj assert _test_args(besselj(x, 1)) def test_sympy__functions__special__bessel__besselk(): from sympy.functions.special.bessel import besselk assert _test_args(besselk(x, 1)) def test_sympy__functions__special__bessel__bessely(): from sympy.functions.special.bessel import bessely assert _test_args(bessely(x, 1)) def test_sympy__functions__special__bessel__hankel1(): from sympy.functions.special.bessel import hankel1 assert _test_args(hankel1(x, 1)) def test_sympy__functions__special__bessel__hankel2(): from sympy.functions.special.bessel import hankel2 assert _test_args(hankel2(x, 1)) def test_sympy__functions__special__bessel__jn(): from sympy.functions.special.bessel import jn assert _test_args(jn(0, x)) def test_sympy__functions__special__bessel__yn(): from 
sympy.functions.special.bessel import yn assert _test_args(yn(0, x)) def test_sympy__functions__special__bessel__AiryBase(): pass def test_sympy__functions__special__bessel__airyai(): from sympy.functions.special.bessel import airyai assert _test_args(airyai(2)) def test_sympy__functions__special__bessel__airybi(): from sympy.functions.special.bessel import airybi assert _test_args(airybi(2)) def test_sympy__functions__special__bessel__airyaiprime(): from sympy.functions.special.bessel import airyaiprime assert _test_args(airyaiprime(2)) def test_sympy__functions__special__bessel__airybiprime(): from sympy.functions.special.bessel import airybiprime assert _test_args(airybiprime(2)) def test_sympy__functions__special__elliptic_integrals__elliptic_k(): from sympy.functions.special.elliptic_integrals import elliptic_k as K assert _test_args(K(x)) def test_sympy__functions__special__elliptic_integrals__elliptic_f(): from sympy.functions.special.elliptic_integrals import elliptic_f as F assert _test_args(F(x, y)) def test_sympy__functions__special__elliptic_integrals__elliptic_e(): from sympy.functions.special.elliptic_integrals import elliptic_e as E assert _test_args(E(x)) assert _test_args(E(x, y)) def test_sympy__functions__special__elliptic_integrals__elliptic_pi(): from sympy.functions.special.elliptic_integrals import elliptic_pi as P assert _test_args(P(x, y)) assert _test_args(P(x, y, z)) def test_sympy__functions__special__delta_functions__DiracDelta(): from sympy.functions.special.delta_functions import DiracDelta assert _test_args(DiracDelta(x, 1)) def test_sympy__functions__special__delta_functions__Heaviside(): from sympy.functions.special.delta_functions import Heaviside assert _test_args(Heaviside(x)) def test_sympy__functions__special__error_functions__erf(): from sympy.functions.special.error_functions import erf assert _test_args(erf(2)) def test_sympy__functions__special__error_functions__erfc(): from sympy.functions.special.error_functions import erfc assert _test_args(erfc(2)) def test_sympy__functions__special__error_functions__erfi(): from sympy.functions.special.error_functions import erfi assert _test_args(erfi(2)) def test_sympy__functions__special__error_functions__erf2(): from sympy.functions.special.error_functions import erf2 assert _test_args(erf2(2, 3)) def test_sympy__functions__special__error_functions__erfinv(): from sympy.functions.special.error_functions import erfinv assert _test_args(erfinv(2)) def test_sympy__functions__special__error_functions__erfcinv(): from sympy.functions.special.error_functions import erfcinv assert _test_args(erfcinv(2)) def test_sympy__functions__special__error_functions__erf2inv(): from sympy.functions.special.error_functions import erf2inv assert _test_args(erf2inv(2, 3)) @SKIP("abstract class") def test_sympy__functions__special__error_functions__FresnelIntegral(): pass def test_sympy__functions__special__error_functions__fresnels(): from sympy.functions.special.error_functions import fresnels assert _test_args(fresnels(2)) def test_sympy__functions__special__error_functions__fresnelc(): from sympy.functions.special.error_functions import fresnelc assert _test_args(fresnelc(2)) def test_sympy__functions__special__error_functions__erfs(): from sympy.functions.special.error_functions import _erfs assert _test_args(_erfs(2)) def test_sympy__functions__special__error_functions__Ei(): from sympy.functions.special.error_functions import Ei assert _test_args(Ei(2)) def test_sympy__functions__special__error_functions__li(): from 
sympy.functions.special.error_functions import li assert _test_args(li(2)) def test_sympy__functions__special__error_functions__Li(): from sympy.functions.special.error_functions import Li assert _test_args(Li(2)) @SKIP("abstract class") def test_sympy__functions__special__error_functions__TrigonometricIntegral(): pass def test_sympy__functions__special__error_functions__Si(): from sympy.functions.special.error_functions import Si assert _test_args(Si(2)) def test_sympy__functions__special__error_functions__Ci(): from sympy.functions.special.error_functions import Ci assert _test_args(Ci(2)) def test_sympy__functions__special__error_functions__Shi(): from sympy.functions.special.error_functions import Shi assert _test_args(Shi(2)) def test_sympy__functions__special__error_functions__Chi(): from sympy.functions.special.error_functions import Chi assert _test_args(Chi(2)) def test_sympy__functions__special__error_functions__expint(): from sympy.functions.special.error_functions import expint assert _test_args(expint(y, x)) def test_sympy__functions__special__gamma_functions__gamma(): from sympy.functions.special.gamma_functions import gamma assert _test_args(gamma(x)) def test_sympy__functions__special__gamma_functions__loggamma(): from sympy.functions.special.gamma_functions import loggamma assert _test_args(loggamma(2)) def test_sympy__functions__special__gamma_functions__lowergamma(): from sympy.functions.special.gamma_functions import lowergamma assert _test_args(lowergamma(x, 2)) def test_sympy__functions__special__gamma_functions__polygamma(): from sympy.functions.special.gamma_functions import polygamma assert _test_args(polygamma(x, 2)) def test_sympy__functions__special__gamma_functions__uppergamma(): from sympy.functions.special.gamma_functions import uppergamma assert _test_args(uppergamma(x, 2)) def test_sympy__functions__special__beta_functions__beta(): from sympy.functions.special.beta_functions import beta assert _test_args(beta(x, x)) @SKIP("abstract class") def test_sympy__functions__special__hyper__TupleParametersBase(): pass @SKIP("abstract class") def test_sympy__functions__special__hyper__TupleArg(): pass def test_sympy__functions__special__hyper__hyper(): from sympy.functions.special.hyper import hyper assert _test_args(hyper([1, 2, 3], [4, 5], x)) def test_sympy__functions__special__hyper__meijerg(): from sympy.functions.special.hyper import meijerg assert _test_args(meijerg([1, 2, 3], [4, 5], [6], [], x)) @SKIP("abstract class") def test_sympy__functions__special__hyper__HyperRep(): pass def test_sympy__functions__special__hyper__HyperRep_power1(): from sympy.functions.special.hyper import HyperRep_power1 assert _test_args(HyperRep_power1(x, y)) def test_sympy__functions__special__hyper__HyperRep_power2(): from sympy.functions.special.hyper import HyperRep_power2 assert _test_args(HyperRep_power2(x, y)) def test_sympy__functions__special__hyper__HyperRep_log1(): from sympy.functions.special.hyper import HyperRep_log1 assert _test_args(HyperRep_log1(x)) def test_sympy__functions__special__hyper__HyperRep_atanh(): from sympy.functions.special.hyper import HyperRep_atanh assert _test_args(HyperRep_atanh(x)) def test_sympy__functions__special__hyper__HyperRep_asin1(): from sympy.functions.special.hyper import HyperRep_asin1 assert _test_args(HyperRep_asin1(x)) def test_sympy__functions__special__hyper__HyperRep_asin2(): from sympy.functions.special.hyper import HyperRep_asin2 assert _test_args(HyperRep_asin2(x)) def 
test_sympy__functions__special__hyper__HyperRep_sqrts1(): from sympy.functions.special.hyper import HyperRep_sqrts1 assert _test_args(HyperRep_sqrts1(x, y)) def test_sympy__functions__special__hyper__HyperRep_sqrts2(): from sympy.functions.special.hyper import HyperRep_sqrts2 assert _test_args(HyperRep_sqrts2(x, y)) def test_sympy__functions__special__hyper__HyperRep_log2(): from sympy.functions.special.hyper import HyperRep_log2 assert _test_args(HyperRep_log2(x)) def test_sympy__functions__special__hyper__HyperRep_cosasin(): from sympy.functions.special.hyper import HyperRep_cosasin assert _test_args(HyperRep_cosasin(x, y)) def test_sympy__functions__special__hyper__HyperRep_sinasin(): from sympy.functions.special.hyper import HyperRep_sinasin assert _test_args(HyperRep_sinasin(x, y)) @SKIP("abstract class") def test_sympy__functions__special__polynomials__OrthogonalPolynomial(): pass def test_sympy__functions__special__polynomials__jacobi(): from sympy.functions.special.polynomials import jacobi assert _test_args(jacobi(x, 2, 2, 2)) def test_sympy__functions__special__polynomials__gegenbauer(): from sympy.functions.special.polynomials import gegenbauer assert _test_args(gegenbauer(x, 2, 2)) def test_sympy__functions__special__polynomials__chebyshevt(): from sympy.functions.special.polynomials import chebyshevt assert _test_args(chebyshevt(x, 2)) def test_sympy__functions__special__polynomials__chebyshevt_root(): from sympy.functions.special.polynomials import chebyshevt_root assert _test_args(chebyshevt_root(3, 2)) def test_sympy__functions__special__polynomials__chebyshevu(): from sympy.functions.special.polynomials import chebyshevu assert _test_args(chebyshevu(x, 2)) def test_sympy__functions__special__polynomials__chebyshevu_root(): from sympy.functions.special.polynomials import chebyshevu_root assert _test_args(chebyshevu_root(3, 2)) def test_sympy__functions__special__polynomials__hermite(): from sympy.functions.special.polynomials import hermite assert _test_args(hermite(x, 2)) def test_sympy__functions__special__polynomials__legendre(): from sympy.functions.special.polynomials import legendre assert _test_args(legendre(x, 2)) def test_sympy__functions__special__polynomials__assoc_legendre(): from sympy.functions.special.polynomials import assoc_legendre assert _test_args(assoc_legendre(x, 0, y)) def test_sympy__functions__special__polynomials__laguerre(): from sympy.functions.special.polynomials import laguerre assert _test_args(laguerre(x, 2)) def test_sympy__functions__special__polynomials__assoc_laguerre(): from sympy.functions.special.polynomials import assoc_laguerre assert _test_args(assoc_laguerre(x, 0, y)) def test_sympy__functions__special__spherical_harmonics__Ynm(): from sympy.functions.special.spherical_harmonics import Ynm assert _test_args(Ynm(1, 1, x, y)) def test_sympy__functions__special__spherical_harmonics__Znm(): from sympy.functions.special.spherical_harmonics import Znm assert _test_args(Znm(1, 1, x, y)) def test_sympy__functions__special__tensor_functions__LeviCivita(): from sympy.functions.special.tensor_functions import LeviCivita assert _test_args(LeviCivita(x, y, 2)) def test_sympy__functions__special__tensor_functions__KroneckerDelta(): from sympy.functions.special.tensor_functions import KroneckerDelta assert _test_args(KroneckerDelta(x, y)) def test_sympy__functions__special__zeta_functions__dirichlet_eta(): from sympy.functions.special.zeta_functions import dirichlet_eta assert _test_args(dirichlet_eta(x)) def 
test_sympy__functions__special__zeta_functions__zeta(): from sympy.functions.special.zeta_functions import zeta assert _test_args(zeta(101)) def test_sympy__functions__special__zeta_functions__lerchphi(): from sympy.functions.special.zeta_functions import lerchphi assert _test_args(lerchphi(x, y, z)) def test_sympy__functions__special__zeta_functions__polylog(): from sympy.functions.special.zeta_functions import polylog assert _test_args(polylog(x, y)) def test_sympy__integrals__integrals__Integral(): from sympy.integrals.integrals import Integral assert _test_args(Integral(2, (x, 0, 1))) def test_sympy__integrals__risch__NonElementaryIntegral(): from sympy.integrals.risch import NonElementaryIntegral assert _test_args(NonElementaryIntegral(exp(-x**2), x)) @SKIP("abstract class") def test_sympy__integrals__transforms__IntegralTransform(): pass def test_sympy__integrals__transforms__MellinTransform(): from sympy.integrals.transforms import MellinTransform assert _test_args(MellinTransform(2, x, y)) def test_sympy__integrals__transforms__InverseMellinTransform(): from sympy.integrals.transforms import InverseMellinTransform assert _test_args(InverseMellinTransform(2, x, y, 0, 1)) def test_sympy__integrals__transforms__LaplaceTransform(): from sympy.integrals.transforms import LaplaceTransform assert _test_args(LaplaceTransform(2, x, y)) def test_sympy__integrals__transforms__InverseLaplaceTransform(): from sympy.integrals.transforms import InverseLaplaceTransform assert _test_args(InverseLaplaceTransform(2, x, y, 0)) @SKIP("abstract class") def test_sympy__integrals__transforms__FourierTypeTransform(): pass def test_sympy__integrals__transforms__InverseFourierTransform(): from sympy.integrals.transforms import InverseFourierTransform assert _test_args(InverseFourierTransform(2, x, y)) def test_sympy__integrals__transforms__FourierTransform(): from sympy.integrals.transforms import FourierTransform assert _test_args(FourierTransform(2, x, y)) @SKIP("abstract class") def test_sympy__integrals__transforms__SineCosineTypeTransform(): pass def test_sympy__integrals__transforms__InverseSineTransform(): from sympy.integrals.transforms import InverseSineTransform assert _test_args(InverseSineTransform(2, x, y)) def test_sympy__integrals__transforms__SineTransform(): from sympy.integrals.transforms import SineTransform assert _test_args(SineTransform(2, x, y)) def test_sympy__integrals__transforms__InverseCosineTransform(): from sympy.integrals.transforms import InverseCosineTransform assert _test_args(InverseCosineTransform(2, x, y)) def test_sympy__integrals__transforms__CosineTransform(): from sympy.integrals.transforms import CosineTransform assert _test_args(CosineTransform(2, x, y)) @SKIP("abstract class") def test_sympy__integrals__transforms__HankelTypeTransform(): pass def test_sympy__integrals__transforms__InverseHankelTransform(): from sympy.integrals.transforms import InverseHankelTransform assert _test_args(InverseHankelTransform(2, x, y, 0)) def test_sympy__integrals__transforms__HankelTransform(): from sympy.integrals.transforms import HankelTransform assert _test_args(HankelTransform(2, x, y, 0)) @XFAIL def test_sympy__liealgebras__cartan_type__CartanType_generator(): from sympy.liealgebras.cartan_type import CartanType_generator assert _test_args(CartanType_generator("A2")) @XFAIL def test_sympy__liealgebras__cartan_type__Standard_Cartan(): from sympy.liealgebras.cartan_type import Standard_Cartan assert _test_args(Standard_Cartan("A", 2)) @XFAIL def 
test_sympy__liealgebras__weyl_group__WeylGroup():
    from sympy.liealgebras.weyl_group import WeylGroup
    assert _test_args(WeylGroup("B4"))


@XFAIL
def test_sympy__liealgebras__root_system__RootSystem():
    from sympy.liealgebras.root_system import RootSystem
    assert _test_args(RootSystem("A2"))


@XFAIL
def test_sympy__liealgebras__type_a__TypeA():
    from sympy.liealgebras.type_a import TypeA
    assert _test_args(TypeA(2))


@XFAIL
def test_sympy__liealgebras__type_b__TypeB():
    from sympy.liealgebras.type_b import TypeB
    assert _test_args(TypeB(4))


@XFAIL
def test_sympy__liealgebras__type_c__TypeC():
    from sympy.liealgebras.type_c import TypeC
    assert _test_args(TypeC(4))


@XFAIL
def test_sympy__liealgebras__type_d__TypeD():
    from sympy.liealgebras.type_d import TypeD
    assert _test_args(TypeD(4))


@XFAIL
def test_sympy__liealgebras__type_e__TypeE():
    from sympy.liealgebras.type_e import TypeE
    assert _test_args(TypeE(6))


@XFAIL
def test_sympy__liealgebras__type_f__TypeF():
    from sympy.liealgebras.type_f import TypeF
    assert _test_args(TypeF(4))


@XFAIL
def test_sympy__liealgebras__type_g__TypeG():
    from sympy.liealgebras.type_g import TypeG
    assert _test_args(TypeG(2))


def test_sympy__logic__boolalg__And():
    from sympy.logic.boolalg import And
    assert _test_args(And(x, y, 2))


@SKIP("abstract class")
def test_sympy__logic__boolalg__Boolean():
    pass


def test_sympy__logic__boolalg__BooleanFunction():
    from sympy.logic.boolalg import BooleanFunction
    assert _test_args(BooleanFunction(1, 2, 3))


@SKIP("abstract class")
def test_sympy__logic__boolalg__BooleanAtom():
    pass


def test_sympy__logic__boolalg__BooleanTrue():
    from sympy.logic.boolalg import true
    assert _test_args(true)


def test_sympy__logic__boolalg__BooleanFalse():
    from sympy.logic.boolalg import false
    assert _test_args(false)


def test_sympy__logic__boolalg__Equivalent():
    from sympy.logic.boolalg import Equivalent
    assert _test_args(Equivalent(x, 2))


def test_sympy__logic__boolalg__ITE():
    from sympy.logic.boolalg import ITE
    assert _test_args(ITE(x, y, 2))


def test_sympy__logic__boolalg__Implies():
    from sympy.logic.boolalg import Implies
    assert _test_args(Implies(x, y))


def test_sympy__logic__boolalg__Nand():
    from sympy.logic.boolalg import Nand
    assert _test_args(Nand(x, y, 2))


def test_sympy__logic__boolalg__Nor():
    from sympy.logic.boolalg import Nor
    assert _test_args(Nor(x, y))


def test_sympy__logic__boolalg__Not():
    from sympy.logic.boolalg import Not
    assert _test_args(Not(x))


def test_sympy__logic__boolalg__Or():
    from sympy.logic.boolalg import Or
    assert _test_args(Or(x, y))


def test_sympy__logic__boolalg__Xor():
    from sympy.logic.boolalg import Xor
    assert _test_args(Xor(x, y, 2))


def test_sympy__matrices__matrices__DeferredVector():
    from sympy.matrices.matrices import DeferredVector
    assert _test_args(DeferredVector("X"))


@SKIP("abstract class")
def test_sympy__matrices__expressions__matexpr__MatrixBase():
    pass


def test_sympy__matrices__immutable__ImmutableMatrix():
    from sympy.matrices.immutable import ImmutableMatrix
    m = ImmutableMatrix([[1, 2], [3, 4]])
    assert _test_args(m)
    assert _test_args(Basic(*list(m)))
    m = ImmutableMatrix(1, 1, [1])
    assert _test_args(m)
    assert _test_args(Basic(*list(m)))
    m = ImmutableMatrix(2, 2, lambda i, j: 1)
    assert m[0, 0] is S.One
    m = ImmutableMatrix(2, 2, lambda i, j: 1/(1 + i) + 1/(1 + j))
    assert m[1, 1] is S.One  # true div. will give 1.0 if i,j not sympified
    assert _test_args(m)
    assert _test_args(Basic(*list(m)))


def test_sympy__matrices__immutable__ImmutableSparseMatrix():
    from sympy.matrices.immutable import ImmutableSparseMatrix
    m = ImmutableSparseMatrix([[1, 2], [3, 4]])
    assert _test_args(m)
    assert _test_args(Basic(*list(m)))
    m = ImmutableSparseMatrix(1, 1, {(0, 0): 1})
    assert _test_args(m)
    assert _test_args(Basic(*list(m)))
    m = ImmutableSparseMatrix(1, 1, [1])
    assert _test_args(m)
    assert _test_args(Basic(*list(m)))
    m = ImmutableSparseMatrix(2, 2, lambda i, j: 1)
    assert m[0, 0] is S.One
    m = ImmutableSparseMatrix(2, 2, lambda i, j: 1/(1 + i) + 1/(1 + j))
    assert m[1, 1] is S.One  # true div. will give 1.0 if i,j not sympified
    assert _test_args(m)
    assert _test_args(Basic(*list(m)))


def test_sympy__matrices__expressions__slice__MatrixSlice():
    from sympy.matrices.expressions.slice import MatrixSlice
    from sympy.matrices.expressions import MatrixSymbol
    X = MatrixSymbol('X', 4, 4)
    assert _test_args(MatrixSlice(X, (0, 2), (0, 2)))


def test_sympy__matrices__expressions__blockmatrix__BlockDiagMatrix():
    from sympy.matrices.expressions.blockmatrix import BlockDiagMatrix
    from sympy.matrices.expressions import MatrixSymbol
    X = MatrixSymbol('X', x, x)
    Y = MatrixSymbol('Y', y, y)
    assert _test_args(BlockDiagMatrix(X, Y))


def test_sympy__matrices__expressions__blockmatrix__BlockMatrix():
    from sympy.matrices.expressions.blockmatrix import BlockMatrix
    from sympy.matrices.expressions import MatrixSymbol, ZeroMatrix
    X = MatrixSymbol('X', x, x)
    Y = MatrixSymbol('Y', y, y)
    Z = MatrixSymbol('Z', x, y)
    O = ZeroMatrix(y, x)
    assert _test_args(BlockMatrix([[X, Z], [O, Y]]))


def test_sympy__matrices__expressions__inverse__Inverse():
    from sympy.matrices.expressions.inverse import Inverse
    from sympy.matrices.expressions import MatrixSymbol
    assert _test_args(Inverse(MatrixSymbol('A', 3, 3)))


def test_sympy__matrices__expressions__matadd__MatAdd():
    from sympy.matrices.expressions.matadd import MatAdd
    from sympy.matrices.expressions import MatrixSymbol
    X = MatrixSymbol('X', x, y)
    Y = MatrixSymbol('Y', x, y)
    assert _test_args(MatAdd(X, Y))


def test_sympy__matrices__expressions__matexpr__Identity():
    from sympy.matrices.expressions.matexpr import Identity
    assert _test_args(Identity(3))


@SKIP("abstract class")
def test_sympy__matrices__expressions__matexpr__MatrixExpr():
    pass


def test_sympy__matrices__expressions__matexpr__MatrixElement():
    from sympy.matrices.expressions.matexpr import MatrixSymbol, MatrixElement
    from sympy import S
    assert _test_args(MatrixElement(MatrixSymbol('A', 3, 5), S(2), S(3)))


@XFAIL
def test_sympy__matrices__expressions__matexpr__MatrixSymbol():
    from sympy.matrices.expressions.matexpr import MatrixSymbol
    assert _test_args(MatrixSymbol('A', 3, 5))


def test_sympy__matrices__expressions__matexpr__ZeroMatrix():
    from sympy.matrices.expressions.matexpr import ZeroMatrix
    assert _test_args(ZeroMatrix(3, 5))


def test_sympy__matrices__expressions__matmul__MatMul():
    from sympy.matrices.expressions.matmul import MatMul
    from sympy.matrices.expressions import MatrixSymbol
    X = MatrixSymbol('X', x, y)
    Y = MatrixSymbol('Y', y, x)
    assert _test_args(MatMul(X, Y))


def test_sympy__matrices__expressions__diagonal__DiagonalMatrix():
    from sympy.matrices.expressions.diagonal import DiagonalMatrix
    from sympy.matrices.expressions import MatrixSymbol
    x = MatrixSymbol('x', 10, 1)
    assert _test_args(DiagonalMatrix(x))


def test_sympy__matrices__expressions__diagonal__DiagonalOf():
    from sympy.matrices.expressions.diagonal import DiagonalOf
    from
sympy.matrices.expressions import MatrixSymbol X = MatrixSymbol('x', 10, 10) assert _test_args(DiagonalOf(X)) def test_sympy__matrices__expressions__hadamard__HadamardProduct(): from sympy.matrices.expressions.hadamard import HadamardProduct from sympy.matrices.expressions import MatrixSymbol X = MatrixSymbol('X', x, y) Y = MatrixSymbol('Y', x, y) assert _test_args(HadamardProduct(X, Y)) def test_sympy__matrices__expressions__matpow__MatPow(): from sympy.matrices.expressions.matpow import MatPow from sympy.matrices.expressions import MatrixSymbol X = MatrixSymbol('X', x, x) assert _test_args(MatPow(X, 2)) def test_sympy__matrices__expressions__transpose__Transpose(): from sympy.matrices.expressions.transpose import Transpose from sympy.matrices.expressions import MatrixSymbol assert _test_args(Transpose(MatrixSymbol('A', 3, 5))) def test_sympy__matrices__expressions__adjoint__Adjoint(): from sympy.matrices.expressions.adjoint import Adjoint from sympy.matrices.expressions import MatrixSymbol assert _test_args(Adjoint(MatrixSymbol('A', 3, 5))) def test_sympy__matrices__expressions__trace__Trace(): from sympy.matrices.expressions.trace import Trace from sympy.matrices.expressions import MatrixSymbol assert _test_args(Trace(MatrixSymbol('A', 3, 3))) def test_sympy__matrices__expressions__determinant__Determinant(): from sympy.matrices.expressions.determinant import Determinant from sympy.matrices.expressions import MatrixSymbol assert _test_args(Determinant(MatrixSymbol('A', 3, 3))) def test_sympy__matrices__expressions__funcmatrix__FunctionMatrix(): from sympy.matrices.expressions.funcmatrix import FunctionMatrix from sympy import Lambda, symbols i, j = symbols('i,j') assert _test_args(FunctionMatrix(3, 3, Lambda((i, j), i - j) )) def test_sympy__matrices__expressions__fourier__DFT(): from sympy.matrices.expressions.fourier import DFT from sympy import S assert _test_args(DFT(S(2))) def test_sympy__matrices__expressions__fourier__IDFT(): from sympy.matrices.expressions.fourier import IDFT from sympy import S assert _test_args(IDFT(S(2))) from sympy.matrices.expressions import MatrixSymbol X = MatrixSymbol('X', 10, 10) def test_sympy__matrices__expressions__factorizations__LofLU(): from sympy.matrices.expressions.factorizations import LofLU assert _test_args(LofLU(X)) def test_sympy__matrices__expressions__factorizations__UofLU(): from sympy.matrices.expressions.factorizations import UofLU assert _test_args(UofLU(X)) def test_sympy__matrices__expressions__factorizations__QofQR(): from sympy.matrices.expressions.factorizations import QofQR assert _test_args(QofQR(X)) def test_sympy__matrices__expressions__factorizations__RofQR(): from sympy.matrices.expressions.factorizations import RofQR assert _test_args(RofQR(X)) def test_sympy__matrices__expressions__factorizations__LofCholesky(): from sympy.matrices.expressions.factorizations import LofCholesky assert _test_args(LofCholesky(X)) def test_sympy__matrices__expressions__factorizations__UofCholesky(): from sympy.matrices.expressions.factorizations import UofCholesky assert _test_args(UofCholesky(X)) def test_sympy__matrices__expressions__factorizations__EigenVectors(): from sympy.matrices.expressions.factorizations import EigenVectors assert _test_args(EigenVectors(X)) def test_sympy__matrices__expressions__factorizations__EigenValues(): from sympy.matrices.expressions.factorizations import EigenValues assert _test_args(EigenValues(X)) def test_sympy__matrices__expressions__factorizations__UofSVD(): from 
sympy.matrices.expressions.factorizations import UofSVD assert _test_args(UofSVD(X)) def test_sympy__matrices__expressions__factorizations__VofSVD(): from sympy.matrices.expressions.factorizations import VofSVD assert _test_args(VofSVD(X)) def test_sympy__matrices__expressions__factorizations__SofSVD(): from sympy.matrices.expressions.factorizations import SofSVD assert _test_args(SofSVD(X)) @SKIP("abstract class") def test_sympy__matrices__expressions__factorizations__Factorization(): pass def test_sympy__physics__vector__frame__CoordinateSym(): from sympy.physics.vector import CoordinateSym from sympy.physics.vector import ReferenceFrame assert _test_args(CoordinateSym('R_x', ReferenceFrame('R'), 0)) def test_sympy__physics__paulialgebra__Pauli(): from sympy.physics.paulialgebra import Pauli assert _test_args(Pauli(1)) def test_sympy__physics__quantum__anticommutator__AntiCommutator(): from sympy.physics.quantum.anticommutator import AntiCommutator assert _test_args(AntiCommutator(x, y)) def test_sympy__physics__quantum__cartesian__PositionBra3D(): from sympy.physics.quantum.cartesian import PositionBra3D assert _test_args(PositionBra3D(x, y, z)) def test_sympy__physics__quantum__cartesian__PositionKet3D(): from sympy.physics.quantum.cartesian import PositionKet3D assert _test_args(PositionKet3D(x, y, z)) def test_sympy__physics__quantum__cartesian__PositionState3D(): from sympy.physics.quantum.cartesian import PositionState3D assert _test_args(PositionState3D(x, y, z)) def test_sympy__physics__quantum__cartesian__PxBra(): from sympy.physics.quantum.cartesian import PxBra assert _test_args(PxBra(x, y, z)) def test_sympy__physics__quantum__cartesian__PxKet(): from sympy.physics.quantum.cartesian import PxKet assert _test_args(PxKet(x, y, z)) def test_sympy__physics__quantum__cartesian__PxOp(): from sympy.physics.quantum.cartesian import PxOp assert _test_args(PxOp(x, y, z)) def test_sympy__physics__quantum__cartesian__XBra(): from sympy.physics.quantum.cartesian import XBra assert _test_args(XBra(x)) def test_sympy__physics__quantum__cartesian__XKet(): from sympy.physics.quantum.cartesian import XKet assert _test_args(XKet(x)) def test_sympy__physics__quantum__cartesian__XOp(): from sympy.physics.quantum.cartesian import XOp assert _test_args(XOp(x)) def test_sympy__physics__quantum__cartesian__YOp(): from sympy.physics.quantum.cartesian import YOp assert _test_args(YOp(x)) def test_sympy__physics__quantum__cartesian__ZOp(): from sympy.physics.quantum.cartesian import ZOp assert _test_args(ZOp(x)) def test_sympy__physics__quantum__cg__CG(): from sympy.physics.quantum.cg import CG from sympy import S assert _test_args(CG(S(3)/2, S(3)/2, S(1)/2, -S(1)/2, 1, 1)) def test_sympy__physics__quantum__cg__Wigner3j(): from sympy.physics.quantum.cg import Wigner3j assert _test_args(Wigner3j(6, 0, 4, 0, 2, 0)) def test_sympy__physics__quantum__cg__Wigner6j(): from sympy.physics.quantum.cg import Wigner6j assert _test_args(Wigner6j(1, 2, 3, 2, 1, 2)) def test_sympy__physics__quantum__cg__Wigner9j(): from sympy.physics.quantum.cg import Wigner9j assert _test_args(Wigner9j(2, 1, 1, S(3)/2, S(1)/2, 1, S(1)/2, S(1)/2, 0)) def test_sympy__physics__quantum__circuitplot__Mz(): from sympy.physics.quantum.circuitplot import Mz assert _test_args(Mz(0)) def test_sympy__physics__quantum__circuitplot__Mx(): from sympy.physics.quantum.circuitplot import Mx assert _test_args(Mx(0)) def test_sympy__physics__quantum__commutator__Commutator(): from sympy.physics.quantum.commutator import Commutator A, B = 
symbols('A,B', commutative=False) assert _test_args(Commutator(A, B)) def test_sympy__physics__quantum__constants__HBar(): from sympy.physics.quantum.constants import HBar assert _test_args(HBar()) def test_sympy__physics__quantum__dagger__Dagger(): from sympy.physics.quantum.dagger import Dagger from sympy.physics.quantum.state import Ket assert _test_args(Dagger(Dagger(Ket('psi')))) def test_sympy__physics__quantum__gate__CGate(): from sympy.physics.quantum.gate import CGate, Gate assert _test_args(CGate((0, 1), Gate(2))) def test_sympy__physics__quantum__gate__CGateS(): from sympy.physics.quantum.gate import CGateS, Gate assert _test_args(CGateS((0, 1), Gate(2))) def test_sympy__physics__quantum__gate__CNotGate(): from sympy.physics.quantum.gate import CNotGate assert _test_args(CNotGate(0, 1)) def test_sympy__physics__quantum__gate__Gate(): from sympy.physics.quantum.gate import Gate assert _test_args(Gate(0)) def test_sympy__physics__quantum__gate__HadamardGate(): from sympy.physics.quantum.gate import HadamardGate assert _test_args(HadamardGate(0)) def test_sympy__physics__quantum__gate__IdentityGate(): from sympy.physics.quantum.gate import IdentityGate assert _test_args(IdentityGate(0)) def test_sympy__physics__quantum__gate__OneQubitGate(): from sympy.physics.quantum.gate import OneQubitGate assert _test_args(OneQubitGate(0)) def test_sympy__physics__quantum__gate__PhaseGate(): from sympy.physics.quantum.gate import PhaseGate assert _test_args(PhaseGate(0)) def test_sympy__physics__quantum__gate__SwapGate(): from sympy.physics.quantum.gate import SwapGate assert _test_args(SwapGate(0, 1)) def test_sympy__physics__quantum__gate__TGate(): from sympy.physics.quantum.gate import TGate assert _test_args(TGate(0)) def test_sympy__physics__quantum__gate__TwoQubitGate(): from sympy.physics.quantum.gate import TwoQubitGate assert _test_args(TwoQubitGate(0)) def test_sympy__physics__quantum__gate__UGate(): from sympy.physics.quantum.gate import UGate from sympy.matrices.immutable import ImmutableMatrix from sympy import Integer, Tuple assert _test_args( UGate(Tuple(Integer(1)), ImmutableMatrix([[1, 0], [0, 2]]))) def test_sympy__physics__quantum__gate__XGate(): from sympy.physics.quantum.gate import XGate assert _test_args(XGate(0)) def test_sympy__physics__quantum__gate__YGate(): from sympy.physics.quantum.gate import YGate assert _test_args(YGate(0)) def test_sympy__physics__quantum__gate__ZGate(): from sympy.physics.quantum.gate import ZGate assert _test_args(ZGate(0)) @SKIP("TODO: sympy.physics") def test_sympy__physics__quantum__grover__OracleGate(): from sympy.physics.quantum.grover import OracleGate assert _test_args(OracleGate()) def test_sympy__physics__quantum__grover__WGate(): from sympy.physics.quantum.grover import WGate assert _test_args(WGate(1)) def test_sympy__physics__quantum__hilbert__ComplexSpace(): from sympy.physics.quantum.hilbert import ComplexSpace assert _test_args(ComplexSpace(x)) def test_sympy__physics__quantum__hilbert__DirectSumHilbertSpace(): from sympy.physics.quantum.hilbert import DirectSumHilbertSpace, ComplexSpace, FockSpace c = ComplexSpace(2) f = FockSpace() assert _test_args(DirectSumHilbertSpace(c, f)) def test_sympy__physics__quantum__hilbert__FockSpace(): from sympy.physics.quantum.hilbert import FockSpace assert _test_args(FockSpace()) def test_sympy__physics__quantum__hilbert__HilbertSpace(): from sympy.physics.quantum.hilbert import HilbertSpace assert _test_args(HilbertSpace()) def test_sympy__physics__quantum__hilbert__L2(): from 
sympy.physics.quantum.hilbert import L2 from sympy import oo, Interval assert _test_args(L2(Interval(0, oo))) def test_sympy__physics__quantum__hilbert__TensorPowerHilbertSpace(): from sympy.physics.quantum.hilbert import TensorPowerHilbertSpace, FockSpace f = FockSpace() assert _test_args(TensorPowerHilbertSpace(f, 2)) def test_sympy__physics__quantum__hilbert__TensorProductHilbertSpace(): from sympy.physics.quantum.hilbert import TensorProductHilbertSpace, FockSpace, ComplexSpace c = ComplexSpace(2) f = FockSpace() assert _test_args(TensorProductHilbertSpace(f, c)) def test_sympy__physics__quantum__innerproduct__InnerProduct(): from sympy.physics.quantum import Bra, Ket, InnerProduct b = Bra('b') k = Ket('k') assert _test_args(InnerProduct(b, k)) def test_sympy__physics__quantum__operator__DifferentialOperator(): from sympy.physics.quantum.operator import DifferentialOperator from sympy import Derivative, Function f = Function('f') assert _test_args(DifferentialOperator(1/x*Derivative(f(x), x), f(x))) def test_sympy__physics__quantum__operator__HermitianOperator(): from sympy.physics.quantum.operator import HermitianOperator assert _test_args(HermitianOperator('H')) def test_sympy__physics__quantum__operator__IdentityOperator(): from sympy.physics.quantum.operator import IdentityOperator assert _test_args(IdentityOperator(5)) def test_sympy__physics__quantum__operator__Operator(): from sympy.physics.quantum.operator import Operator assert _test_args(Operator('A')) def test_sympy__physics__quantum__operator__OuterProduct(): from sympy.physics.quantum.operator import OuterProduct from sympy.physics.quantum import Ket, Bra b = Bra('b') k = Ket('k') assert _test_args(OuterProduct(k, b)) def test_sympy__physics__quantum__operator__UnitaryOperator(): from sympy.physics.quantum.operator import UnitaryOperator assert _test_args(UnitaryOperator('U')) def test_sympy__physics__quantum__piab__PIABBra(): from sympy.physics.quantum.piab import PIABBra assert _test_args(PIABBra('B')) def test_sympy__physics__quantum__boson__BosonOp(): from sympy.physics.quantum.boson import BosonOp assert _test_args(BosonOp('a')) assert _test_args(BosonOp('a', False)) def test_sympy__physics__quantum__boson__BosonFockKet(): from sympy.physics.quantum.boson import BosonFockKet assert _test_args(BosonFockKet(1)) def test_sympy__physics__quantum__boson__BosonFockBra(): from sympy.physics.quantum.boson import BosonFockBra assert _test_args(BosonFockBra(1)) def test_sympy__physics__quantum__boson__BosonCoherentKet(): from sympy.physics.quantum.boson import BosonCoherentKet assert _test_args(BosonCoherentKet(1)) def test_sympy__physics__quantum__boson__BosonCoherentBra(): from sympy.physics.quantum.boson import BosonCoherentBra assert _test_args(BosonCoherentBra(1)) def test_sympy__physics__quantum__fermion__FermionOp(): from sympy.physics.quantum.fermion import FermionOp assert _test_args(FermionOp('c')) assert _test_args(FermionOp('c', False)) def test_sympy__physics__quantum__fermion__FermionFockKet(): from sympy.physics.quantum.fermion import FermionFockKet assert _test_args(FermionFockKet(1)) def test_sympy__physics__quantum__fermion__FermionFockBra(): from sympy.physics.quantum.fermion import FermionFockBra assert _test_args(FermionFockBra(1)) def test_sympy__physics__quantum__pauli__SigmaOpBase(): from sympy.physics.quantum.pauli import SigmaOpBase assert _test_args(SigmaOpBase()) def test_sympy__physics__quantum__pauli__SigmaX(): from sympy.physics.quantum.pauli import SigmaX assert _test_args(SigmaX()) def 
test_sympy__physics__quantum__pauli__SigmaY(): from sympy.physics.quantum.pauli import SigmaY assert _test_args(SigmaY()) def test_sympy__physics__quantum__pauli__SigmaZ(): from sympy.physics.quantum.pauli import SigmaZ assert _test_args(SigmaZ()) def test_sympy__physics__quantum__pauli__SigmaMinus(): from sympy.physics.quantum.pauli import SigmaMinus assert _test_args(SigmaMinus()) def test_sympy__physics__quantum__pauli__SigmaPlus(): from sympy.physics.quantum.pauli import SigmaPlus assert _test_args(SigmaPlus()) def test_sympy__physics__quantum__pauli__SigmaZKet(): from sympy.physics.quantum.pauli import SigmaZKet assert _test_args(SigmaZKet(0)) def test_sympy__physics__quantum__pauli__SigmaZBra(): from sympy.physics.quantum.pauli import SigmaZBra assert _test_args(SigmaZBra(0)) def test_sympy__physics__quantum__piab__PIABHamiltonian(): from sympy.physics.quantum.piab import PIABHamiltonian assert _test_args(PIABHamiltonian('P')) def test_sympy__physics__quantum__piab__PIABKet(): from sympy.physics.quantum.piab import PIABKet assert _test_args(PIABKet('K')) def test_sympy__physics__quantum__qexpr__QExpr(): from sympy.physics.quantum.qexpr import QExpr assert _test_args(QExpr(0)) def test_sympy__physics__quantum__qft__Fourier(): from sympy.physics.quantum.qft import Fourier assert _test_args(Fourier(0, 1)) def test_sympy__physics__quantum__qft__IQFT(): from sympy.physics.quantum.qft import IQFT assert _test_args(IQFT(0, 1)) def test_sympy__physics__quantum__qft__QFT(): from sympy.physics.quantum.qft import QFT assert _test_args(QFT(0, 1)) def test_sympy__physics__quantum__qft__RkGate(): from sympy.physics.quantum.qft import RkGate assert _test_args(RkGate(0, 1)) def test_sympy__physics__quantum__qubit__IntQubit(): from sympy.physics.quantum.qubit import IntQubit assert _test_args(IntQubit(0)) def test_sympy__physics__quantum__qubit__IntQubitBra(): from sympy.physics.quantum.qubit import IntQubitBra assert _test_args(IntQubitBra(0)) def test_sympy__physics__quantum__qubit__IntQubitState(): from sympy.physics.quantum.qubit import IntQubitState, QubitState assert _test_args(IntQubitState(QubitState(0, 1))) def test_sympy__physics__quantum__qubit__Qubit(): from sympy.physics.quantum.qubit import Qubit assert _test_args(Qubit(0, 0, 0)) def test_sympy__physics__quantum__qubit__QubitBra(): from sympy.physics.quantum.qubit import QubitBra assert _test_args(QubitBra('1', 0)) def test_sympy__physics__quantum__qubit__QubitState(): from sympy.physics.quantum.qubit import QubitState assert _test_args(QubitState(0, 1)) def test_sympy__physics__quantum__density__Density(): from sympy.physics.quantum.density import Density from sympy.physics.quantum.state import Ket assert _test_args(Density([Ket(0), 0.5], [Ket(1), 0.5])) @SKIP("TODO: sympy.physics.quantum.shor: Cmod Not Implemented") def test_sympy__physics__quantum__shor__CMod(): from sympy.physics.quantum.shor import CMod assert _test_args(CMod()) def test_sympy__physics__quantum__spin__CoupledSpinState(): from sympy.physics.quantum.spin import CoupledSpinState assert _test_args(CoupledSpinState(1, 0, (1, 1))) assert _test_args(CoupledSpinState(1, 0, (1, S(1)/2, S(1)/2))) assert _test_args(CoupledSpinState( 1, 0, (1, S(1)/2, S(1)/2), ((2, 3, S(1)/2), (1, 2, 1)) )) j, m, j1, j2, j3, j12, x = symbols('j m j1:4 j12 x') assert CoupledSpinState( j, m, (j1, j2, j3)).subs(j2, x) == CoupledSpinState(j, m, (j1, x, j3)) assert CoupledSpinState(j, m, (j1, j2, j3), ((1, 3, j12), (1, 2, j)) ).subs(j12, x) == \ CoupledSpinState(j, m, (j1, j2, j3), ((1, 3, x), (1, 
2, j)) ) def test_sympy__physics__quantum__spin__J2Op(): from sympy.physics.quantum.spin import J2Op assert _test_args(J2Op('J')) def test_sympy__physics__quantum__spin__JminusOp(): from sympy.physics.quantum.spin import JminusOp assert _test_args(JminusOp('J')) def test_sympy__physics__quantum__spin__JplusOp(): from sympy.physics.quantum.spin import JplusOp assert _test_args(JplusOp('J')) def test_sympy__physics__quantum__spin__JxBra(): from sympy.physics.quantum.spin import JxBra assert _test_args(JxBra(1, 0)) def test_sympy__physics__quantum__spin__JxBraCoupled(): from sympy.physics.quantum.spin import JxBraCoupled assert _test_args(JxBraCoupled(1, 0, (1, 1))) def test_sympy__physics__quantum__spin__JxKet(): from sympy.physics.quantum.spin import JxKet assert _test_args(JxKet(1, 0)) def test_sympy__physics__quantum__spin__JxKetCoupled(): from sympy.physics.quantum.spin import JxKetCoupled assert _test_args(JxKetCoupled(1, 0, (1, 1))) def test_sympy__physics__quantum__spin__JxOp(): from sympy.physics.quantum.spin import JxOp assert _test_args(JxOp('J')) def test_sympy__physics__quantum__spin__JyBra(): from sympy.physics.quantum.spin import JyBra assert _test_args(JyBra(1, 0)) def test_sympy__physics__quantum__spin__JyBraCoupled(): from sympy.physics.quantum.spin import JyBraCoupled assert _test_args(JyBraCoupled(1, 0, (1, 1))) def test_sympy__physics__quantum__spin__JyKet(): from sympy.physics.quantum.spin import JyKet assert _test_args(JyKet(1, 0)) def test_sympy__physics__quantum__spin__JyKetCoupled(): from sympy.physics.quantum.spin import JyKetCoupled assert _test_args(JyKetCoupled(1, 0, (1, 1))) def test_sympy__physics__quantum__spin__JyOp(): from sympy.physics.quantum.spin import JyOp assert _test_args(JyOp('J')) def test_sympy__physics__quantum__spin__JzBra(): from sympy.physics.quantum.spin import JzBra assert _test_args(JzBra(1, 0)) def test_sympy__physics__quantum__spin__JzBraCoupled(): from sympy.physics.quantum.spin import JzBraCoupled assert _test_args(JzBraCoupled(1, 0, (1, 1))) def test_sympy__physics__quantum__spin__JzKet(): from sympy.physics.quantum.spin import JzKet assert _test_args(JzKet(1, 0)) def test_sympy__physics__quantum__spin__JzKetCoupled(): from sympy.physics.quantum.spin import JzKetCoupled assert _test_args(JzKetCoupled(1, 0, (1, 1))) def test_sympy__physics__quantum__spin__JzOp(): from sympy.physics.quantum.spin import JzOp assert _test_args(JzOp('J')) def test_sympy__physics__quantum__spin__Rotation(): from sympy.physics.quantum.spin import Rotation from sympy import pi assert _test_args(Rotation(pi, 0, pi/2)) def test_sympy__physics__quantum__spin__SpinState(): from sympy.physics.quantum.spin import SpinState assert _test_args(SpinState(1, 0)) def test_sympy__physics__quantum__spin__WignerD(): from sympy.physics.quantum.spin import WignerD assert _test_args(WignerD(0, 1, 2, 3, 4, 5)) def test_sympy__physics__quantum__state__Bra(): from sympy.physics.quantum.state import Bra assert _test_args(Bra(0)) def test_sympy__physics__quantum__state__BraBase(): from sympy.physics.quantum.state import BraBase assert _test_args(BraBase(0)) def test_sympy__physics__quantum__state__Ket(): from sympy.physics.quantum.state import Ket assert _test_args(Ket(0)) def test_sympy__physics__quantum__state__KetBase(): from sympy.physics.quantum.state import KetBase assert _test_args(KetBase(0)) def test_sympy__physics__quantum__state__State(): from sympy.physics.quantum.state import State assert _test_args(State(0)) def test_sympy__physics__quantum__state__StateBase(): from 
sympy.physics.quantum.state import StateBase assert _test_args(StateBase(0)) def test_sympy__physics__quantum__state__TimeDepBra(): from sympy.physics.quantum.state import TimeDepBra assert _test_args(TimeDepBra('psi', 't')) def test_sympy__physics__quantum__state__TimeDepKet(): from sympy.physics.quantum.state import TimeDepKet assert _test_args(TimeDepKet('psi', 't')) def test_sympy__physics__quantum__state__TimeDepState(): from sympy.physics.quantum.state import TimeDepState assert _test_args(TimeDepState('psi', 't')) def test_sympy__physics__quantum__state__Wavefunction(): from sympy.physics.quantum.state import Wavefunction from sympy.functions import sin from sympy import Piecewise, pi n = 1 L = 1 g = Piecewise((0, x < 0), (0, x > L), (sqrt(2//L)*sin(n*pi*x/L), True)) assert _test_args(Wavefunction(g, x)) def test_sympy__physics__quantum__tensorproduct__TensorProduct(): from sympy.physics.quantum.tensorproduct import TensorProduct assert _test_args(TensorProduct(x, y)) def test_sympy__physics__quantum__identitysearch__GateIdentity(): from sympy.physics.quantum.gate import X from sympy.physics.quantum.identitysearch import GateIdentity assert _test_args(GateIdentity(X(0), X(0))) def test_sympy__physics__quantum__sho1d__SHOOp(): from sympy.physics.quantum.sho1d import SHOOp assert _test_args(SHOOp('a')) def test_sympy__physics__quantum__sho1d__RaisingOp(): from sympy.physics.quantum.sho1d import RaisingOp assert _test_args(RaisingOp('a')) def test_sympy__physics__quantum__sho1d__LoweringOp(): from sympy.physics.quantum.sho1d import LoweringOp assert _test_args(LoweringOp('a')) def test_sympy__physics__quantum__sho1d__NumberOp(): from sympy.physics.quantum.sho1d import NumberOp assert _test_args(NumberOp('N')) def test_sympy__physics__quantum__sho1d__Hamiltonian(): from sympy.physics.quantum.sho1d import Hamiltonian assert _test_args(Hamiltonian('H')) def test_sympy__physics__quantum__sho1d__SHOState(): from sympy.physics.quantum.sho1d import SHOState assert _test_args(SHOState(0)) def test_sympy__physics__quantum__sho1d__SHOKet(): from sympy.physics.quantum.sho1d import SHOKet assert _test_args(SHOKet(0)) def test_sympy__physics__quantum__sho1d__SHOBra(): from sympy.physics.quantum.sho1d import SHOBra assert _test_args(SHOBra(0)) def test_sympy__physics__secondquant__AnnihilateBoson(): from sympy.physics.secondquant import AnnihilateBoson assert _test_args(AnnihilateBoson(0)) def test_sympy__physics__secondquant__AnnihilateFermion(): from sympy.physics.secondquant import AnnihilateFermion assert _test_args(AnnihilateFermion(0)) @SKIP("abstract class") def test_sympy__physics__secondquant__Annihilator(): pass def test_sympy__physics__secondquant__AntiSymmetricTensor(): from sympy.physics.secondquant import AntiSymmetricTensor i, j = symbols('i j', below_fermi=True) a, b = symbols('a b', above_fermi=True) assert _test_args(AntiSymmetricTensor('v', (a, i), (b, j))) def test_sympy__physics__secondquant__BosonState(): from sympy.physics.secondquant import BosonState assert _test_args(BosonState((0, 1))) @SKIP("abstract class") def test_sympy__physics__secondquant__BosonicOperator(): pass def test_sympy__physics__secondquant__Commutator(): from sympy.physics.secondquant import Commutator assert _test_args(Commutator(x, y)) def test_sympy__physics__secondquant__CreateBoson(): from sympy.physics.secondquant import CreateBoson assert _test_args(CreateBoson(0)) def test_sympy__physics__secondquant__CreateFermion(): from sympy.physics.secondquant import CreateFermion assert 
_test_args(CreateFermion(0)) @SKIP("abstract class") def test_sympy__physics__secondquant__Creator(): pass def test_sympy__physics__secondquant__Dagger(): from sympy.physics.secondquant import Dagger from sympy import I assert _test_args(Dagger(2*I)) def test_sympy__physics__secondquant__FermionState(): from sympy.physics.secondquant import FermionState assert _test_args(FermionState((0, 1))) def test_sympy__physics__secondquant__FermionicOperator(): from sympy.physics.secondquant import FermionicOperator assert _test_args(FermionicOperator(0)) def test_sympy__physics__secondquant__FockState(): from sympy.physics.secondquant import FockState assert _test_args(FockState((0, 1))) def test_sympy__physics__secondquant__FockStateBosonBra(): from sympy.physics.secondquant import FockStateBosonBra assert _test_args(FockStateBosonBra((0, 1))) def test_sympy__physics__secondquant__FockStateBosonKet(): from sympy.physics.secondquant import FockStateBosonKet assert _test_args(FockStateBosonKet((0, 1))) def test_sympy__physics__secondquant__FockStateBra(): from sympy.physics.secondquant import FockStateBra assert _test_args(FockStateBra((0, 1))) def test_sympy__physics__secondquant__FockStateFermionBra(): from sympy.physics.secondquant import FockStateFermionBra assert _test_args(FockStateFermionBra((0, 1))) def test_sympy__physics__secondquant__FockStateFermionKet(): from sympy.physics.secondquant import FockStateFermionKet assert _test_args(FockStateFermionKet((0, 1))) def test_sympy__physics__secondquant__FockStateKet(): from sympy.physics.secondquant import FockStateKet assert _test_args(FockStateKet((0, 1))) def test_sympy__physics__secondquant__InnerProduct(): from sympy.physics.secondquant import InnerProduct from sympy.physics.secondquant import FockStateKet, FockStateBra assert _test_args(InnerProduct(FockStateBra((0, 1)), FockStateKet((0, 1)))) def test_sympy__physics__secondquant__NO(): from sympy.physics.secondquant import NO, F, Fd assert _test_args(NO(Fd(x)*F(y))) def test_sympy__physics__secondquant__PermutationOperator(): from sympy.physics.secondquant import PermutationOperator assert _test_args(PermutationOperator(0, 1)) def test_sympy__physics__secondquant__SqOperator(): from sympy.physics.secondquant import SqOperator assert _test_args(SqOperator(0)) def test_sympy__physics__secondquant__TensorSymbol(): from sympy.physics.secondquant import TensorSymbol assert _test_args(TensorSymbol(x)) def test_sympy__physics__units__Unit(): from sympy.physics.units import Unit assert _test_args(Unit("meter", "m")) def test_sympy__physics__unitsystems__dimensions__Dimension(): from sympy.physics.unitsystems.dimensions import Dimension assert _test_args(Dimension(name="length", symbol="L", length=1)) def test_sympy__physics__unitsystems__quantities__Quantity(): from sympy.physics.unitsystems.quantities import Quantity from sympy.physics.unitsystems.systems import mks assert _test_args(Quantity(10, mks["m"])) def test_sympy__physics__unitsystems__units__Constant(): from sympy.physics.unitsystems.units import Constant from sympy.physics.unitsystems.dimensions import Dimension length = Dimension(length=1) assert _test_args(Constant(length, abbrev="u", factor=10)) def test_sympy__physics__unitsystems__units__Unit(): from sympy.physics.unitsystems.units import Unit from sympy.physics.unitsystems.dimensions import Dimension length = Dimension(length=1) assert _test_args(Unit(length, abbrev="u", factor=10)) def test_sympy__core__numbers__AlgebraicNumber(): from sympy.core.numbers import AlgebraicNumber 
assert _test_args(AlgebraicNumber(sqrt(2), [1, 2, 3])) def test_sympy__polys__polytools__GroebnerBasis(): from sympy.polys.polytools import GroebnerBasis assert _test_args(GroebnerBasis([x, y, z], x, y, z)) def test_sympy__polys__polytools__Poly(): from sympy.polys.polytools import Poly assert _test_args(Poly(2, x, y)) def test_sympy__polys__polytools__PurePoly(): from sympy.polys.polytools import PurePoly assert _test_args(PurePoly(2, x, y)) def test_sympy__polys__rootoftools__RootOf(): from sympy.polys.rootoftools import RootOf assert _test_args(RootOf(x**3 + x + 1, 0)) def test_sympy__polys__rootoftools__RootSum(): from sympy.polys.rootoftools import RootSum assert _test_args(RootSum(x**3 + x + 1, sin)) def test_sympy__series__limits__Limit(): from sympy.series.limits import Limit assert _test_args(Limit(x, x, 0, dir='-')) def test_sympy__series__order__Order(): from sympy.series.order import Order assert _test_args(Order(1, x, y)) def test_sympy__simplify__hyperexpand__Hyper_Function(): from sympy.simplify.hyperexpand import Hyper_Function assert _test_args(Hyper_Function([2], [1])) def test_sympy__simplify__hyperexpand__G_Function(): from sympy.simplify.hyperexpand import G_Function assert _test_args(G_Function([2], [1], [], [])) def test_sympy__tensor__indexed__Idx(): from sympy.tensor.indexed import Idx assert _test_args(Idx('test')) assert _test_args(Idx(1, (0, 10))) def test_sympy__tensor__indexed__Indexed(): from sympy.tensor.indexed import Indexed, Idx assert _test_args(Indexed('A', Idx('i'), Idx('j'))) def test_sympy__tensor__indexed__IndexedBase(): from sympy.tensor.indexed import IndexedBase assert _test_args(IndexedBase('A', shape=(x, y))) assert _test_args(IndexedBase('A', 1)) assert _test_args(IndexedBase('A')[0, 1]) @XFAIL def test_sympy__physics__hep__gamma_matrices__GammaMatrixHead(): # This test fails, this class can be reconstructed from the *args # of an instance using `TensorHead(*args)` from sympy.physics.hep.gamma_matrices import GammaMatrixHead, Lorentz from sympy.tensor.tensor import tensor_indices i = tensor_indices('i', Lorentz) assert _test_args(GammaMatrixHead()) def test_sympy__tensor__tensor__TensorIndexType(): from sympy.tensor.tensor import TensorIndexType assert _test_args(TensorIndexType('Lorentz', metric=False)) def test_sympy__tensor__tensor__TensorSymmetry(): from sympy.tensor.tensor import TensorSymmetry, get_symmetric_group_sgs assert _test_args(TensorSymmetry(get_symmetric_group_sgs(2))) def test_sympy__tensor__tensor__TensorType(): from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, get_symmetric_group_sgs, TensorType Lorentz = TensorIndexType('Lorentz', dummy_fmt='L') sym = TensorSymmetry(get_symmetric_group_sgs(1)) assert _test_args(TensorType([Lorentz], sym)) def test_sympy__tensor__tensor__TensorHead(): from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, TensorType, get_symmetric_group_sgs, TensorHead Lorentz = TensorIndexType('Lorentz', dummy_fmt='L') sym = TensorSymmetry(get_symmetric_group_sgs(1)) S1 = TensorType([Lorentz], sym) assert _test_args(TensorHead('p', S1, 0)) def test_sympy__tensor__tensor__TensorIndex(): from sympy.tensor.tensor import TensorIndexType, TensorIndex Lorentz = TensorIndexType('Lorentz', dummy_fmt='L') assert _test_args(TensorIndex('i', Lorentz)) @SKIP("abstract class") def test_sympy__tensor__tensor__TensExpr(): pass def test_sympy__tensor__tensor__TensAdd(): from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, TensorType, get_symmetric_group_sgs, tensor_indices, TensAdd 
Lorentz = TensorIndexType('Lorentz', dummy_fmt='L') a, b = tensor_indices('a,b', Lorentz) sym = TensorSymmetry(get_symmetric_group_sgs(1)) S1 = TensorType([Lorentz], sym) p, q = S1('p,q') t1 = p(a) t2 = q(a) assert _test_args(TensAdd(t1, t2)) def test_sympy__tensor__tensor__Tensor(): from sympy.core import S from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, TensorType, get_symmetric_group_sgs, tensor_indices, TensMul, TIDS Lorentz = TensorIndexType('Lorentz', dummy_fmt='L') a, b = tensor_indices('a,b', Lorentz) sym = TensorSymmetry(get_symmetric_group_sgs(1)) S1 = TensorType([Lorentz], sym) p = S1('p') assert _test_args(p(a)) def test_sympy__tensor__tensor__TensMul(): from sympy.core import S from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, TensorType, get_symmetric_group_sgs, tensor_indices, TensMul, TIDS Lorentz = TensorIndexType('Lorentz', dummy_fmt='L') a, b = tensor_indices('a,b', Lorentz) sym = TensorSymmetry(get_symmetric_group_sgs(1)) S1 = TensorType([Lorentz], sym) p = S1('p') q = S1('q') assert _test_args(3*p(a)*q(b)) def test_as_coeff_add(): assert (7, (3*x, 4*x**2)) == (7 + 3*x + 4*x**2).as_coeff_add() def test_sympy__geometry__curve__Curve(): from sympy.geometry.curve import Curve assert _test_args(Curve((x, 1), (x, 0, 1))) def test_sympy__geometry__point__Point(): from sympy.geometry.point import Point assert _test_args(Point(0, 1)) def test_sympy__geometry__point3d__Point3D(): from sympy.geometry.point3d import Point3D assert _test_args(Point3D(0, 1, 2)) def test_sympy__geometry__ellipse__Ellipse(): from sympy.geometry.ellipse import Ellipse assert _test_args(Ellipse((0, 1), 2, 3)) def test_sympy__geometry__ellipse__Circle(): from sympy.geometry.ellipse import Circle assert _test_args(Circle((0, 1), 2)) @SKIP("abstract class") def test_sympy__geometry__line__LinearEntity(): pass def test_sympy__geometry__line__Line(): from sympy.geometry.line import Line assert _test_args(Line((0, 1), (2, 3))) def test_sympy__geometry__line__Ray(): from sympy.geometry.line import Ray assert _test_args(Ray((0, 1), (2, 3))) def test_sympy__geometry__line__Segment(): from sympy.geometry.line import Segment assert _test_args(Segment((0, 1), (2, 3))) @SKIP("abstract class") def test_sympy__geometry__line3d__LinearEntity3D(): pass def test_sympy__geometry__line3d__Line3D(): from sympy.geometry.line3d import Line3D assert _test_args(Line3D((0, 1, 1), (2, 3, 4))) def test_sympy__geometry__line3d__Segment3D(): from sympy.geometry.line3d import Segment3D assert _test_args(Segment3D((0, 1, 1), (2, 3, 4))) def test_sympy__geometry__line3d__Ray3D(): from sympy.geometry.line3d import Ray3D assert _test_args(Ray3D((0, 1, 1), (2, 3, 4))) def test_sympy__geometry__plane__Plane(): from sympy.geometry.plane import Plane assert _test_args(Plane((1, 1, 1), (-3, 4, -2), (1, 2, 3))) def test_sympy__geometry__polygon__Polygon(): from sympy.geometry.polygon import Polygon assert _test_args(Polygon((0, 1), (2, 3), (4, 5), (6, 7))) def test_sympy__geometry__polygon__RegularPolygon(): from sympy.geometry.polygon import RegularPolygon assert _test_args(RegularPolygon((0, 1), 2, 3, 4)) def test_sympy__geometry__polygon__Triangle(): from sympy.geometry.polygon import Triangle assert _test_args(Triangle((0, 1), (2, 3), (4, 5))) def test_sympy__geometry__entity__GeometryEntity(): from sympy.geometry.entity import GeometryEntity from sympy.geometry.point import Point assert _test_args(GeometryEntity(Point(1, 0), 1, [1, 2])) def test_sympy__diffgeom__diffgeom__Manifold(): from sympy.diffgeom 
import Manifold assert _test_args(Manifold('name', 3)) def test_sympy__diffgeom__diffgeom__Patch(): from sympy.diffgeom import Manifold, Patch assert _test_args(Patch('name', Manifold('name', 3))) def test_sympy__diffgeom__diffgeom__CoordSystem(): from sympy.diffgeom import Manifold, Patch, CoordSystem assert _test_args(CoordSystem('name', Patch('name', Manifold('name', 3)))) @XFAIL def test_sympy__diffgeom__diffgeom__Point(): from sympy.diffgeom import Manifold, Patch, CoordSystem, Point assert _test_args(Point( CoordSystem('name', Patch('name', Manifold('name', 3))), [x, y])) def test_sympy__diffgeom__diffgeom__BaseScalarField(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField cs = CoordSystem('name', Patch('name', Manifold('name', 3))) assert _test_args(BaseScalarField(cs, 0)) def test_sympy__diffgeom__diffgeom__BaseVectorField(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseVectorField cs = CoordSystem('name', Patch('name', Manifold('name', 3))) assert _test_args(BaseVectorField(cs, 0)) def test_sympy__diffgeom__diffgeom__Differential(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField, Differential cs = CoordSystem('name', Patch('name', Manifold('name', 3))) assert _test_args(Differential(BaseScalarField(cs, 0))) def test_sympy__diffgeom__diffgeom__Commutator(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseVectorField, Commutator cs = CoordSystem('name', Patch('name', Manifold('name', 3))) cs1 = CoordSystem('name1', Patch('name', Manifold('name', 3))) v = BaseVectorField(cs, 0) v1 = BaseVectorField(cs1, 0) assert _test_args(Commutator(v, v1)) def test_sympy__diffgeom__diffgeom__TensorProduct(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField, Differential, TensorProduct cs = CoordSystem('name', Patch('name', Manifold('name', 3))) d = Differential(BaseScalarField(cs, 0)) assert _test_args(TensorProduct(d, d)) def test_sympy__diffgeom__diffgeom__WedgeProduct(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField, Differential, WedgeProduct cs = CoordSystem('name', Patch('name', Manifold('name', 3))) d = Differential(BaseScalarField(cs, 0)) d1 = Differential(BaseScalarField(cs, 1)) assert _test_args(WedgeProduct(d, d1)) def test_sympy__diffgeom__diffgeom__LieDerivative(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField, Differential, BaseVectorField, LieDerivative cs = CoordSystem('name', Patch('name', Manifold('name', 3))) d = Differential(BaseScalarField(cs, 0)) v = BaseVectorField(cs, 0) assert _test_args(LieDerivative(v, d)) @XFAIL def test_sympy__diffgeom__diffgeom__BaseCovarDerivativeOp(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseCovarDerivativeOp cs = CoordSystem('name', Patch('name', Manifold('name', 3))) assert _test_args(BaseCovarDerivativeOp(cs, 0, [[[0, ]*3, ]*3, ]*3)) def test_sympy__diffgeom__diffgeom__CovarDerivativeOp(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseVectorField, CovarDerivativeOp cs = CoordSystem('name', Patch('name', Manifold('name', 3))) v = BaseVectorField(cs, 0) _test_args(CovarDerivativeOp(v, [[[0, ]*3, ]*3, ]*3)) def test_sympy__categories__baseclasses__Class(): from sympy.categories.baseclasses import Class assert _test_args(Class()) def test_sympy__categories__baseclasses__Object(): from sympy.categories import Object assert _test_args(Object("A")) @XFAIL def test_sympy__categories__baseclasses__Morphism(): from sympy.categories import Object, Morphism assert 
_test_args(Morphism(Object("A"), Object("B"))) def test_sympy__categories__baseclasses__IdentityMorphism(): from sympy.categories import Object, IdentityMorphism assert _test_args(IdentityMorphism(Object("A"))) def test_sympy__categories__baseclasses__NamedMorphism(): from sympy.categories import Object, NamedMorphism assert _test_args(NamedMorphism(Object("A"), Object("B"), "f")) def test_sympy__categories__baseclasses__CompositeMorphism(): from sympy.categories import Object, NamedMorphism, CompositeMorphism A = Object("A") B = Object("B") C = Object("C") f = NamedMorphism(A, B, "f") g = NamedMorphism(B, C, "g") assert _test_args(CompositeMorphism(f, g)) def test_sympy__categories__baseclasses__Diagram(): from sympy.categories import Object, NamedMorphism, Diagram A = Object("A") B = Object("B") C = Object("C") f = NamedMorphism(A, B, "f") d = Diagram([f]) assert _test_args(d) def test_sympy__categories__baseclasses__Category(): from sympy.categories import Object, NamedMorphism, Diagram, Category A = Object("A") B = Object("B") C = Object("C") f = NamedMorphism(A, B, "f") g = NamedMorphism(B, C, "g") d1 = Diagram([f, g]) d2 = Diagram([f]) K = Category("K", commutative_diagrams=[d1, d2]) assert _test_args(K) def test_sympy__ntheory__factor___totient(): from sympy.ntheory.factor_ import totient k = symbols('k', integer=True) t = totient(k) assert _test_args(t) def test_sympy__ntheory__factor___divisor_sigma(): from sympy.ntheory.factor_ import divisor_sigma k = symbols('k', integer=True) n = symbols('n', integer=True) t = divisor_sigma(n, k) assert _test_args(t) def test_sympy__ntheory__residue_ntheory__mobius(): from sympy.ntheory import mobius assert _test_args(mobius(2)) def test_sympy__physics__optics__waves__TWave(): from sympy.physics.optics import TWave A, f, phi = symbols('A, f, phi') assert _test_args(TWave(A, f, phi)) def test_sympy__physics__optics__gaussopt__BeamParameter(): from sympy.physics.optics import BeamParameter assert _test_args(BeamParameter(530e-9, 1, w=1e-3)) def test_sympy__physics__optics__medium__Medium(): from sympy.physics.optics import Medium assert _test_args(Medium('m')) def test_sympy__printing__codeprinter__Assignment(): from sympy.printing.codeprinter import Assignment assert _test_args(Assignment(x, y)) def test_sympy__vector__coordsysrect__CoordSysCartesian(): from sympy.vector.coordsysrect import CoordSysCartesian assert _test_args(CoordSysCartesian('C')) def test_sympy__vector__point__Point(): from sympy.vector.point import Point assert _test_args(Point('P')) def test_sympy__vector__basisdependent__BasisDependent(): from sympy.vector.basisdependent import BasisDependent #These classes have been created to maintain an OOP hierarchy #for Vectors and Dyadics. Are NOT meant to be initialized def test_sympy__vector__basisdependent__BasisDependentMul(): from sympy.vector.basisdependent import BasisDependentMul #These classes have been created to maintain an OOP hierarchy #for Vectors and Dyadics. Are NOT meant to be initialized def test_sympy__vector__basisdependent__BasisDependentAdd(): from sympy.vector.basisdependent import BasisDependentAdd #These classes have been created to maintain an OOP hierarchy #for Vectors and Dyadics. Are NOT meant to be initialized def test_sympy__vector__basisdependent__BasisDependentZero(): from sympy.vector.basisdependent import BasisDependentZero #These classes have been created to maintain an OOP hierarchy #for Vectors and Dyadics. 
Are NOT meant to be initialized def test_sympy__vector__vector__BaseVector(): from sympy.vector.vector import BaseVector from sympy.vector.coordsysrect import CoordSysCartesian C = CoordSysCartesian('C') assert _test_args(BaseVector('Ci', 0, C, ' ', ' ')) def test_sympy__vector__vector__VectorAdd(): from sympy.vector.vector import VectorAdd, VectorMul from sympy.vector.coordsysrect import CoordSysCartesian C = CoordSysCartesian('C') from sympy.abc import a, b, c, x, y, z v1 = a*C.i + b*C.j + c*C.k v2 = x*C.i + y*C.j + z*C.k assert _test_args(VectorAdd(v1, v2)) assert _test_args(VectorMul(x, v1)) def test_sympy__vector__vector__VectorMul(): from sympy.vector.vector import VectorMul from sympy.vector.coordsysrect import CoordSysCartesian C = CoordSysCartesian('C') from sympy.abc import a assert _test_args(VectorMul(a, C.i)) def test_sympy__vector__vector__VectorZero(): from sympy.vector.vector import VectorZero assert _test_args(VectorZero()) def test_sympy__vector__vector__Vector(): from sympy.vector.vector import Vector #Vector is never to be initialized using args pass def test_sympy__vector__dyadic__Dyadic(): from sympy.vector.dyadic import Dyadic #Dyadic is never to be initialized using args pass def test_sympy__vector__dyadic__BaseDyadic(): from sympy.vector.dyadic import BaseDyadic from sympy.vector.coordsysrect import CoordSysCartesian C = CoordSysCartesian('C') assert _test_args(BaseDyadic(C.i, C.j)) def test_sympy__vector__dyadic__DyadicMul(): from sympy.vector.dyadic import BaseDyadic, DyadicMul from sympy.vector.coordsysrect import CoordSysCartesian C = CoordSysCartesian('C') assert _test_args(DyadicMul(3, BaseDyadic(C.i, C.j))) def test_sympy__vector__dyadic__DyadicAdd(): from sympy.vector.dyadic import BaseDyadic, DyadicAdd from sympy.vector.coordsysrect import CoordSysCartesian C = CoordSysCartesian('C') assert _test_args(2 * DyadicAdd(BaseDyadic(C.i, C.i), BaseDyadic(C.i, C.j))) def test_sympy__vector__dyadic__DyadicZero(): from sympy.vector.dyadic import DyadicZero assert _test_args(DyadicZero()) def test_sympy__vector__deloperator__Del(): from sympy.vector.deloperator import Del from sympy.vector.coordsysrect import CoordSysCartesian C = CoordSysCartesian('C') assert _test_args(Del(C)) def test_sympy__vector__orienters__Orienter(): from sympy.vector.orienters import Orienter #Not to be initialized def test_sympy__vector__orienters__ThreeAngleOrienter(): from sympy.vector.orienters import ThreeAngleOrienter #Not to be initialized def test_sympy__vector__orienters__AxisOrienter(): from sympy.vector.orienters import AxisOrienter from sympy.vector.coordsysrect import CoordSysCartesian C = CoordSysCartesian('C') assert _test_args(AxisOrienter(x, C.i)) def test_sympy__vector__orienters__BodyOrienter(): from sympy.vector.orienters import BodyOrienter assert _test_args(BodyOrienter(x, y, z, '123')) def test_sympy__vector__orienters__SpaceOrienter(): from sympy.vector.orienters import SpaceOrienter assert _test_args(SpaceOrienter(x, y, z, '123')) def test_sympy__vector__orienters__QuaternionOrienter(): from sympy.vector.orienters import QuaternionOrienter a, b, c, d = symbols('a b c d') assert _test_args(QuaternionOrienter(a, b, c, d)) def test_sympy__vector__scalar__BaseScalar(): from sympy.vector.scalar import BaseScalar from sympy.vector.coordsysrect import CoordSysCartesian C = CoordSysCartesian('C') assert _test_args(BaseScalar('Cx', 0, C, ' ', ' '))
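# Every test above follows the same pattern: build an instance of the class
# under test and check, via the shared _test_args helper, that each element of
# its .args is itself a Basic instance. Reconstructed from the row content
# above for readability (the Basic import is narrowed to the single name the
# helper uses), the helper and the final test read:
from sympy import Basic

def _test_args(obj):
    # True only if every constructor argument stored on the object is Basic.
    return all(isinstance(arg, Basic) for arg in obj.args)

def test_sympy__vector__scalar__BaseScalar():
    from sympy.vector.scalar import BaseScalar
    from sympy.vector.coordsysrect import CoordSysCartesian
    C = CoordSysCartesian('C')
    assert _test_args(BaseScalar('Cx', 0, C, ' ', ' '))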
31.710198
135
0.778366
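# The three bare values above appear to be the per-row statistics
# avg_line_length, max_line_length and alphanum_fraction, in that order (an
# assumption based on the schema's field order; the row itself does not label
# them). A minimal sketch of how such statistics could be computed from a
# row's content string, assuming naive definitions rather than the dataset's
# actual implementation:
def row_stats(content):
    # Split into physical lines; treat an empty string as one empty line.
    lines = content.splitlines() or [""]
    lengths = [len(line) for line in lines]
    alnum = sum(ch.isalnum() for ch in content)
    return {
        "avg_line_length": sum(lengths) / len(lengths),
        "max_line_length": max(lengths),
        "alphanum_fraction": alnum / len(content) if content else 0.0,
    }

# For example, row_stats applied to the full test-file text would yield values
# of the same shape as 31.710198, 135 and 0.778366 above.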
# be instantiated, add it here anyway with @SKIP("abstract class) (see # e.g. Function). import os import re import warnings import io from sympy import Basic, S, symbols, sqrt, sin, oo, Interval, exp from sympy.core.compatibility import range from sympy.utilities.pytest import XFAIL, SKIP from sympy.utilities.exceptions import SymPyDeprecationWarning x, y, z = symbols('x,y,z') def test_all_classes_are_tested(): this = os.path.split(__file__)[0] path = os.path.join(this, os.pardir, os.pardir) sympy_path = os.path.abspath(path) prefix = os.path.split(sympy_path)[0] + os.sep re_cls = re.compile("^class ([A-Za-z][A-Za-z0-9_]*)\s*\(", re.MULTILINE) modules = {} for root, dirs, files in os.walk(sympy_path): module = root.replace(prefix, "").replace(os.sep, ".") for file in files: if file.startswith(("_", "test_", "bench_")): continue if not file.endswith(".py"): continue with io.open(os.path.join(root, file), "r", encoding='utf-8') as f: text = f.read() submodule = module + '.' + file[:-3] names = re_cls.findall(text) if not names: continue try: mod = __import__(submodule, fromlist=names) except ImportError: continue def is_Basic(name): cls = getattr(mod, name) return issubclass(cls, Basic) names = list(filter(is_Basic, names)) if names: modules[submodule] = names ns = globals() failed = [] for module, names in modules.items(): mod = module.replace('.', '__') for name in names: test = 'test_' + mod + '__' + name if test not in ns: failed.append(module + '.' + name) # reset all SymPyDeprecationWarning into errors warnings.simplefilter("error", category=SymPyDeprecationWarning) assert not failed, "Missing classes: %s. Please add tests for these to sympy/core/tests/test_args.py." % ", ".join(failed) def _test_args(obj): return all(isinstance(arg, Basic) for arg in obj.args) def test_sympy__assumptions__assume__AppliedPredicate(): from sympy.assumptions.assume import AppliedPredicate, Predicate assert _test_args(AppliedPredicate(Predicate("test"), 2)) def test_sympy__assumptions__assume__Predicate(): from sympy.assumptions.assume import Predicate assert _test_args(Predicate("test")) @XFAIL def test_sympy__combinatorics__graycode__GrayCode(): from sympy.combinatorics.graycode import GrayCode # an integer is given and returned from GrayCode as the arg assert _test_args(GrayCode(3, start='100')) assert _test_args(GrayCode(3, rank=1)) def test_sympy__combinatorics__subsets__Subset(): from sympy.combinatorics.subsets import Subset assert _test_args(Subset([0, 1], [0, 1, 2, 3])) assert _test_args(Subset(['c', 'd'], ['a', 'b', 'c', 'd'])) @XFAIL def test_sympy__combinatorics__permutations__Permutation(): from sympy.combinatorics.permutations import Permutation assert _test_args(Permutation([0, 1, 2, 3])) def test_sympy__combinatorics__perm_groups__PermutationGroup(): from sympy.combinatorics.permutations import Permutation from sympy.combinatorics.perm_groups import PermutationGroup assert _test_args(PermutationGroup([Permutation([0, 1])])) def test_sympy__combinatorics__polyhedron__Polyhedron(): from sympy.combinatorics.permutations import Permutation from sympy.combinatorics.polyhedron import Polyhedron from sympy.abc import w, x, y, z pgroup = [Permutation([[0, 1, 2], [3]]), Permutation([[0, 1, 3], [2]]), Permutation([[0, 2, 3], [1]]), Permutation([[1, 2, 3], [0]]), Permutation([[0, 1], [2, 3]]), Permutation([[0, 2], [1, 3]]), Permutation([[0, 3], [1, 2]]), Permutation([[0, 1, 2, 3]])] corners = [w, x, y, z] faces = [(w, x, y), (w, y, z), (w, z, x), (x, y, z)] assert _test_args(Polyhedron(corners, 
faces, pgroup)) @XFAIL def test_sympy__combinatorics__prufer__Prufer(): from sympy.combinatorics.prufer import Prufer assert _test_args(Prufer([[0, 1], [0, 2], [0, 3]], 4)) def test_sympy__combinatorics__partitions__Partition(): from sympy.combinatorics.partitions import Partition assert _test_args(Partition([1])) @XFAIL def test_sympy__combinatorics__partitions__IntegerPartition(): from sympy.combinatorics.partitions import IntegerPartition assert _test_args(IntegerPartition([1])) def test_sympy__concrete__products__Product(): from sympy.concrete.products import Product assert _test_args(Product(x, (x, 0, 10))) assert _test_args(Product(x, (x, 0, y), (y, 0, 10))) @SKIP("abstract Class") def test_sympy__concrete__expr_with_limits__ExprWithLimits(): from sympy.concrete.expr_with_limits import ExprWithLimits assert _test_args(ExprWithLimits(x, (x, 0, 10))) assert _test_args(ExprWithLimits(x*y, (x, 0, 10.),(y,1.,3))) @SKIP("abstract Class") def test_sympy__concrete__expr_with_limits__AddWithLimits(): from sympy.concrete.expr_with_limits import AddWithLimits assert _test_args(AddWithLimits(x, (x, 0, 10))) assert _test_args(AddWithLimits(x*y, (x, 0, 10),(y,1,3))) @SKIP("abstract Class") def test_sympy__concrete__expr_with_intlimits__ExprWithIntLimits(): from sympy.concrete.expr_with_intlimits import ExprWithIntLimits assert _test_args(ExprWithIntLimits(x, (x, 0, 10))) assert _test_args(ExprWithIntLimits(x*y, (x, 0, 10),(y,1,3))) def test_sympy__concrete__summations__Sum(): from sympy.concrete.summations import Sum assert _test_args(Sum(x, (x, 0, 10))) assert _test_args(Sum(x, (x, 0, y), (y, 0, 10))) def test_sympy__core__add__Add(): from sympy.core.add import Add assert _test_args(Add(x, y, z, 2)) def test_sympy__core__basic__Atom(): from sympy.core.basic import Atom assert _test_args(Atom()) def test_sympy__core__basic__Basic(): from sympy.core.basic import Basic assert _test_args(Basic()) def test_sympy__core__containers__Dict(): from sympy.core.containers import Dict assert _test_args(Dict({x: y, y: z})) def test_sympy__core__containers__Tuple(): from sympy.core.containers import Tuple assert _test_args(Tuple(x, y, z, 2)) def test_sympy__core__expr__AtomicExpr(): from sympy.core.expr import AtomicExpr assert _test_args(AtomicExpr()) def test_sympy__core__expr__Expr(): from sympy.core.expr import Expr assert _test_args(Expr()) def test_sympy__core__function__Application(): from sympy.core.function import Application assert _test_args(Application(1, 2, 3)) def test_sympy__core__function__AppliedUndef(): from sympy.core.function import AppliedUndef assert _test_args(AppliedUndef(1, 2, 3)) def test_sympy__core__function__Derivative(): from sympy.core.function import Derivative assert _test_args(Derivative(2, x, y, 3)) @SKIP("abstract class") def test_sympy__core__function__Function(): pass def test_sympy__core__function__Lambda(): from sympy.core.function import Lambda assert _test_args(Lambda((x, y), x + y + z)) def test_sympy__core__function__Subs(): from sympy.core.function import Subs assert _test_args(Subs(x + y, x, 2)) def test_sympy__core__function__WildFunction(): from sympy.core.function import WildFunction assert _test_args(WildFunction('f')) def test_sympy__core__mod__Mod(): from sympy.core.mod import Mod assert _test_args(Mod(x, 2)) def test_sympy__core__mul__Mul(): from sympy.core.mul import Mul assert _test_args(Mul(2, x, y, z)) def test_sympy__core__numbers__Catalan(): from sympy.core.numbers import Catalan assert _test_args(Catalan()) def 
test_sympy__core__numbers__ComplexInfinity(): from sympy.core.numbers import ComplexInfinity assert _test_args(ComplexInfinity()) def test_sympy__core__numbers__EulerGamma(): from sympy.core.numbers import EulerGamma assert _test_args(EulerGamma()) def test_sympy__core__numbers__Exp1(): from sympy.core.numbers import Exp1 assert _test_args(Exp1()) def test_sympy__core__numbers__Float(): from sympy.core.numbers import Float assert _test_args(Float(1.23)) def test_sympy__core__numbers__GoldenRatio(): from sympy.core.numbers import GoldenRatio assert _test_args(GoldenRatio()) def test_sympy__core__numbers__Half(): from sympy.core.numbers import Half assert _test_args(Half()) def test_sympy__core__numbers__ImaginaryUnit(): from sympy.core.numbers import ImaginaryUnit assert _test_args(ImaginaryUnit()) def test_sympy__core__numbers__Infinity(): from sympy.core.numbers import Infinity assert _test_args(Infinity()) def test_sympy__core__numbers__Integer(): from sympy.core.numbers import Integer assert _test_args(Integer(7)) @SKIP("abstract class") def test_sympy__core__numbers__IntegerConstant(): pass def test_sympy__core__numbers__NaN(): from sympy.core.numbers import NaN assert _test_args(NaN()) def test_sympy__core__numbers__NegativeInfinity(): from sympy.core.numbers import NegativeInfinity assert _test_args(NegativeInfinity()) def test_sympy__core__numbers__NegativeOne(): from sympy.core.numbers import NegativeOne assert _test_args(NegativeOne()) def test_sympy__core__numbers__Number(): from sympy.core.numbers import Number assert _test_args(Number(1, 7)) def test_sympy__core__numbers__NumberSymbol(): from sympy.core.numbers import NumberSymbol assert _test_args(NumberSymbol()) def test_sympy__core__numbers__One(): from sympy.core.numbers import One assert _test_args(One()) def test_sympy__core__numbers__Pi(): from sympy.core.numbers import Pi assert _test_args(Pi()) def test_sympy__core__numbers__Rational(): from sympy.core.numbers import Rational assert _test_args(Rational(1, 7)) @SKIP("abstract class") def test_sympy__core__numbers__RationalConstant(): pass def test_sympy__core__numbers__Zero(): from sympy.core.numbers import Zero assert _test_args(Zero()) @SKIP("abstract class") def test_sympy__core__operations__AssocOp(): pass @SKIP("abstract class") def test_sympy__core__operations__LatticeOp(): pass def test_sympy__core__power__Pow(): from sympy.core.power import Pow assert _test_args(Pow(x, 2)) def test_sympy__core__relational__Equality(): from sympy.core.relational import Equality assert _test_args(Equality(x, 2)) def test_sympy__core__relational__GreaterThan(): from sympy.core.relational import GreaterThan assert _test_args(GreaterThan(x, 2)) def test_sympy__core__relational__LessThan(): from sympy.core.relational import LessThan assert _test_args(LessThan(x, 2)) @SKIP("abstract class") def test_sympy__core__relational__Relational(): pass def test_sympy__core__relational__StrictGreaterThan(): from sympy.core.relational import StrictGreaterThan assert _test_args(StrictGreaterThan(x, 2)) def test_sympy__core__relational__StrictLessThan(): from sympy.core.relational import StrictLessThan assert _test_args(StrictLessThan(x, 2)) def test_sympy__core__relational__Unequality(): from sympy.core.relational import Unequality assert _test_args(Unequality(x, 2)) def test_sympy__sets__sets__EmptySet(): from sympy.sets.sets import EmptySet assert _test_args(EmptySet()) def test_sympy__sets__sets__UniversalSet(): from sympy.sets.sets import UniversalSet assert _test_args(UniversalSet()) def 
test_sympy__sets__sets__FiniteSet(): from sympy.sets.sets import FiniteSet assert _test_args(FiniteSet(x, y, z)) def test_sympy__sets__sets__Interval(): from sympy.sets.sets import Interval assert _test_args(Interval(0, 1)) def test_sympy__sets__sets__ProductSet(): from sympy.sets.sets import ProductSet, Interval assert _test_args(ProductSet(Interval(0, 1), Interval(0, 1))) @SKIP("does it make sense to test this?") def test_sympy__sets__sets__Set(): from sympy.sets.sets import Set assert _test_args(Set()) def test_sympy__sets__sets__Intersection(): from sympy.sets.sets import Intersection, Interval assert _test_args(Intersection(Interval(0, 3), Interval(2, 4), evaluate=False)) def test_sympy__sets__sets__Union(): from sympy.sets.sets import Union, Interval assert _test_args(Union(Interval(0, 1), Interval(2, 3))) def test_sympy__sets__sets__Complement(): from sympy.sets.sets import Complement assert _test_args(Complement(Interval(0, 2), Interval(0, 1))) def test_sympy__sets__sets__SymmetricDifference(): from sympy.sets.sets import FiniteSet, SymmetricDifference assert _test_args(SymmetricDifference(FiniteSet(1, 2, 3), \ FiniteSet(2, 3, 4))) def test_sympy__core__trace__Tr(): from sympy.core.trace import Tr a, b = symbols('a b') assert _test_args(Tr(a + b)) def test_sympy__sets__fancysets__Naturals(): from sympy.sets.fancysets import Naturals assert _test_args(Naturals()) def test_sympy__sets__fancysets__Naturals0(): from sympy.sets.fancysets import Naturals0 assert _test_args(Naturals0()) def test_sympy__sets__fancysets__Integers(): from sympy.sets.fancysets import Integers assert _test_args(Integers()) def test_sympy__sets__fancysets__Reals(): from sympy.sets.fancysets import Reals assert _test_args(Reals()) def test_sympy__sets__fancysets__ImageSet(): from sympy.sets.fancysets import ImageSet from sympy import S, Lambda, Symbol x = Symbol('x') assert _test_args(ImageSet(Lambda(x, x**2), S.Naturals)) def test_sympy__sets__fancysets__Range(): from sympy.sets.fancysets import Range assert _test_args(Range(1, 5, 1)) def test_sympy__sets__contains__Contains(): from sympy.sets.fancysets import Range from sympy.sets.contains import Contains assert _test_args(Contains(x, Range(0, 10, 2))) # STATS from sympy.stats.crv_types import NormalDistribution nd = NormalDistribution(0, 1) from sympy.stats.frv_types import DieDistribution die = DieDistribution(6) def test_sympy__stats__crv__ContinuousDomain(): from sympy.stats.crv import ContinuousDomain assert _test_args(ContinuousDomain(set([x]), Interval(-oo, oo))) def test_sympy__stats__crv__SingleContinuousDomain(): from sympy.stats.crv import SingleContinuousDomain assert _test_args(SingleContinuousDomain(x, Interval(-oo, oo))) def test_sympy__stats__crv__ProductContinuousDomain(): from sympy.stats.crv import SingleContinuousDomain, ProductContinuousDomain D = SingleContinuousDomain(x, Interval(-oo, oo)) E = SingleContinuousDomain(y, Interval(0, oo)) assert _test_args(ProductContinuousDomain(D, E)) def test_sympy__stats__crv__ConditionalContinuousDomain(): from sympy.stats.crv import (SingleContinuousDomain, ConditionalContinuousDomain) D = SingleContinuousDomain(x, Interval(-oo, oo)) assert _test_args(ConditionalContinuousDomain(D, x > 0)) def test_sympy__stats__crv__ContinuousPSpace(): from sympy.stats.crv import ContinuousPSpace, SingleContinuousDomain D = SingleContinuousDomain(x, Interval(-oo, oo)) assert _test_args(ContinuousPSpace(D, nd)) def test_sympy__stats__crv__SingleContinuousPSpace(): from sympy.stats.crv import SingleContinuousPSpace 
assert _test_args(SingleContinuousPSpace(x, nd)) def test_sympy__stats__crv__ProductContinuousPSpace(): from sympy.stats.crv import ProductContinuousPSpace, SingleContinuousPSpace A = SingleContinuousPSpace(x, nd) B = SingleContinuousPSpace(y, nd) assert _test_args(ProductContinuousPSpace(A, B)) @SKIP("abstract class") def test_sympy__stats__crv__SingleContinuousDistribution(): pass def test_sympy__stats__drv__SingleDiscreteDomain(): from sympy.stats.drv import SingleDiscreteDomain assert _test_args(SingleDiscreteDomain(x, S.Naturals)) def test_sympy__stats__drv__SingleDiscretePSpace(): from sympy.stats.drv import SingleDiscretePSpace from sympy.stats.drv_types import PoissonDistribution assert _test_args(SingleDiscretePSpace(x, PoissonDistribution(1))) @SKIP("abstract class") def test_sympy__stats__drv__SingleDiscreteDistribution(): pass def test_sympy__stats__rv__RandomDomain(): from sympy.stats.rv import RandomDomain from sympy.sets.sets import FiniteSet assert _test_args(RandomDomain(FiniteSet(x), FiniteSet(1, 2, 3))) def test_sympy__stats__rv__SingleDomain(): from sympy.stats.rv import SingleDomain from sympy.sets.sets import FiniteSet assert _test_args(SingleDomain(x, FiniteSet(1, 2, 3))) def test_sympy__stats__rv__ConditionalDomain(): from sympy.stats.rv import ConditionalDomain, RandomDomain from sympy.sets.sets import FiniteSet D = RandomDomain(FiniteSet(x), FiniteSet(1, 2)) assert _test_args(ConditionalDomain(D, x > 1)) def test_sympy__stats__rv__PSpace(): from sympy.stats.rv import PSpace, RandomDomain from sympy import FiniteSet D = RandomDomain(FiniteSet(x), FiniteSet(1, 2, 3, 4, 5, 6)) assert _test_args(PSpace(D, die)) @SKIP("abstract Class") def test_sympy__stats__rv__SinglePSpace(): pass def test_sympy__stats__rv__RandomSymbol(): from sympy.stats.rv import RandomSymbol from sympy.stats.crv import SingleContinuousPSpace A = SingleContinuousPSpace(x, nd) assert _test_args(RandomSymbol(A, x)) def test_sympy__stats__rv__ProductPSpace(): from sympy.stats.rv import ProductPSpace from sympy.stats.crv import SingleContinuousPSpace A = SingleContinuousPSpace(x, nd) B = SingleContinuousPSpace(y, nd) assert _test_args(ProductPSpace(A, B)) def test_sympy__stats__rv__ProductDomain(): from sympy.stats.rv import ProductDomain, SingleDomain D = SingleDomain(x, Interval(-oo, oo)) E = SingleDomain(y, Interval(0, oo)) assert _test_args(ProductDomain(D, E)) def test_sympy__stats__frv_types__DiscreteUniformDistribution(): from sympy.stats.frv_types import DiscreteUniformDistribution from sympy.core.containers import Tuple assert _test_args(DiscreteUniformDistribution(Tuple(*list(range(6))))) def test_sympy__stats__frv_types__DieDistribution(): from sympy.stats.frv_types import DieDistribution assert _test_args(DieDistribution(6)) def test_sympy__stats__frv_types__BernoulliDistribution(): from sympy.stats.frv_types import BernoulliDistribution assert _test_args(BernoulliDistribution(S.Half, 0, 1)) def test_sympy__stats__frv_types__BinomialDistribution(): from sympy.stats.frv_types import BinomialDistribution assert _test_args(BinomialDistribution(5, S.Half, 1, 0)) def test_sympy__stats__frv_types__HypergeometricDistribution(): from sympy.stats.frv_types import HypergeometricDistribution assert _test_args(HypergeometricDistribution(10, 5, 3)) def test_sympy__stats__frv_types__RademacherDistribution(): from sympy.stats.frv_types import RademacherDistribution assert _test_args(RademacherDistribution()) def test_sympy__stats__frv__FiniteDomain(): from sympy.stats.frv import FiniteDomain assert 
_test_args(FiniteDomain(set([(x, 1), (x, 2)]))) # x can be 1 or 2 def test_sympy__stats__frv__SingleFiniteDomain(): from sympy.stats.frv import SingleFiniteDomain assert _test_args(SingleFiniteDomain(x, set([1, 2]))) # x can be 1 or 2 def test_sympy__stats__frv__ProductFiniteDomain(): from sympy.stats.frv import SingleFiniteDomain, ProductFiniteDomain xd = SingleFiniteDomain(x, set([1, 2])) yd = SingleFiniteDomain(y, set([1, 2])) assert _test_args(ProductFiniteDomain(xd, yd)) def test_sympy__stats__frv__ConditionalFiniteDomain(): from sympy.stats.frv import SingleFiniteDomain, ConditionalFiniteDomain xd = SingleFiniteDomain(x, set([1, 2])) assert _test_args(ConditionalFiniteDomain(xd, x > 1)) def test_sympy__stats__frv__FinitePSpace(): from sympy.stats.frv import FinitePSpace, SingleFiniteDomain xd = SingleFiniteDomain(x, set([1, 2, 3, 4, 5, 6])) p = 1.0/6 xd = SingleFiniteDomain(x, set([1, 2])) assert _test_args(FinitePSpace(xd, {(x, 1): S.Half, (x, 2): S.Half})) def test_sympy__stats__frv__SingleFinitePSpace(): from sympy.stats.frv import SingleFinitePSpace from sympy import Symbol assert _test_args(SingleFinitePSpace(Symbol('x'), die)) def test_sympy__stats__frv__ProductFinitePSpace(): from sympy.stats.frv import SingleFinitePSpace, ProductFinitePSpace from sympy import Symbol xp = SingleFinitePSpace(Symbol('x'), die) yp = SingleFinitePSpace(Symbol('y'), die) assert _test_args(ProductFinitePSpace(xp, yp)) @SKIP("abstract class") def test_sympy__stats__frv__SingleFiniteDistribution(): pass @SKIP("abstract class") def test_sympy__stats__crv__ContinuousDistribution(): pass def test_sympy__stats__frv_types__FiniteDistributionHandmade(): from sympy.stats.frv_types import FiniteDistributionHandmade assert _test_args(FiniteDistributionHandmade({1: 1})) def test_sympy__stats__crv__ContinuousDistributionHandmade(): from sympy.stats.crv import ContinuousDistributionHandmade from sympy import Symbol, Interval assert _test_args(ContinuousDistributionHandmade(Symbol('x'), Interval(0, 2))) def test_sympy__stats__rv__Density(): from sympy.stats.rv import Density from sympy.stats.crv_types import Normal assert _test_args(Density(Normal('x', 0, 1))) def test_sympy__stats__crv_types__ArcsinDistribution(): from sympy.stats.crv_types import ArcsinDistribution assert _test_args(ArcsinDistribution(0, 1)) def test_sympy__stats__crv_types__BeniniDistribution(): from sympy.stats.crv_types import BeniniDistribution assert _test_args(BeniniDistribution(1, 1, 1)) def test_sympy__stats__crv_types__BetaDistribution(): from sympy.stats.crv_types import BetaDistribution assert _test_args(BetaDistribution(1, 1)) def test_sympy__stats__crv_types__BetaPrimeDistribution(): from sympy.stats.crv_types import BetaPrimeDistribution assert _test_args(BetaPrimeDistribution(1, 1)) def test_sympy__stats__crv_types__CauchyDistribution(): from sympy.stats.crv_types import CauchyDistribution assert _test_args(CauchyDistribution(0, 1)) def test_sympy__stats__crv_types__ChiDistribution(): from sympy.stats.crv_types import ChiDistribution assert _test_args(ChiDistribution(1)) def test_sympy__stats__crv_types__ChiNoncentralDistribution(): from sympy.stats.crv_types import ChiNoncentralDistribution assert _test_args(ChiNoncentralDistribution(1,1)) def test_sympy__stats__crv_types__ChiSquaredDistribution(): from sympy.stats.crv_types import ChiSquaredDistribution assert _test_args(ChiSquaredDistribution(1)) def test_sympy__stats__crv_types__DagumDistribution(): from sympy.stats.crv_types import DagumDistribution assert 
_test_args(DagumDistribution(1, 1, 1)) def test_sympy__stats__crv_types__ExponentialDistribution(): from sympy.stats.crv_types import ExponentialDistribution assert _test_args(ExponentialDistribution(1)) def test_sympy__stats__crv_types__FDistributionDistribution(): from sympy.stats.crv_types import FDistributionDistribution assert _test_args(FDistributionDistribution(1, 1)) def test_sympy__stats__crv_types__FisherZDistribution(): from sympy.stats.crv_types import FisherZDistribution assert _test_args(FisherZDistribution(1, 1)) def test_sympy__stats__crv_types__FrechetDistribution(): from sympy.stats.crv_types import FrechetDistribution assert _test_args(FrechetDistribution(1, 1, 1)) def test_sympy__stats__crv_types__GammaInverseDistribution(): from sympy.stats.crv_types import GammaInverseDistribution assert _test_args(GammaInverseDistribution(1, 1)) def test_sympy__stats__crv_types__GammaDistribution(): from sympy.stats.crv_types import GammaDistribution assert _test_args(GammaDistribution(1, 1)) def test_sympy__stats__crv_types__KumaraswamyDistribution(): from sympy.stats.crv_types import KumaraswamyDistribution assert _test_args(KumaraswamyDistribution(1, 1)) def test_sympy__stats__crv_types__LaplaceDistribution(): from sympy.stats.crv_types import LaplaceDistribution assert _test_args(LaplaceDistribution(0, 1)) def test_sympy__stats__crv_types__LogisticDistribution(): from sympy.stats.crv_types import LogisticDistribution assert _test_args(LogisticDistribution(0, 1)) def test_sympy__stats__crv_types__LogNormalDistribution(): from sympy.stats.crv_types import LogNormalDistribution assert _test_args(LogNormalDistribution(0, 1)) def test_sympy__stats__crv_types__MaxwellDistribution(): from sympy.stats.crv_types import MaxwellDistribution assert _test_args(MaxwellDistribution(1)) def test_sympy__stats__crv_types__NakagamiDistribution(): from sympy.stats.crv_types import NakagamiDistribution assert _test_args(NakagamiDistribution(1, 1)) def test_sympy__stats__crv_types__NormalDistribution(): from sympy.stats.crv_types import NormalDistribution assert _test_args(NormalDistribution(0, 1)) def test_sympy__stats__crv_types__ParetoDistribution(): from sympy.stats.crv_types import ParetoDistribution assert _test_args(ParetoDistribution(1, 1)) def test_sympy__stats__crv_types__QuadraticUDistribution(): from sympy.stats.crv_types import QuadraticUDistribution assert _test_args(QuadraticUDistribution(1, 2)) def test_sympy__stats__crv_types__RaisedCosineDistribution(): from sympy.stats.crv_types import RaisedCosineDistribution assert _test_args(RaisedCosineDistribution(1, 1)) def test_sympy__stats__crv_types__RayleighDistribution(): from sympy.stats.crv_types import RayleighDistribution assert _test_args(RayleighDistribution(1)) def test_sympy__stats__crv_types__StudentTDistribution(): from sympy.stats.crv_types import StudentTDistribution assert _test_args(StudentTDistribution(1)) def test_sympy__stats__crv_types__TriangularDistribution(): from sympy.stats.crv_types import TriangularDistribution assert _test_args(TriangularDistribution(-1, 0, 1)) def test_sympy__stats__crv_types__UniformDistribution(): from sympy.stats.crv_types import UniformDistribution assert _test_args(UniformDistribution(0, 1)) def test_sympy__stats__crv_types__UniformSumDistribution(): from sympy.stats.crv_types import UniformSumDistribution assert _test_args(UniformSumDistribution(1)) def test_sympy__stats__crv_types__VonMisesDistribution(): from sympy.stats.crv_types import VonMisesDistribution assert 
_test_args(VonMisesDistribution(1, 1)) def test_sympy__stats__crv_types__WeibullDistribution(): from sympy.stats.crv_types import WeibullDistribution assert _test_args(WeibullDistribution(1, 1)) def test_sympy__stats__crv_types__WignerSemicircleDistribution(): from sympy.stats.crv_types import WignerSemicircleDistribution assert _test_args(WignerSemicircleDistribution(1)) def test_sympy__stats__drv_types__PoissonDistribution(): from sympy.stats.drv_types import PoissonDistribution assert _test_args(PoissonDistribution(1)) def test_sympy__stats__drv_types__GeometricDistribution(): from sympy.stats.drv_types import GeometricDistribution assert _test_args(GeometricDistribution(.5)) def test_sympy__core__symbol__Dummy(): from sympy.core.symbol import Dummy assert _test_args(Dummy('t')) def test_sympy__core__symbol__Symbol(): from sympy.core.symbol import Symbol assert _test_args(Symbol('t')) def test_sympy__core__symbol__Wild(): from sympy.core.symbol import Wild assert _test_args(Wild('x', exclude=[x])) @SKIP("abstract class") def test_sympy__functions__combinatorial__factorials__CombinatorialFunction(): pass def test_sympy__functions__combinatorial__factorials__FallingFactorial(): from sympy.functions.combinatorial.factorials import FallingFactorial assert _test_args(FallingFactorial(2, x)) def test_sympy__functions__combinatorial__factorials__MultiFactorial(): from sympy.functions.combinatorial.factorials import MultiFactorial assert _test_args(MultiFactorial(x)) def test_sympy__functions__combinatorial__factorials__RisingFactorial(): from sympy.functions.combinatorial.factorials import RisingFactorial assert _test_args(RisingFactorial(2, x)) def test_sympy__functions__combinatorial__factorials__binomial(): from sympy.functions.combinatorial.factorials import binomial assert _test_args(binomial(2, x)) def test_sympy__functions__combinatorial__factorials__subfactorial(): from sympy.functions.combinatorial.factorials import subfactorial assert _test_args(subfactorial(1)) def test_sympy__functions__combinatorial__factorials__factorial(): from sympy.functions.combinatorial.factorials import factorial assert _test_args(factorial(x)) def test_sympy__functions__combinatorial__factorials__factorial2(): from sympy.functions.combinatorial.factorials import factorial2 assert _test_args(factorial2(x)) def test_sympy__functions__combinatorial__numbers__bell(): from sympy.functions.combinatorial.numbers import bell assert _test_args(bell(x, y)) def test_sympy__functions__combinatorial__numbers__bernoulli(): from sympy.functions.combinatorial.numbers import bernoulli assert _test_args(bernoulli(x)) def test_sympy__functions__combinatorial__numbers__catalan(): from sympy.functions.combinatorial.numbers import catalan assert _test_args(catalan(x)) def test_sympy__functions__combinatorial__numbers__genocchi(): from sympy.functions.combinatorial.numbers import genocchi assert _test_args(genocchi(x)) def test_sympy__functions__combinatorial__numbers__euler(): from sympy.functions.combinatorial.numbers import euler assert _test_args(euler(x)) def test_sympy__functions__combinatorial__numbers__fibonacci(): from sympy.functions.combinatorial.numbers import fibonacci assert _test_args(fibonacci(x)) def test_sympy__functions__combinatorial__numbers__harmonic(): from sympy.functions.combinatorial.numbers import harmonic assert _test_args(harmonic(x, 2)) def test_sympy__functions__combinatorial__numbers__lucas(): from sympy.functions.combinatorial.numbers import lucas assert _test_args(lucas(x)) def 
test_sympy__functions__elementary__complexes__Abs(): from sympy.functions.elementary.complexes import Abs assert _test_args(Abs(x)) def test_sympy__functions__elementary__complexes__adjoint(): from sympy.functions.elementary.complexes import adjoint assert _test_args(adjoint(x)) def test_sympy__functions__elementary__complexes__arg(): from sympy.functions.elementary.complexes import arg assert _test_args(arg(x)) def test_sympy__functions__elementary__complexes__conjugate(): from sympy.functions.elementary.complexes import conjugate assert _test_args(conjugate(x)) def test_sympy__functions__elementary__complexes__im(): from sympy.functions.elementary.complexes import im assert _test_args(im(x)) def test_sympy__functions__elementary__complexes__re(): from sympy.functions.elementary.complexes import re assert _test_args(re(x)) def test_sympy__functions__elementary__complexes__sign(): from sympy.functions.elementary.complexes import sign assert _test_args(sign(x)) def test_sympy__functions__elementary__complexes__polar_lift(): from sympy.functions.elementary.complexes import polar_lift assert _test_args(polar_lift(x)) def test_sympy__functions__elementary__complexes__periodic_argument(): from sympy.functions.elementary.complexes import periodic_argument assert _test_args(periodic_argument(x, y)) def test_sympy__functions__elementary__complexes__principal_branch(): from sympy.functions.elementary.complexes import principal_branch assert _test_args(principal_branch(x, y)) def test_sympy__functions__elementary__complexes__transpose(): from sympy.functions.elementary.complexes import transpose assert _test_args(transpose(x)) def test_sympy__functions__elementary__exponential__LambertW(): from sympy.functions.elementary.exponential import LambertW assert _test_args(LambertW(2)) @SKIP("abstract class") def test_sympy__functions__elementary__exponential__ExpBase(): pass def test_sympy__functions__elementary__exponential__exp(): from sympy.functions.elementary.exponential import exp assert _test_args(exp(2)) def test_sympy__functions__elementary__exponential__exp_polar(): from sympy.functions.elementary.exponential import exp_polar assert _test_args(exp_polar(2)) def test_sympy__functions__elementary__exponential__log(): from sympy.functions.elementary.exponential import log assert _test_args(log(2)) @SKIP("abstract class") def test_sympy__functions__elementary__hyperbolic__HyperbolicFunction(): pass @SKIP("abstract class") def test_sympy__functions__elementary__hyperbolic__ReciprocalHyperbolicFunction(): pass def test_sympy__functions__elementary__hyperbolic__acosh(): from sympy.functions.elementary.hyperbolic import acosh assert _test_args(acosh(2)) def test_sympy__functions__elementary__hyperbolic__acoth(): from sympy.functions.elementary.hyperbolic import acoth assert _test_args(acoth(2)) def test_sympy__functions__elementary__hyperbolic__asinh(): from sympy.functions.elementary.hyperbolic import asinh assert _test_args(asinh(2)) def test_sympy__functions__elementary__hyperbolic__atanh(): from sympy.functions.elementary.hyperbolic import atanh assert _test_args(atanh(2)) def test_sympy__functions__elementary__hyperbolic__cosh(): from sympy.functions.elementary.hyperbolic import cosh assert _test_args(cosh(2)) def test_sympy__functions__elementary__hyperbolic__coth(): from sympy.functions.elementary.hyperbolic import coth assert _test_args(coth(2)) def test_sympy__functions__elementary__hyperbolic__csch(): from sympy.functions.elementary.hyperbolic import csch assert _test_args(csch(2)) def 
test_sympy__functions__elementary__hyperbolic__sech(): from sympy.functions.elementary.hyperbolic import sech assert _test_args(sech(2)) def test_sympy__functions__elementary__hyperbolic__sinh(): from sympy.functions.elementary.hyperbolic import sinh assert _test_args(sinh(2)) def test_sympy__functions__elementary__hyperbolic__tanh(): from sympy.functions.elementary.hyperbolic import tanh assert _test_args(tanh(2)) @SKIP("does this work at all?") def test_sympy__functions__elementary__integers__RoundFunction(): from sympy.functions.elementary.integers import RoundFunction assert _test_args(RoundFunction()) def test_sympy__functions__elementary__integers__ceiling(): from sympy.functions.elementary.integers import ceiling assert _test_args(ceiling(x)) def test_sympy__functions__elementary__integers__floor(): from sympy.functions.elementary.integers import floor assert _test_args(floor(x)) def test_sympy__functions__elementary__miscellaneous__IdentityFunction(): from sympy.functions.elementary.miscellaneous import IdentityFunction assert _test_args(IdentityFunction()) def test_sympy__functions__elementary__miscellaneous__Max(): from sympy.functions.elementary.miscellaneous import Max assert _test_args(Max(x, 2)) def test_sympy__functions__elementary__miscellaneous__Min(): from sympy.functions.elementary.miscellaneous import Min assert _test_args(Min(x, 2)) @SKIP("abstract class") def test_sympy__functions__elementary__miscellaneous__MinMaxBase(): pass def test_sympy__functions__elementary__piecewise__ExprCondPair(): from sympy.functions.elementary.piecewise import ExprCondPair assert _test_args(ExprCondPair(1, True)) def test_sympy__functions__elementary__piecewise__Piecewise(): from sympy.functions.elementary.piecewise import Piecewise assert _test_args(Piecewise((1, x >= 0), (0, True))) @SKIP("abstract class") def test_sympy__functions__elementary__trigonometric__TrigonometricFunction(): pass @SKIP("abstract class") def test_sympy__functions__elementary__trigonometric__ReciprocalTrigonometricFunction(): pass @SKIP("abstract class") def test_sympy__functions__elementary__trigonometric__InverseTrigonometricFunction(): pass def test_sympy__functions__elementary__trigonometric__acos(): from sympy.functions.elementary.trigonometric import acos assert _test_args(acos(2)) def test_sympy__functions__elementary__trigonometric__acot(): from sympy.functions.elementary.trigonometric import acot assert _test_args(acot(2)) def test_sympy__functions__elementary__trigonometric__asin(): from sympy.functions.elementary.trigonometric import asin assert _test_args(asin(2)) def test_sympy__functions__elementary__trigonometric__asec(): from sympy.functions.elementary.trigonometric import asec assert _test_args(asec(2)) def test_sympy__functions__elementary__trigonometric__acsc(): from sympy.functions.elementary.trigonometric import acsc assert _test_args(acsc(2)) def test_sympy__functions__elementary__trigonometric__atan(): from sympy.functions.elementary.trigonometric import atan assert _test_args(atan(2)) def test_sympy__functions__elementary__trigonometric__atan2(): from sympy.functions.elementary.trigonometric import atan2 assert _test_args(atan2(2, 3)) def test_sympy__functions__elementary__trigonometric__cos(): from sympy.functions.elementary.trigonometric import cos assert _test_args(cos(2)) def test_sympy__functions__elementary__trigonometric__csc(): from sympy.functions.elementary.trigonometric import csc assert _test_args(csc(2)) def test_sympy__functions__elementary__trigonometric__cot(): from 
sympy.functions.elementary.trigonometric import cot assert _test_args(cot(2)) def test_sympy__functions__elementary__trigonometric__sin(): assert _test_args(sin(2)) def test_sympy__functions__elementary__trigonometric__sec(): from sympy.functions.elementary.trigonometric import sec assert _test_args(sec(2)) def test_sympy__functions__elementary__trigonometric__tan(): from sympy.functions.elementary.trigonometric import tan assert _test_args(tan(2)) @SKIP("abstract class") def test_sympy__functions__special__bessel__BesselBase(): pass @SKIP("abstract class") def test_sympy__functions__special__bessel__SphericalBesselBase(): pass def test_sympy__functions__special__bessel__besseli(): from sympy.functions.special.bessel import besseli assert _test_args(besseli(x, 1)) def test_sympy__functions__special__bessel__besselj(): from sympy.functions.special.bessel import besselj assert _test_args(besselj(x, 1)) def test_sympy__functions__special__bessel__besselk(): from sympy.functions.special.bessel import besselk assert _test_args(besselk(x, 1)) def test_sympy__functions__special__bessel__bessely(): from sympy.functions.special.bessel import bessely assert _test_args(bessely(x, 1)) def test_sympy__functions__special__bessel__hankel1(): from sympy.functions.special.bessel import hankel1 assert _test_args(hankel1(x, 1)) def test_sympy__functions__special__bessel__hankel2(): from sympy.functions.special.bessel import hankel2 assert _test_args(hankel2(x, 1)) def test_sympy__functions__special__bessel__jn(): from sympy.functions.special.bessel import jn assert _test_args(jn(0, x)) def test_sympy__functions__special__bessel__yn(): from sympy.functions.special.bessel import yn assert _test_args(yn(0, x)) def test_sympy__functions__special__bessel__AiryBase(): pass def test_sympy__functions__special__bessel__airyai(): from sympy.functions.special.bessel import airyai assert _test_args(airyai(2)) def test_sympy__functions__special__bessel__airybi(): from sympy.functions.special.bessel import airybi assert _test_args(airybi(2)) def test_sympy__functions__special__bessel__airyaiprime(): from sympy.functions.special.bessel import airyaiprime assert _test_args(airyaiprime(2)) def test_sympy__functions__special__bessel__airybiprime(): from sympy.functions.special.bessel import airybiprime assert _test_args(airybiprime(2)) def test_sympy__functions__special__elliptic_integrals__elliptic_k(): from sympy.functions.special.elliptic_integrals import elliptic_k as K assert _test_args(K(x)) def test_sympy__functions__special__elliptic_integrals__elliptic_f(): from sympy.functions.special.elliptic_integrals import elliptic_f as F assert _test_args(F(x, y)) def test_sympy__functions__special__elliptic_integrals__elliptic_e(): from sympy.functions.special.elliptic_integrals import elliptic_e as E assert _test_args(E(x)) assert _test_args(E(x, y)) def test_sympy__functions__special__elliptic_integrals__elliptic_pi(): from sympy.functions.special.elliptic_integrals import elliptic_pi as P assert _test_args(P(x, y)) assert _test_args(P(x, y, z)) def test_sympy__functions__special__delta_functions__DiracDelta(): from sympy.functions.special.delta_functions import DiracDelta assert _test_args(DiracDelta(x, 1)) def test_sympy__functions__special__delta_functions__Heaviside(): from sympy.functions.special.delta_functions import Heaviside assert _test_args(Heaviside(x)) def test_sympy__functions__special__error_functions__erf(): from sympy.functions.special.error_functions import erf assert _test_args(erf(2)) def 
test_sympy__functions__special__error_functions__erfc(): from sympy.functions.special.error_functions import erfc assert _test_args(erfc(2)) def test_sympy__functions__special__error_functions__erfi(): from sympy.functions.special.error_functions import erfi assert _test_args(erfi(2)) def test_sympy__functions__special__error_functions__erf2(): from sympy.functions.special.error_functions import erf2 assert _test_args(erf2(2, 3)) def test_sympy__functions__special__error_functions__erfinv(): from sympy.functions.special.error_functions import erfinv assert _test_args(erfinv(2)) def test_sympy__functions__special__error_functions__erfcinv(): from sympy.functions.special.error_functions import erfcinv assert _test_args(erfcinv(2)) def test_sympy__functions__special__error_functions__erf2inv(): from sympy.functions.special.error_functions import erf2inv assert _test_args(erf2inv(2, 3)) @SKIP("abstract class") def test_sympy__functions__special__error_functions__FresnelIntegral(): pass def test_sympy__functions__special__error_functions__fresnels(): from sympy.functions.special.error_functions import fresnels assert _test_args(fresnels(2)) def test_sympy__functions__special__error_functions__fresnelc(): from sympy.functions.special.error_functions import fresnelc assert _test_args(fresnelc(2)) def test_sympy__functions__special__error_functions__erfs(): from sympy.functions.special.error_functions import _erfs assert _test_args(_erfs(2)) def test_sympy__functions__special__error_functions__Ei(): from sympy.functions.special.error_functions import Ei assert _test_args(Ei(2)) def test_sympy__functions__special__error_functions__li(): from sympy.functions.special.error_functions import li assert _test_args(li(2)) def test_sympy__functions__special__error_functions__Li(): from sympy.functions.special.error_functions import Li assert _test_args(Li(2)) @SKIP("abstract class") def test_sympy__functions__special__error_functions__TrigonometricIntegral(): pass def test_sympy__functions__special__error_functions__Si(): from sympy.functions.special.error_functions import Si assert _test_args(Si(2)) def test_sympy__functions__special__error_functions__Ci(): from sympy.functions.special.error_functions import Ci assert _test_args(Ci(2)) def test_sympy__functions__special__error_functions__Shi(): from sympy.functions.special.error_functions import Shi assert _test_args(Shi(2)) def test_sympy__functions__special__error_functions__Chi(): from sympy.functions.special.error_functions import Chi assert _test_args(Chi(2)) def test_sympy__functions__special__error_functions__expint(): from sympy.functions.special.error_functions import expint assert _test_args(expint(y, x)) def test_sympy__functions__special__gamma_functions__gamma(): from sympy.functions.special.gamma_functions import gamma assert _test_args(gamma(x)) def test_sympy__functions__special__gamma_functions__loggamma(): from sympy.functions.special.gamma_functions import loggamma assert _test_args(loggamma(2)) def test_sympy__functions__special__gamma_functions__lowergamma(): from sympy.functions.special.gamma_functions import lowergamma assert _test_args(lowergamma(x, 2)) def test_sympy__functions__special__gamma_functions__polygamma(): from sympy.functions.special.gamma_functions import polygamma assert _test_args(polygamma(x, 2)) def test_sympy__functions__special__gamma_functions__uppergamma(): from sympy.functions.special.gamma_functions import uppergamma assert _test_args(uppergamma(x, 2)) def 
test_sympy__functions__special__beta_functions__beta(): from sympy.functions.special.beta_functions import beta assert _test_args(beta(x, x)) @SKIP("abstract class") def test_sympy__functions__special__hyper__TupleParametersBase(): pass @SKIP("abstract class") def test_sympy__functions__special__hyper__TupleArg(): pass def test_sympy__functions__special__hyper__hyper(): from sympy.functions.special.hyper import hyper assert _test_args(hyper([1, 2, 3], [4, 5], x)) def test_sympy__functions__special__hyper__meijerg(): from sympy.functions.special.hyper import meijerg assert _test_args(meijerg([1, 2, 3], [4, 5], [6], [], x)) @SKIP("abstract class") def test_sympy__functions__special__hyper__HyperRep(): pass def test_sympy__functions__special__hyper__HyperRep_power1(): from sympy.functions.special.hyper import HyperRep_power1 assert _test_args(HyperRep_power1(x, y)) def test_sympy__functions__special__hyper__HyperRep_power2(): from sympy.functions.special.hyper import HyperRep_power2 assert _test_args(HyperRep_power2(x, y)) def test_sympy__functions__special__hyper__HyperRep_log1(): from sympy.functions.special.hyper import HyperRep_log1 assert _test_args(HyperRep_log1(x)) def test_sympy__functions__special__hyper__HyperRep_atanh(): from sympy.functions.special.hyper import HyperRep_atanh assert _test_args(HyperRep_atanh(x)) def test_sympy__functions__special__hyper__HyperRep_asin1(): from sympy.functions.special.hyper import HyperRep_asin1 assert _test_args(HyperRep_asin1(x)) def test_sympy__functions__special__hyper__HyperRep_asin2(): from sympy.functions.special.hyper import HyperRep_asin2 assert _test_args(HyperRep_asin2(x)) def test_sympy__functions__special__hyper__HyperRep_sqrts1(): from sympy.functions.special.hyper import HyperRep_sqrts1 assert _test_args(HyperRep_sqrts1(x, y)) def test_sympy__functions__special__hyper__HyperRep_sqrts2(): from sympy.functions.special.hyper import HyperRep_sqrts2 assert _test_args(HyperRep_sqrts2(x, y)) def test_sympy__functions__special__hyper__HyperRep_log2(): from sympy.functions.special.hyper import HyperRep_log2 assert _test_args(HyperRep_log2(x)) def test_sympy__functions__special__hyper__HyperRep_cosasin(): from sympy.functions.special.hyper import HyperRep_cosasin assert _test_args(HyperRep_cosasin(x, y)) def test_sympy__functions__special__hyper__HyperRep_sinasin(): from sympy.functions.special.hyper import HyperRep_sinasin assert _test_args(HyperRep_sinasin(x, y)) @SKIP("abstract class") def test_sympy__functions__special__polynomials__OrthogonalPolynomial(): pass def test_sympy__functions__special__polynomials__jacobi(): from sympy.functions.special.polynomials import jacobi assert _test_args(jacobi(x, 2, 2, 2)) def test_sympy__functions__special__polynomials__gegenbauer(): from sympy.functions.special.polynomials import gegenbauer assert _test_args(gegenbauer(x, 2, 2)) def test_sympy__functions__special__polynomials__chebyshevt(): from sympy.functions.special.polynomials import chebyshevt assert _test_args(chebyshevt(x, 2)) def test_sympy__functions__special__polynomials__chebyshevt_root(): from sympy.functions.special.polynomials import chebyshevt_root assert _test_args(chebyshevt_root(3, 2)) def test_sympy__functions__special__polynomials__chebyshevu(): from sympy.functions.special.polynomials import chebyshevu assert _test_args(chebyshevu(x, 2)) def test_sympy__functions__special__polynomials__chebyshevu_root(): from sympy.functions.special.polynomials import chebyshevu_root assert _test_args(chebyshevu_root(3, 2)) def 
test_sympy__functions__special__polynomials__hermite(): from sympy.functions.special.polynomials import hermite assert _test_args(hermite(x, 2)) def test_sympy__functions__special__polynomials__legendre(): from sympy.functions.special.polynomials import legendre assert _test_args(legendre(x, 2)) def test_sympy__functions__special__polynomials__assoc_legendre(): from sympy.functions.special.polynomials import assoc_legendre assert _test_args(assoc_legendre(x, 0, y)) def test_sympy__functions__special__polynomials__laguerre(): from sympy.functions.special.polynomials import laguerre assert _test_args(laguerre(x, 2)) def test_sympy__functions__special__polynomials__assoc_laguerre(): from sympy.functions.special.polynomials import assoc_laguerre assert _test_args(assoc_laguerre(x, 0, y)) def test_sympy__functions__special__spherical_harmonics__Ynm(): from sympy.functions.special.spherical_harmonics import Ynm assert _test_args(Ynm(1, 1, x, y)) def test_sympy__functions__special__spherical_harmonics__Znm(): from sympy.functions.special.spherical_harmonics import Znm assert _test_args(Znm(1, 1, x, y)) def test_sympy__functions__special__tensor_functions__LeviCivita(): from sympy.functions.special.tensor_functions import LeviCivita assert _test_args(LeviCivita(x, y, 2)) def test_sympy__functions__special__tensor_functions__KroneckerDelta(): from sympy.functions.special.tensor_functions import KroneckerDelta assert _test_args(KroneckerDelta(x, y)) def test_sympy__functions__special__zeta_functions__dirichlet_eta(): from sympy.functions.special.zeta_functions import dirichlet_eta assert _test_args(dirichlet_eta(x)) def test_sympy__functions__special__zeta_functions__zeta(): from sympy.functions.special.zeta_functions import zeta assert _test_args(zeta(101)) def test_sympy__functions__special__zeta_functions__lerchphi(): from sympy.functions.special.zeta_functions import lerchphi assert _test_args(lerchphi(x, y, z)) def test_sympy__functions__special__zeta_functions__polylog(): from sympy.functions.special.zeta_functions import polylog assert _test_args(polylog(x, y)) def test_sympy__integrals__integrals__Integral(): from sympy.integrals.integrals import Integral assert _test_args(Integral(2, (x, 0, 1))) def test_sympy__integrals__risch__NonElementaryIntegral(): from sympy.integrals.risch import NonElementaryIntegral assert _test_args(NonElementaryIntegral(exp(-x**2), x)) @SKIP("abstract class") def test_sympy__integrals__transforms__IntegralTransform(): pass def test_sympy__integrals__transforms__MellinTransform(): from sympy.integrals.transforms import MellinTransform assert _test_args(MellinTransform(2, x, y)) def test_sympy__integrals__transforms__InverseMellinTransform(): from sympy.integrals.transforms import InverseMellinTransform assert _test_args(InverseMellinTransform(2, x, y, 0, 1)) def test_sympy__integrals__transforms__LaplaceTransform(): from sympy.integrals.transforms import LaplaceTransform assert _test_args(LaplaceTransform(2, x, y)) def test_sympy__integrals__transforms__InverseLaplaceTransform(): from sympy.integrals.transforms import InverseLaplaceTransform assert _test_args(InverseLaplaceTransform(2, x, y, 0)) @SKIP("abstract class") def test_sympy__integrals__transforms__FourierTypeTransform(): pass def test_sympy__integrals__transforms__InverseFourierTransform(): from sympy.integrals.transforms import InverseFourierTransform assert _test_args(InverseFourierTransform(2, x, y)) def test_sympy__integrals__transforms__FourierTransform(): from sympy.integrals.transforms import 
FourierTransform assert _test_args(FourierTransform(2, x, y)) @SKIP("abstract class") def test_sympy__integrals__transforms__SineCosineTypeTransform(): pass def test_sympy__integrals__transforms__InverseSineTransform(): from sympy.integrals.transforms import InverseSineTransform assert _test_args(InverseSineTransform(2, x, y)) def test_sympy__integrals__transforms__SineTransform(): from sympy.integrals.transforms import SineTransform assert _test_args(SineTransform(2, x, y)) def test_sympy__integrals__transforms__InverseCosineTransform(): from sympy.integrals.transforms import InverseCosineTransform assert _test_args(InverseCosineTransform(2, x, y)) def test_sympy__integrals__transforms__CosineTransform(): from sympy.integrals.transforms import CosineTransform assert _test_args(CosineTransform(2, x, y)) @SKIP("abstract class") def test_sympy__integrals__transforms__HankelTypeTransform(): pass def test_sympy__integrals__transforms__InverseHankelTransform(): from sympy.integrals.transforms import InverseHankelTransform assert _test_args(InverseHankelTransform(2, x, y, 0)) def test_sympy__integrals__transforms__HankelTransform(): from sympy.integrals.transforms import HankelTransform assert _test_args(HankelTransform(2, x, y, 0)) @XFAIL def test_sympy__liealgebras__cartan_type__CartanType_generator(): from sympy.liealgebras.cartan_type import CartanType_generator assert _test_args(CartanType_generator("A2")) @XFAIL def test_sympy__liealgebras__cartan_type__Standard_Cartan(): from sympy.liealgebras.cartan_type import Standard_Cartan assert _test_args(Standard_Cartan("A", 2)) @XFAIL def test_sympy__liealgebras__weyl_group__WeylGroup(): from sympy.liealgebras.weyl_group import WeylGroup assert _test_args(WeylGroup("B4")) @XFAIL def test_sympy__liealgebras__root_system__RootSystem(): from sympy.liealgebras.root_system import RootSystem assert _test_args(RootSystem("A2")) @XFAIL def test_sympy__liealgebras__type_a__TypeA(): from sympy.liealgebras.type_a import TypeA assert _test_args(TypeA(2)) @XFAIL def test_sympy__liealgebras__type_b__TypeB(): from sympy.liealgebras.type_b import TypeB assert _test_args(TypeB(4)) @XFAIL def test_sympy__liealgebras__type_c__TypeC(): from sympy.liealgebras.type_c import TypeC assert _test_args(TypeC(4)) @XFAIL def test_sympy__liealgebras__type_d__TypeD(): from sympy.liealgebras.type_d import TypeD assert _test_args(TypeD(4)) @XFAIL def test_sympy__liealgebras__type_e__TypeE(): from sympy.liealgebras.type_e import TypeE assert _test_args(TypeE(6)) @XFAIL def test_sympy__liealgebras__type_f__TypeF(): from sympy.liealgebras.type_f import TypeF assert _test_args(TypeF(4)) @XFAIL def test_sympy__liealgebras__type_g__TypeG(): from sympy.liealgebras.type_g import TypeG assert _test_args(TypeG(2)) def test_sympy__logic__boolalg__And(): from sympy.logic.boolalg import And assert _test_args(And(x, y, 2)) @SKIP("abstract class") def test_sympy__logic__boolalg__Boolean(): pass def test_sympy__logic__boolalg__BooleanFunction(): from sympy.logic.boolalg import BooleanFunction assert _test_args(BooleanFunction(1, 2, 3)) @SKIP("abstract class") def test_sympy__logic__boolalg__BooleanAtom(): pass def test_sympy__logic__boolalg__BooleanTrue(): from sympy.logic.boolalg import true assert _test_args(true) def test_sympy__logic__boolalg__BooleanFalse(): from sympy.logic.boolalg import false assert _test_args(false) def test_sympy__logic__boolalg__Equivalent(): from sympy.logic.boolalg import Equivalent assert _test_args(Equivalent(x, 2)) def test_sympy__logic__boolalg__ITE(): from 
sympy.logic.boolalg import ITE assert _test_args(ITE(x, y, 2)) def test_sympy__logic__boolalg__Implies(): from sympy.logic.boolalg import Implies assert _test_args(Implies(x, y)) def test_sympy__logic__boolalg__Nand(): from sympy.logic.boolalg import Nand assert _test_args(Nand(x, y, 2)) def test_sympy__logic__boolalg__Nor(): from sympy.logic.boolalg import Nor assert _test_args(Nor(x, y)) def test_sympy__logic__boolalg__Not(): from sympy.logic.boolalg import Not assert _test_args(Not(x)) def test_sympy__logic__boolalg__Or(): from sympy.logic.boolalg import Or assert _test_args(Or(x, y)) def test_sympy__logic__boolalg__Xor(): from sympy.logic.boolalg import Xor assert _test_args(Xor(x, y, 2)) def test_sympy__matrices__matrices__DeferredVector(): from sympy.matrices.matrices import DeferredVector assert _test_args(DeferredVector("X")) @SKIP("abstract class") def test_sympy__matrices__expressions__matexpr__MatrixBase(): pass def test_sympy__matrices__immutable__ImmutableMatrix(): from sympy.matrices.immutable import ImmutableMatrix m = ImmutableMatrix([[1, 2], [3, 4]]) assert _test_args(m) assert _test_args(Basic(*list(m))) m = ImmutableMatrix(1, 1, [1]) assert _test_args(m) assert _test_args(Basic(*list(m))) m = ImmutableMatrix(2, 2, lambda i, j: 1) assert m[0, 0] is S.One m = ImmutableMatrix(2, 2, lambda i, j: 1/(1 + i) + 1/(1 + j)) assert m[1, 1] is S.One # true div. will give 1.0 if i,j not sympified assert _test_args(m) assert _test_args(Basic(*list(m))) def test_sympy__matrices__immutable__ImmutableSparseMatrix(): from sympy.matrices.immutable import ImmutableSparseMatrix m = ImmutableSparseMatrix([[1, 2], [3, 4]]) assert _test_args(m) assert _test_args(Basic(*list(m))) m = ImmutableSparseMatrix(1, 1, {(0, 0): 1}) assert _test_args(m) assert _test_args(Basic(*list(m))) m = ImmutableSparseMatrix(1, 1, [1]) assert _test_args(m) assert _test_args(Basic(*list(m))) m = ImmutableSparseMatrix(2, 2, lambda i, j: 1) assert m[0, 0] is S.One m = ImmutableSparseMatrix(2, 2, lambda i, j: 1/(1 + i) + 1/(1 + j)) assert m[1, 1] is S.One # true div. 
will give 1.0 if i,j not sympified assert _test_args(m) assert _test_args(Basic(*list(m))) def test_sympy__matrices__expressions__slice__MatrixSlice(): from sympy.matrices.expressions.slice import MatrixSlice from sympy.matrices.expressions import MatrixSymbol X = MatrixSymbol('X', 4, 4) assert _test_args(MatrixSlice(X, (0, 2), (0, 2))) def test_sympy__matrices__expressions__blockmatrix__BlockDiagMatrix(): from sympy.matrices.expressions.blockmatrix import BlockDiagMatrix from sympy.matrices.expressions import MatrixSymbol X = MatrixSymbol('X', x, x) Y = MatrixSymbol('Y', y, y) assert _test_args(BlockDiagMatrix(X, Y)) def test_sympy__matrices__expressions__blockmatrix__BlockMatrix(): from sympy.matrices.expressions.blockmatrix import BlockMatrix from sympy.matrices.expressions import MatrixSymbol, ZeroMatrix X = MatrixSymbol('X', x, x) Y = MatrixSymbol('Y', y, y) Z = MatrixSymbol('Z', x, y) O = ZeroMatrix(y, x) assert _test_args(BlockMatrix([[X, Z], [O, Y]])) def test_sympy__matrices__expressions__inverse__Inverse(): from sympy.matrices.expressions.inverse import Inverse from sympy.matrices.expressions import MatrixSymbol assert _test_args(Inverse(MatrixSymbol('A', 3, 3))) def test_sympy__matrices__expressions__matadd__MatAdd(): from sympy.matrices.expressions.matadd import MatAdd from sympy.matrices.expressions import MatrixSymbol X = MatrixSymbol('X', x, y) Y = MatrixSymbol('Y', x, y) assert _test_args(MatAdd(X, Y)) def test_sympy__matrices__expressions__matexpr__Identity(): from sympy.matrices.expressions.matexpr import Identity assert _test_args(Identity(3)) @SKIP("abstract class") def test_sympy__matrices__expressions__matexpr__MatrixExpr(): pass def test_sympy__matrices__expressions__matexpr__MatrixElement(): from sympy.matrices.expressions.matexpr import MatrixSymbol, MatrixElement from sympy import S assert _test_args(MatrixElement(MatrixSymbol('A', 3, 5), S(2), S(3))) @XFAIL def test_sympy__matrices__expressions__matexpr__MatrixSymbol(): from sympy.matrices.expressions.matexpr import MatrixSymbol assert _test_args(MatrixSymbol('A', 3, 5)) def test_sympy__matrices__expressions__matexpr__ZeroMatrix(): from sympy.matrices.expressions.matexpr import ZeroMatrix assert _test_args(ZeroMatrix(3, 5)) def test_sympy__matrices__expressions__matmul__MatMul(): from sympy.matrices.expressions.matmul import MatMul from sympy.matrices.expressions import MatrixSymbol X = MatrixSymbol('X', x, y) Y = MatrixSymbol('Y', y, x) assert _test_args(MatMul(X, Y)) def test_sympy__matrices__expressions__diagonal__DiagonalMatrix(): from sympy.matrices.expressions.diagonal import DiagonalMatrix from sympy.matrices.expressions import MatrixSymbol x = MatrixSymbol('x', 10, 1) assert _test_args(DiagonalMatrix(x)) def test_sympy__matrices__expressions__diagonal__DiagonalOf(): from sympy.matrices.expressions.diagonal import DiagonalOf from sympy.matrices.expressions import MatrixSymbol X = MatrixSymbol('x', 10, 10) assert _test_args(DiagonalOf(X)) def test_sympy__matrices__expressions__hadamard__HadamardProduct(): from sympy.matrices.expressions.hadamard import HadamardProduct from sympy.matrices.expressions import MatrixSymbol X = MatrixSymbol('X', x, y) Y = MatrixSymbol('Y', x, y) assert _test_args(HadamardProduct(X, Y)) def test_sympy__matrices__expressions__matpow__MatPow(): from sympy.matrices.expressions.matpow import MatPow from sympy.matrices.expressions import MatrixSymbol X = MatrixSymbol('X', x, x) assert _test_args(MatPow(X, 2)) def test_sympy__matrices__expressions__transpose__Transpose(): from 
sympy.matrices.expressions.transpose import Transpose from sympy.matrices.expressions import MatrixSymbol assert _test_args(Transpose(MatrixSymbol('A', 3, 5))) def test_sympy__matrices__expressions__adjoint__Adjoint(): from sympy.matrices.expressions.adjoint import Adjoint from sympy.matrices.expressions import MatrixSymbol assert _test_args(Adjoint(MatrixSymbol('A', 3, 5))) def test_sympy__matrices__expressions__trace__Trace(): from sympy.matrices.expressions.trace import Trace from sympy.matrices.expressions import MatrixSymbol assert _test_args(Trace(MatrixSymbol('A', 3, 3))) def test_sympy__matrices__expressions__determinant__Determinant(): from sympy.matrices.expressions.determinant import Determinant from sympy.matrices.expressions import MatrixSymbol assert _test_args(Determinant(MatrixSymbol('A', 3, 3))) def test_sympy__matrices__expressions__funcmatrix__FunctionMatrix(): from sympy.matrices.expressions.funcmatrix import FunctionMatrix from sympy import Lambda, symbols i, j = symbols('i,j') assert _test_args(FunctionMatrix(3, 3, Lambda((i, j), i - j) )) def test_sympy__matrices__expressions__fourier__DFT(): from sympy.matrices.expressions.fourier import DFT from sympy import S assert _test_args(DFT(S(2))) def test_sympy__matrices__expressions__fourier__IDFT(): from sympy.matrices.expressions.fourier import IDFT from sympy import S assert _test_args(IDFT(S(2))) from sympy.matrices.expressions import MatrixSymbol X = MatrixSymbol('X', 10, 10) def test_sympy__matrices__expressions__factorizations__LofLU(): from sympy.matrices.expressions.factorizations import LofLU assert _test_args(LofLU(X)) def test_sympy__matrices__expressions__factorizations__UofLU(): from sympy.matrices.expressions.factorizations import UofLU assert _test_args(UofLU(X)) def test_sympy__matrices__expressions__factorizations__QofQR(): from sympy.matrices.expressions.factorizations import QofQR assert _test_args(QofQR(X)) def test_sympy__matrices__expressions__factorizations__RofQR(): from sympy.matrices.expressions.factorizations import RofQR assert _test_args(RofQR(X)) def test_sympy__matrices__expressions__factorizations__LofCholesky(): from sympy.matrices.expressions.factorizations import LofCholesky assert _test_args(LofCholesky(X)) def test_sympy__matrices__expressions__factorizations__UofCholesky(): from sympy.matrices.expressions.factorizations import UofCholesky assert _test_args(UofCholesky(X)) def test_sympy__matrices__expressions__factorizations__EigenVectors(): from sympy.matrices.expressions.factorizations import EigenVectors assert _test_args(EigenVectors(X)) def test_sympy__matrices__expressions__factorizations__EigenValues(): from sympy.matrices.expressions.factorizations import EigenValues assert _test_args(EigenValues(X)) def test_sympy__matrices__expressions__factorizations__UofSVD(): from sympy.matrices.expressions.factorizations import UofSVD assert _test_args(UofSVD(X)) def test_sympy__matrices__expressions__factorizations__VofSVD(): from sympy.matrices.expressions.factorizations import VofSVD assert _test_args(VofSVD(X)) def test_sympy__matrices__expressions__factorizations__SofSVD(): from sympy.matrices.expressions.factorizations import SofSVD assert _test_args(SofSVD(X)) @SKIP("abstract class") def test_sympy__matrices__expressions__factorizations__Factorization(): pass def test_sympy__physics__vector__frame__CoordinateSym(): from sympy.physics.vector import CoordinateSym from sympy.physics.vector import ReferenceFrame assert _test_args(CoordinateSym('R_x', ReferenceFrame('R'), 0)) def 
test_sympy__physics__paulialgebra__Pauli(): from sympy.physics.paulialgebra import Pauli assert _test_args(Pauli(1)) def test_sympy__physics__quantum__anticommutator__AntiCommutator(): from sympy.physics.quantum.anticommutator import AntiCommutator assert _test_args(AntiCommutator(x, y)) def test_sympy__physics__quantum__cartesian__PositionBra3D(): from sympy.physics.quantum.cartesian import PositionBra3D assert _test_args(PositionBra3D(x, y, z)) def test_sympy__physics__quantum__cartesian__PositionKet3D(): from sympy.physics.quantum.cartesian import PositionKet3D assert _test_args(PositionKet3D(x, y, z)) def test_sympy__physics__quantum__cartesian__PositionState3D(): from sympy.physics.quantum.cartesian import PositionState3D assert _test_args(PositionState3D(x, y, z)) def test_sympy__physics__quantum__cartesian__PxBra(): from sympy.physics.quantum.cartesian import PxBra assert _test_args(PxBra(x, y, z)) def test_sympy__physics__quantum__cartesian__PxKet(): from sympy.physics.quantum.cartesian import PxKet assert _test_args(PxKet(x, y, z)) def test_sympy__physics__quantum__cartesian__PxOp(): from sympy.physics.quantum.cartesian import PxOp assert _test_args(PxOp(x, y, z)) def test_sympy__physics__quantum__cartesian__XBra(): from sympy.physics.quantum.cartesian import XBra assert _test_args(XBra(x)) def test_sympy__physics__quantum__cartesian__XKet(): from sympy.physics.quantum.cartesian import XKet assert _test_args(XKet(x)) def test_sympy__physics__quantum__cartesian__XOp(): from sympy.physics.quantum.cartesian import XOp assert _test_args(XOp(x)) def test_sympy__physics__quantum__cartesian__YOp(): from sympy.physics.quantum.cartesian import YOp assert _test_args(YOp(x)) def test_sympy__physics__quantum__cartesian__ZOp(): from sympy.physics.quantum.cartesian import ZOp assert _test_args(ZOp(x)) def test_sympy__physics__quantum__cg__CG(): from sympy.physics.quantum.cg import CG from sympy import S assert _test_args(CG(S(3)/2, S(3)/2, S(1)/2, -S(1)/2, 1, 1)) def test_sympy__physics__quantum__cg__Wigner3j(): from sympy.physics.quantum.cg import Wigner3j assert _test_args(Wigner3j(6, 0, 4, 0, 2, 0)) def test_sympy__physics__quantum__cg__Wigner6j(): from sympy.physics.quantum.cg import Wigner6j assert _test_args(Wigner6j(1, 2, 3, 2, 1, 2)) def test_sympy__physics__quantum__cg__Wigner9j(): from sympy.physics.quantum.cg import Wigner9j assert _test_args(Wigner9j(2, 1, 1, S(3)/2, S(1)/2, 1, S(1)/2, S(1)/2, 0)) def test_sympy__physics__quantum__circuitplot__Mz(): from sympy.physics.quantum.circuitplot import Mz assert _test_args(Mz(0)) def test_sympy__physics__quantum__circuitplot__Mx(): from sympy.physics.quantum.circuitplot import Mx assert _test_args(Mx(0)) def test_sympy__physics__quantum__commutator__Commutator(): from sympy.physics.quantum.commutator import Commutator A, B = symbols('A,B', commutative=False) assert _test_args(Commutator(A, B)) def test_sympy__physics__quantum__constants__HBar(): from sympy.physics.quantum.constants import HBar assert _test_args(HBar()) def test_sympy__physics__quantum__dagger__Dagger(): from sympy.physics.quantum.dagger import Dagger from sympy.physics.quantum.state import Ket assert _test_args(Dagger(Dagger(Ket('psi')))) def test_sympy__physics__quantum__gate__CGate(): from sympy.physics.quantum.gate import CGate, Gate assert _test_args(CGate((0, 1), Gate(2))) def test_sympy__physics__quantum__gate__CGateS(): from sympy.physics.quantum.gate import CGateS, Gate assert _test_args(CGateS((0, 1), Gate(2))) def test_sympy__physics__quantum__gate__CNotGate(): 
from sympy.physics.quantum.gate import CNotGate assert _test_args(CNotGate(0, 1)) def test_sympy__physics__quantum__gate__Gate(): from sympy.physics.quantum.gate import Gate assert _test_args(Gate(0)) def test_sympy__physics__quantum__gate__HadamardGate(): from sympy.physics.quantum.gate import HadamardGate assert _test_args(HadamardGate(0)) def test_sympy__physics__quantum__gate__IdentityGate(): from sympy.physics.quantum.gate import IdentityGate assert _test_args(IdentityGate(0)) def test_sympy__physics__quantum__gate__OneQubitGate(): from sympy.physics.quantum.gate import OneQubitGate assert _test_args(OneQubitGate(0)) def test_sympy__physics__quantum__gate__PhaseGate(): from sympy.physics.quantum.gate import PhaseGate assert _test_args(PhaseGate(0)) def test_sympy__physics__quantum__gate__SwapGate(): from sympy.physics.quantum.gate import SwapGate assert _test_args(SwapGate(0, 1)) def test_sympy__physics__quantum__gate__TGate(): from sympy.physics.quantum.gate import TGate assert _test_args(TGate(0)) def test_sympy__physics__quantum__gate__TwoQubitGate(): from sympy.physics.quantum.gate import TwoQubitGate assert _test_args(TwoQubitGate(0)) def test_sympy__physics__quantum__gate__UGate(): from sympy.physics.quantum.gate import UGate from sympy.matrices.immutable import ImmutableMatrix from sympy import Integer, Tuple assert _test_args( UGate(Tuple(Integer(1)), ImmutableMatrix([[1, 0], [0, 2]]))) def test_sympy__physics__quantum__gate__XGate(): from sympy.physics.quantum.gate import XGate assert _test_args(XGate(0)) def test_sympy__physics__quantum__gate__YGate(): from sympy.physics.quantum.gate import YGate assert _test_args(YGate(0)) def test_sympy__physics__quantum__gate__ZGate(): from sympy.physics.quantum.gate import ZGate assert _test_args(ZGate(0)) @SKIP("TODO: sympy.physics") def test_sympy__physics__quantum__grover__OracleGate(): from sympy.physics.quantum.grover import OracleGate assert _test_args(OracleGate()) def test_sympy__physics__quantum__grover__WGate(): from sympy.physics.quantum.grover import WGate assert _test_args(WGate(1)) def test_sympy__physics__quantum__hilbert__ComplexSpace(): from sympy.physics.quantum.hilbert import ComplexSpace assert _test_args(ComplexSpace(x)) def test_sympy__physics__quantum__hilbert__DirectSumHilbertSpace(): from sympy.physics.quantum.hilbert import DirectSumHilbertSpace, ComplexSpace, FockSpace c = ComplexSpace(2) f = FockSpace() assert _test_args(DirectSumHilbertSpace(c, f)) def test_sympy__physics__quantum__hilbert__FockSpace(): from sympy.physics.quantum.hilbert import FockSpace assert _test_args(FockSpace()) def test_sympy__physics__quantum__hilbert__HilbertSpace(): from sympy.physics.quantum.hilbert import HilbertSpace assert _test_args(HilbertSpace()) def test_sympy__physics__quantum__hilbert__L2(): from sympy.physics.quantum.hilbert import L2 from sympy import oo, Interval assert _test_args(L2(Interval(0, oo))) def test_sympy__physics__quantum__hilbert__TensorPowerHilbertSpace(): from sympy.physics.quantum.hilbert import TensorPowerHilbertSpace, FockSpace f = FockSpace() assert _test_args(TensorPowerHilbertSpace(f, 2)) def test_sympy__physics__quantum__hilbert__TensorProductHilbertSpace(): from sympy.physics.quantum.hilbert import TensorProductHilbertSpace, FockSpace, ComplexSpace c = ComplexSpace(2) f = FockSpace() assert _test_args(TensorProductHilbertSpace(f, c)) def test_sympy__physics__quantum__innerproduct__InnerProduct(): from sympy.physics.quantum import Bra, Ket, InnerProduct b = Bra('b') k = Ket('k') assert 
_test_args(InnerProduct(b, k)) def test_sympy__physics__quantum__operator__DifferentialOperator(): from sympy.physics.quantum.operator import DifferentialOperator from sympy import Derivative, Function f = Function('f') assert _test_args(DifferentialOperator(1/x*Derivative(f(x), x), f(x))) def test_sympy__physics__quantum__operator__HermitianOperator(): from sympy.physics.quantum.operator import HermitianOperator assert _test_args(HermitianOperator('H')) def test_sympy__physics__quantum__operator__IdentityOperator(): from sympy.physics.quantum.operator import IdentityOperator assert _test_args(IdentityOperator(5)) def test_sympy__physics__quantum__operator__Operator(): from sympy.physics.quantum.operator import Operator assert _test_args(Operator('A')) def test_sympy__physics__quantum__operator__OuterProduct(): from sympy.physics.quantum.operator import OuterProduct from sympy.physics.quantum import Ket, Bra b = Bra('b') k = Ket('k') assert _test_args(OuterProduct(k, b)) def test_sympy__physics__quantum__operator__UnitaryOperator(): from sympy.physics.quantum.operator import UnitaryOperator assert _test_args(UnitaryOperator('U')) def test_sympy__physics__quantum__piab__PIABBra(): from sympy.physics.quantum.piab import PIABBra assert _test_args(PIABBra('B')) def test_sympy__physics__quantum__boson__BosonOp(): from sympy.physics.quantum.boson import BosonOp assert _test_args(BosonOp('a')) assert _test_args(BosonOp('a', False)) def test_sympy__physics__quantum__boson__BosonFockKet(): from sympy.physics.quantum.boson import BosonFockKet assert _test_args(BosonFockKet(1)) def test_sympy__physics__quantum__boson__BosonFockBra(): from sympy.physics.quantum.boson import BosonFockBra assert _test_args(BosonFockBra(1)) def test_sympy__physics__quantum__boson__BosonCoherentKet(): from sympy.physics.quantum.boson import BosonCoherentKet assert _test_args(BosonCoherentKet(1)) def test_sympy__physics__quantum__boson__BosonCoherentBra(): from sympy.physics.quantum.boson import BosonCoherentBra assert _test_args(BosonCoherentBra(1)) def test_sympy__physics__quantum__fermion__FermionOp(): from sympy.physics.quantum.fermion import FermionOp assert _test_args(FermionOp('c')) assert _test_args(FermionOp('c', False)) def test_sympy__physics__quantum__fermion__FermionFockKet(): from sympy.physics.quantum.fermion import FermionFockKet assert _test_args(FermionFockKet(1)) def test_sympy__physics__quantum__fermion__FermionFockBra(): from sympy.physics.quantum.fermion import FermionFockBra assert _test_args(FermionFockBra(1)) def test_sympy__physics__quantum__pauli__SigmaOpBase(): from sympy.physics.quantum.pauli import SigmaOpBase assert _test_args(SigmaOpBase()) def test_sympy__physics__quantum__pauli__SigmaX(): from sympy.physics.quantum.pauli import SigmaX assert _test_args(SigmaX()) def test_sympy__physics__quantum__pauli__SigmaY(): from sympy.physics.quantum.pauli import SigmaY assert _test_args(SigmaY()) def test_sympy__physics__quantum__pauli__SigmaZ(): from sympy.physics.quantum.pauli import SigmaZ assert _test_args(SigmaZ()) def test_sympy__physics__quantum__pauli__SigmaMinus(): from sympy.physics.quantum.pauli import SigmaMinus assert _test_args(SigmaMinus()) def test_sympy__physics__quantum__pauli__SigmaPlus(): from sympy.physics.quantum.pauli import SigmaPlus assert _test_args(SigmaPlus()) def test_sympy__physics__quantum__pauli__SigmaZKet(): from sympy.physics.quantum.pauli import SigmaZKet assert _test_args(SigmaZKet(0)) def test_sympy__physics__quantum__pauli__SigmaZBra(): from 
sympy.physics.quantum.pauli import SigmaZBra assert _test_args(SigmaZBra(0)) def test_sympy__physics__quantum__piab__PIABHamiltonian(): from sympy.physics.quantum.piab import PIABHamiltonian assert _test_args(PIABHamiltonian('P')) def test_sympy__physics__quantum__piab__PIABKet(): from sympy.physics.quantum.piab import PIABKet assert _test_args(PIABKet('K')) def test_sympy__physics__quantum__qexpr__QExpr(): from sympy.physics.quantum.qexpr import QExpr assert _test_args(QExpr(0)) def test_sympy__physics__quantum__qft__Fourier(): from sympy.physics.quantum.qft import Fourier assert _test_args(Fourier(0, 1)) def test_sympy__physics__quantum__qft__IQFT(): from sympy.physics.quantum.qft import IQFT assert _test_args(IQFT(0, 1)) def test_sympy__physics__quantum__qft__QFT(): from sympy.physics.quantum.qft import QFT assert _test_args(QFT(0, 1)) def test_sympy__physics__quantum__qft__RkGate(): from sympy.physics.quantum.qft import RkGate assert _test_args(RkGate(0, 1)) def test_sympy__physics__quantum__qubit__IntQubit(): from sympy.physics.quantum.qubit import IntQubit assert _test_args(IntQubit(0)) def test_sympy__physics__quantum__qubit__IntQubitBra(): from sympy.physics.quantum.qubit import IntQubitBra assert _test_args(IntQubitBra(0)) def test_sympy__physics__quantum__qubit__IntQubitState(): from sympy.physics.quantum.qubit import IntQubitState, QubitState assert _test_args(IntQubitState(QubitState(0, 1))) def test_sympy__physics__quantum__qubit__Qubit(): from sympy.physics.quantum.qubit import Qubit assert _test_args(Qubit(0, 0, 0)) def test_sympy__physics__quantum__qubit__QubitBra(): from sympy.physics.quantum.qubit import QubitBra assert _test_args(QubitBra('1', 0)) def test_sympy__physics__quantum__qubit__QubitState(): from sympy.physics.quantum.qubit import QubitState assert _test_args(QubitState(0, 1)) def test_sympy__physics__quantum__density__Density(): from sympy.physics.quantum.density import Density from sympy.physics.quantum.state import Ket assert _test_args(Density([Ket(0), 0.5], [Ket(1), 0.5])) @SKIP("TODO: sympy.physics.quantum.shor: Cmod Not Implemented") def test_sympy__physics__quantum__shor__CMod(): from sympy.physics.quantum.shor import CMod assert _test_args(CMod()) def test_sympy__physics__quantum__spin__CoupledSpinState(): from sympy.physics.quantum.spin import CoupledSpinState assert _test_args(CoupledSpinState(1, 0, (1, 1))) assert _test_args(CoupledSpinState(1, 0, (1, S(1)/2, S(1)/2))) assert _test_args(CoupledSpinState( 1, 0, (1, S(1)/2, S(1)/2), ((2, 3, S(1)/2), (1, 2, 1)) )) j, m, j1, j2, j3, j12, x = symbols('j m j1:4 j12 x') assert CoupledSpinState( j, m, (j1, j2, j3)).subs(j2, x) == CoupledSpinState(j, m, (j1, x, j3)) assert CoupledSpinState(j, m, (j1, j2, j3), ((1, 3, j12), (1, 2, j)) ).subs(j12, x) == \ CoupledSpinState(j, m, (j1, j2, j3), ((1, 3, x), (1, 2, j)) ) def test_sympy__physics__quantum__spin__J2Op(): from sympy.physics.quantum.spin import J2Op assert _test_args(J2Op('J')) def test_sympy__physics__quantum__spin__JminusOp(): from sympy.physics.quantum.spin import JminusOp assert _test_args(JminusOp('J')) def test_sympy__physics__quantum__spin__JplusOp(): from sympy.physics.quantum.spin import JplusOp assert _test_args(JplusOp('J')) def test_sympy__physics__quantum__spin__JxBra(): from sympy.physics.quantum.spin import JxBra assert _test_args(JxBra(1, 0)) def test_sympy__physics__quantum__spin__JxBraCoupled(): from sympy.physics.quantum.spin import JxBraCoupled assert _test_args(JxBraCoupled(1, 0, (1, 1))) def 
test_sympy__physics__quantum__spin__JxKet(): from sympy.physics.quantum.spin import JxKet assert _test_args(JxKet(1, 0)) def test_sympy__physics__quantum__spin__JxKetCoupled(): from sympy.physics.quantum.spin import JxKetCoupled assert _test_args(JxKetCoupled(1, 0, (1, 1))) def test_sympy__physics__quantum__spin__JxOp(): from sympy.physics.quantum.spin import JxOp assert _test_args(JxOp('J')) def test_sympy__physics__quantum__spin__JyBra(): from sympy.physics.quantum.spin import JyBra assert _test_args(JyBra(1, 0)) def test_sympy__physics__quantum__spin__JyBraCoupled(): from sympy.physics.quantum.spin import JyBraCoupled assert _test_args(JyBraCoupled(1, 0, (1, 1))) def test_sympy__physics__quantum__spin__JyKet(): from sympy.physics.quantum.spin import JyKet assert _test_args(JyKet(1, 0)) def test_sympy__physics__quantum__spin__JyKetCoupled(): from sympy.physics.quantum.spin import JyKetCoupled assert _test_args(JyKetCoupled(1, 0, (1, 1))) def test_sympy__physics__quantum__spin__JyOp(): from sympy.physics.quantum.spin import JyOp assert _test_args(JyOp('J')) def test_sympy__physics__quantum__spin__JzBra(): from sympy.physics.quantum.spin import JzBra assert _test_args(JzBra(1, 0)) def test_sympy__physics__quantum__spin__JzBraCoupled(): from sympy.physics.quantum.spin import JzBraCoupled assert _test_args(JzBraCoupled(1, 0, (1, 1))) def test_sympy__physics__quantum__spin__JzKet(): from sympy.physics.quantum.spin import JzKet assert _test_args(JzKet(1, 0)) def test_sympy__physics__quantum__spin__JzKetCoupled(): from sympy.physics.quantum.spin import JzKetCoupled assert _test_args(JzKetCoupled(1, 0, (1, 1))) def test_sympy__physics__quantum__spin__JzOp(): from sympy.physics.quantum.spin import JzOp assert _test_args(JzOp('J')) def test_sympy__physics__quantum__spin__Rotation(): from sympy.physics.quantum.spin import Rotation from sympy import pi assert _test_args(Rotation(pi, 0, pi/2)) def test_sympy__physics__quantum__spin__SpinState(): from sympy.physics.quantum.spin import SpinState assert _test_args(SpinState(1, 0)) def test_sympy__physics__quantum__spin__WignerD(): from sympy.physics.quantum.spin import WignerD assert _test_args(WignerD(0, 1, 2, 3, 4, 5)) def test_sympy__physics__quantum__state__Bra(): from sympy.physics.quantum.state import Bra assert _test_args(Bra(0)) def test_sympy__physics__quantum__state__BraBase(): from sympy.physics.quantum.state import BraBase assert _test_args(BraBase(0)) def test_sympy__physics__quantum__state__Ket(): from sympy.physics.quantum.state import Ket assert _test_args(Ket(0)) def test_sympy__physics__quantum__state__KetBase(): from sympy.physics.quantum.state import KetBase assert _test_args(KetBase(0)) def test_sympy__physics__quantum__state__State(): from sympy.physics.quantum.state import State assert _test_args(State(0)) def test_sympy__physics__quantum__state__StateBase(): from sympy.physics.quantum.state import StateBase assert _test_args(StateBase(0)) def test_sympy__physics__quantum__state__TimeDepBra(): from sympy.physics.quantum.state import TimeDepBra assert _test_args(TimeDepBra('psi', 't')) def test_sympy__physics__quantum__state__TimeDepKet(): from sympy.physics.quantum.state import TimeDepKet assert _test_args(TimeDepKet('psi', 't')) def test_sympy__physics__quantum__state__TimeDepState(): from sympy.physics.quantum.state import TimeDepState assert _test_args(TimeDepState('psi', 't')) def test_sympy__physics__quantum__state__Wavefunction(): from sympy.physics.quantum.state import Wavefunction from sympy.functions import sin from sympy 
import Piecewise, pi n = 1 L = 1 g = Piecewise((0, x < 0), (0, x > L), (sqrt(2//L)*sin(n*pi*x/L), True)) assert _test_args(Wavefunction(g, x)) def test_sympy__physics__quantum__tensorproduct__TensorProduct(): from sympy.physics.quantum.tensorproduct import TensorProduct assert _test_args(TensorProduct(x, y)) def test_sympy__physics__quantum__identitysearch__GateIdentity(): from sympy.physics.quantum.gate import X from sympy.physics.quantum.identitysearch import GateIdentity assert _test_args(GateIdentity(X(0), X(0))) def test_sympy__physics__quantum__sho1d__SHOOp(): from sympy.physics.quantum.sho1d import SHOOp assert _test_args(SHOOp('a')) def test_sympy__physics__quantum__sho1d__RaisingOp(): from sympy.physics.quantum.sho1d import RaisingOp assert _test_args(RaisingOp('a')) def test_sympy__physics__quantum__sho1d__LoweringOp(): from sympy.physics.quantum.sho1d import LoweringOp assert _test_args(LoweringOp('a')) def test_sympy__physics__quantum__sho1d__NumberOp(): from sympy.physics.quantum.sho1d import NumberOp assert _test_args(NumberOp('N')) def test_sympy__physics__quantum__sho1d__Hamiltonian(): from sympy.physics.quantum.sho1d import Hamiltonian assert _test_args(Hamiltonian('H')) def test_sympy__physics__quantum__sho1d__SHOState(): from sympy.physics.quantum.sho1d import SHOState assert _test_args(SHOState(0)) def test_sympy__physics__quantum__sho1d__SHOKet(): from sympy.physics.quantum.sho1d import SHOKet assert _test_args(SHOKet(0)) def test_sympy__physics__quantum__sho1d__SHOBra(): from sympy.physics.quantum.sho1d import SHOBra assert _test_args(SHOBra(0)) def test_sympy__physics__secondquant__AnnihilateBoson(): from sympy.physics.secondquant import AnnihilateBoson assert _test_args(AnnihilateBoson(0)) def test_sympy__physics__secondquant__AnnihilateFermion(): from sympy.physics.secondquant import AnnihilateFermion assert _test_args(AnnihilateFermion(0)) @SKIP("abstract class") def test_sympy__physics__secondquant__Annihilator(): pass def test_sympy__physics__secondquant__AntiSymmetricTensor(): from sympy.physics.secondquant import AntiSymmetricTensor i, j = symbols('i j', below_fermi=True) a, b = symbols('a b', above_fermi=True) assert _test_args(AntiSymmetricTensor('v', (a, i), (b, j))) def test_sympy__physics__secondquant__BosonState(): from sympy.physics.secondquant import BosonState assert _test_args(BosonState((0, 1))) @SKIP("abstract class") def test_sympy__physics__secondquant__BosonicOperator(): pass def test_sympy__physics__secondquant__Commutator(): from sympy.physics.secondquant import Commutator assert _test_args(Commutator(x, y)) def test_sympy__physics__secondquant__CreateBoson(): from sympy.physics.secondquant import CreateBoson assert _test_args(CreateBoson(0)) def test_sympy__physics__secondquant__CreateFermion(): from sympy.physics.secondquant import CreateFermion assert _test_args(CreateFermion(0)) @SKIP("abstract class") def test_sympy__physics__secondquant__Creator(): pass def test_sympy__physics__secondquant__Dagger(): from sympy.physics.secondquant import Dagger from sympy import I assert _test_args(Dagger(2*I)) def test_sympy__physics__secondquant__FermionState(): from sympy.physics.secondquant import FermionState assert _test_args(FermionState((0, 1))) def test_sympy__physics__secondquant__FermionicOperator(): from sympy.physics.secondquant import FermionicOperator assert _test_args(FermionicOperator(0)) def test_sympy__physics__secondquant__FockState(): from sympy.physics.secondquant import FockState assert _test_args(FockState((0, 1))) def 
test_sympy__physics__secondquant__FockStateBosonBra(): from sympy.physics.secondquant import FockStateBosonBra assert _test_args(FockStateBosonBra((0, 1))) def test_sympy__physics__secondquant__FockStateBosonKet(): from sympy.physics.secondquant import FockStateBosonKet assert _test_args(FockStateBosonKet((0, 1))) def test_sympy__physics__secondquant__FockStateBra(): from sympy.physics.secondquant import FockStateBra assert _test_args(FockStateBra((0, 1))) def test_sympy__physics__secondquant__FockStateFermionBra(): from sympy.physics.secondquant import FockStateFermionBra assert _test_args(FockStateFermionBra((0, 1))) def test_sympy__physics__secondquant__FockStateFermionKet(): from sympy.physics.secondquant import FockStateFermionKet assert _test_args(FockStateFermionKet((0, 1))) def test_sympy__physics__secondquant__FockStateKet(): from sympy.physics.secondquant import FockStateKet assert _test_args(FockStateKet((0, 1))) def test_sympy__physics__secondquant__InnerProduct(): from sympy.physics.secondquant import InnerProduct from sympy.physics.secondquant import FockStateKet, FockStateBra assert _test_args(InnerProduct(FockStateBra((0, 1)), FockStateKet((0, 1)))) def test_sympy__physics__secondquant__NO(): from sympy.physics.secondquant import NO, F, Fd assert _test_args(NO(Fd(x)*F(y))) def test_sympy__physics__secondquant__PermutationOperator(): from sympy.physics.secondquant import PermutationOperator assert _test_args(PermutationOperator(0, 1)) def test_sympy__physics__secondquant__SqOperator(): from sympy.physics.secondquant import SqOperator assert _test_args(SqOperator(0)) def test_sympy__physics__secondquant__TensorSymbol(): from sympy.physics.secondquant import TensorSymbol assert _test_args(TensorSymbol(x)) def test_sympy__physics__units__Unit(): from sympy.physics.units import Unit assert _test_args(Unit("meter", "m")) def test_sympy__physics__unitsystems__dimensions__Dimension(): from sympy.physics.unitsystems.dimensions import Dimension assert _test_args(Dimension(name="length", symbol="L", length=1)) def test_sympy__physics__unitsystems__quantities__Quantity(): from sympy.physics.unitsystems.quantities import Quantity from sympy.physics.unitsystems.systems import mks assert _test_args(Quantity(10, mks["m"])) def test_sympy__physics__unitsystems__units__Constant(): from sympy.physics.unitsystems.units import Constant from sympy.physics.unitsystems.dimensions import Dimension length = Dimension(length=1) assert _test_args(Constant(length, abbrev="u", factor=10)) def test_sympy__physics__unitsystems__units__Unit(): from sympy.physics.unitsystems.units import Unit from sympy.physics.unitsystems.dimensions import Dimension length = Dimension(length=1) assert _test_args(Unit(length, abbrev="u", factor=10)) def test_sympy__core__numbers__AlgebraicNumber(): from sympy.core.numbers import AlgebraicNumber assert _test_args(AlgebraicNumber(sqrt(2), [1, 2, 3])) def test_sympy__polys__polytools__GroebnerBasis(): from sympy.polys.polytools import GroebnerBasis assert _test_args(GroebnerBasis([x, y, z], x, y, z)) def test_sympy__polys__polytools__Poly(): from sympy.polys.polytools import Poly assert _test_args(Poly(2, x, y)) def test_sympy__polys__polytools__PurePoly(): from sympy.polys.polytools import PurePoly assert _test_args(PurePoly(2, x, y)) def test_sympy__polys__rootoftools__RootOf(): from sympy.polys.rootoftools import RootOf assert _test_args(RootOf(x**3 + x + 1, 0)) def test_sympy__polys__rootoftools__RootSum(): from sympy.polys.rootoftools import RootSum assert 
_test_args(RootSum(x**3 + x + 1, sin)) def test_sympy__series__limits__Limit(): from sympy.series.limits import Limit assert _test_args(Limit(x, x, 0, dir='-')) def test_sympy__series__order__Order(): from sympy.series.order import Order assert _test_args(Order(1, x, y)) def test_sympy__simplify__hyperexpand__Hyper_Function(): from sympy.simplify.hyperexpand import Hyper_Function assert _test_args(Hyper_Function([2], [1])) def test_sympy__simplify__hyperexpand__G_Function(): from sympy.simplify.hyperexpand import G_Function assert _test_args(G_Function([2], [1], [], [])) def test_sympy__tensor__indexed__Idx(): from sympy.tensor.indexed import Idx assert _test_args(Idx('test')) assert _test_args(Idx(1, (0, 10))) def test_sympy__tensor__indexed__Indexed(): from sympy.tensor.indexed import Indexed, Idx assert _test_args(Indexed('A', Idx('i'), Idx('j'))) def test_sympy__tensor__indexed__IndexedBase(): from sympy.tensor.indexed import IndexedBase assert _test_args(IndexedBase('A', shape=(x, y))) assert _test_args(IndexedBase('A', 1)) assert _test_args(IndexedBase('A')[0, 1]) @XFAIL def test_sympy__physics__hep__gamma_matrices__GammaMatrixHead(): # This test fails, this class can be reconstructed from the *args # of an instance using `TensorHead(*args)` from sympy.physics.hep.gamma_matrices import GammaMatrixHead, Lorentz from sympy.tensor.tensor import tensor_indices i = tensor_indices('i', Lorentz) assert _test_args(GammaMatrixHead()) def test_sympy__tensor__tensor__TensorIndexType(): from sympy.tensor.tensor import TensorIndexType assert _test_args(TensorIndexType('Lorentz', metric=False)) def test_sympy__tensor__tensor__TensorSymmetry(): from sympy.tensor.tensor import TensorSymmetry, get_symmetric_group_sgs assert _test_args(TensorSymmetry(get_symmetric_group_sgs(2))) def test_sympy__tensor__tensor__TensorType(): from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, get_symmetric_group_sgs, TensorType Lorentz = TensorIndexType('Lorentz', dummy_fmt='L') sym = TensorSymmetry(get_symmetric_group_sgs(1)) assert _test_args(TensorType([Lorentz], sym)) def test_sympy__tensor__tensor__TensorHead(): from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, TensorType, get_symmetric_group_sgs, TensorHead Lorentz = TensorIndexType('Lorentz', dummy_fmt='L') sym = TensorSymmetry(get_symmetric_group_sgs(1)) S1 = TensorType([Lorentz], sym) assert _test_args(TensorHead('p', S1, 0)) def test_sympy__tensor__tensor__TensorIndex(): from sympy.tensor.tensor import TensorIndexType, TensorIndex Lorentz = TensorIndexType('Lorentz', dummy_fmt='L') assert _test_args(TensorIndex('i', Lorentz)) @SKIP("abstract class") def test_sympy__tensor__tensor__TensExpr(): pass def test_sympy__tensor__tensor__TensAdd(): from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, TensorType, get_symmetric_group_sgs, tensor_indices, TensAdd Lorentz = TensorIndexType('Lorentz', dummy_fmt='L') a, b = tensor_indices('a,b', Lorentz) sym = TensorSymmetry(get_symmetric_group_sgs(1)) S1 = TensorType([Lorentz], sym) p, q = S1('p,q') t1 = p(a) t2 = q(a) assert _test_args(TensAdd(t1, t2)) def test_sympy__tensor__tensor__Tensor(): from sympy.core import S from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, TensorType, get_symmetric_group_sgs, tensor_indices, TensMul, TIDS Lorentz = TensorIndexType('Lorentz', dummy_fmt='L') a, b = tensor_indices('a,b', Lorentz) sym = TensorSymmetry(get_symmetric_group_sgs(1)) S1 = TensorType([Lorentz], sym) p = S1('p') assert _test_args(p(a)) def 
test_sympy__tensor__tensor__TensMul(): from sympy.core import S from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, TensorType, get_symmetric_group_sgs, tensor_indices, TensMul, TIDS Lorentz = TensorIndexType('Lorentz', dummy_fmt='L') a, b = tensor_indices('a,b', Lorentz) sym = TensorSymmetry(get_symmetric_group_sgs(1)) S1 = TensorType([Lorentz], sym) p = S1('p') q = S1('q') assert _test_args(3*p(a)*q(b)) def test_as_coeff_add(): assert (7, (3*x, 4*x**2)) == (7 + 3*x + 4*x**2).as_coeff_add() def test_sympy__geometry__curve__Curve(): from sympy.geometry.curve import Curve assert _test_args(Curve((x, 1), (x, 0, 1))) def test_sympy__geometry__point__Point(): from sympy.geometry.point import Point assert _test_args(Point(0, 1)) def test_sympy__geometry__point3d__Point3D(): from sympy.geometry.point3d import Point3D assert _test_args(Point3D(0, 1, 2)) def test_sympy__geometry__ellipse__Ellipse(): from sympy.geometry.ellipse import Ellipse assert _test_args(Ellipse((0, 1), 2, 3)) def test_sympy__geometry__ellipse__Circle(): from sympy.geometry.ellipse import Circle assert _test_args(Circle((0, 1), 2)) @SKIP("abstract class") def test_sympy__geometry__line__LinearEntity(): pass def test_sympy__geometry__line__Line(): from sympy.geometry.line import Line assert _test_args(Line((0, 1), (2, 3))) def test_sympy__geometry__line__Ray(): from sympy.geometry.line import Ray assert _test_args(Ray((0, 1), (2, 3))) def test_sympy__geometry__line__Segment(): from sympy.geometry.line import Segment assert _test_args(Segment((0, 1), (2, 3))) @SKIP("abstract class") def test_sympy__geometry__line3d__LinearEntity3D(): pass def test_sympy__geometry__line3d__Line3D(): from sympy.geometry.line3d import Line3D assert _test_args(Line3D((0, 1, 1), (2, 3, 4))) def test_sympy__geometry__line3d__Segment3D(): from sympy.geometry.line3d import Segment3D assert _test_args(Segment3D((0, 1, 1), (2, 3, 4))) def test_sympy__geometry__line3d__Ray3D(): from sympy.geometry.line3d import Ray3D assert _test_args(Ray3D((0, 1, 1), (2, 3, 4))) def test_sympy__geometry__plane__Plane(): from sympy.geometry.plane import Plane assert _test_args(Plane((1, 1, 1), (-3, 4, -2), (1, 2, 3))) def test_sympy__geometry__polygon__Polygon(): from sympy.geometry.polygon import Polygon assert _test_args(Polygon((0, 1), (2, 3), (4, 5), (6, 7))) def test_sympy__geometry__polygon__RegularPolygon(): from sympy.geometry.polygon import RegularPolygon assert _test_args(RegularPolygon((0, 1), 2, 3, 4)) def test_sympy__geometry__polygon__Triangle(): from sympy.geometry.polygon import Triangle assert _test_args(Triangle((0, 1), (2, 3), (4, 5))) def test_sympy__geometry__entity__GeometryEntity(): from sympy.geometry.entity import GeometryEntity from sympy.geometry.point import Point assert _test_args(GeometryEntity(Point(1, 0), 1, [1, 2])) def test_sympy__diffgeom__diffgeom__Manifold(): from sympy.diffgeom import Manifold assert _test_args(Manifold('name', 3)) def test_sympy__diffgeom__diffgeom__Patch(): from sympy.diffgeom import Manifold, Patch assert _test_args(Patch('name', Manifold('name', 3))) def test_sympy__diffgeom__diffgeom__CoordSystem(): from sympy.diffgeom import Manifold, Patch, CoordSystem assert _test_args(CoordSystem('name', Patch('name', Manifold('name', 3)))) @XFAIL def test_sympy__diffgeom__diffgeom__Point(): from sympy.diffgeom import Manifold, Patch, CoordSystem, Point assert _test_args(Point( CoordSystem('name', Patch('name', Manifold('name', 3))), [x, y])) def test_sympy__diffgeom__diffgeom__BaseScalarField(): from 
sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField cs = CoordSystem('name', Patch('name', Manifold('name', 3))) assert _test_args(BaseScalarField(cs, 0)) def test_sympy__diffgeom__diffgeom__BaseVectorField(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseVectorField cs = CoordSystem('name', Patch('name', Manifold('name', 3))) assert _test_args(BaseVectorField(cs, 0)) def test_sympy__diffgeom__diffgeom__Differential(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField, Differential cs = CoordSystem('name', Patch('name', Manifold('name', 3))) assert _test_args(Differential(BaseScalarField(cs, 0))) def test_sympy__diffgeom__diffgeom__Commutator(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseVectorField, Commutator cs = CoordSystem('name', Patch('name', Manifold('name', 3))) cs1 = CoordSystem('name1', Patch('name', Manifold('name', 3))) v = BaseVectorField(cs, 0) v1 = BaseVectorField(cs1, 0) assert _test_args(Commutator(v, v1)) def test_sympy__diffgeom__diffgeom__TensorProduct(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField, Differential, TensorProduct cs = CoordSystem('name', Patch('name', Manifold('name', 3))) d = Differential(BaseScalarField(cs, 0)) assert _test_args(TensorProduct(d, d)) def test_sympy__diffgeom__diffgeom__WedgeProduct(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField, Differential, WedgeProduct cs = CoordSystem('name', Patch('name', Manifold('name', 3))) d = Differential(BaseScalarField(cs, 0)) d1 = Differential(BaseScalarField(cs, 1)) assert _test_args(WedgeProduct(d, d1)) def test_sympy__diffgeom__diffgeom__LieDerivative(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField, Differential, BaseVectorField, LieDerivative cs = CoordSystem('name', Patch('name', Manifold('name', 3))) d = Differential(BaseScalarField(cs, 0)) v = BaseVectorField(cs, 0) assert _test_args(LieDerivative(v, d)) @XFAIL def test_sympy__diffgeom__diffgeom__BaseCovarDerivativeOp(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseCovarDerivativeOp cs = CoordSystem('name', Patch('name', Manifold('name', 3))) assert _test_args(BaseCovarDerivativeOp(cs, 0, [[[0, ]*3, ]*3, ]*3)) def test_sympy__diffgeom__diffgeom__CovarDerivativeOp(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseVectorField, CovarDerivativeOp cs = CoordSystem('name', Patch('name', Manifold('name', 3))) v = BaseVectorField(cs, 0) _test_args(CovarDerivativeOp(v, [[[0, ]*3, ]*3, ]*3)) def test_sympy__categories__baseclasses__Class(): from sympy.categories.baseclasses import Class assert _test_args(Class()) def test_sympy__categories__baseclasses__Object(): from sympy.categories import Object assert _test_args(Object("A")) @XFAIL def test_sympy__categories__baseclasses__Morphism(): from sympy.categories import Object, Morphism assert _test_args(Morphism(Object("A"), Object("B"))) def test_sympy__categories__baseclasses__IdentityMorphism(): from sympy.categories import Object, IdentityMorphism assert _test_args(IdentityMorphism(Object("A"))) def test_sympy__categories__baseclasses__NamedMorphism(): from sympy.categories import Object, NamedMorphism assert _test_args(NamedMorphism(Object("A"), Object("B"), "f")) def test_sympy__categories__baseclasses__CompositeMorphism(): from sympy.categories import Object, NamedMorphism, CompositeMorphism A = Object("A") B = Object("B") C = Object("C") f = NamedMorphism(A, B, "f") g = NamedMorphism(B, C, "g") assert 
_test_args(CompositeMorphism(f, g)) def test_sympy__categories__baseclasses__Diagram(): from sympy.categories import Object, NamedMorphism, Diagram A = Object("A") B = Object("B") C = Object("C") f = NamedMorphism(A, B, "f") d = Diagram([f]) assert _test_args(d) def test_sympy__categories__baseclasses__Category(): from sympy.categories import Object, NamedMorphism, Diagram, Category A = Object("A") B = Object("B") C = Object("C") f = NamedMorphism(A, B, "f") g = NamedMorphism(B, C, "g") d1 = Diagram([f, g]) d2 = Diagram([f]) K = Category("K", commutative_diagrams=[d1, d2]) assert _test_args(K) def test_sympy__ntheory__factor___totient(): from sympy.ntheory.factor_ import totient k = symbols('k', integer=True) t = totient(k) assert _test_args(t) def test_sympy__ntheory__factor___divisor_sigma(): from sympy.ntheory.factor_ import divisor_sigma k = symbols('k', integer=True) n = symbols('n', integer=True) t = divisor_sigma(n, k) assert _test_args(t) def test_sympy__ntheory__residue_ntheory__mobius(): from sympy.ntheory import mobius assert _test_args(mobius(2)) def test_sympy__physics__optics__waves__TWave(): from sympy.physics.optics import TWave A, f, phi = symbols('A, f, phi') assert _test_args(TWave(A, f, phi)) def test_sympy__physics__optics__gaussopt__BeamParameter(): from sympy.physics.optics import BeamParameter assert _test_args(BeamParameter(530e-9, 1, w=1e-3)) def test_sympy__physics__optics__medium__Medium(): from sympy.physics.optics import Medium assert _test_args(Medium('m')) def test_sympy__printing__codeprinter__Assignment(): from sympy.printing.codeprinter import Assignment assert _test_args(Assignment(x, y)) def test_sympy__vector__coordsysrect__CoordSysCartesian(): from sympy.vector.coordsysrect import CoordSysCartesian assert _test_args(CoordSysCartesian('C')) def test_sympy__vector__point__Point(): from sympy.vector.point import Point assert _test_args(Point('P')) def test_sympy__vector__basisdependent__BasisDependent(): from sympy.vector.basisdependent import BasisDependent #These classes have been created to maintain an OOP hierarchy #for Vectors and Dyadics. Are NOT meant to be initialized def test_sympy__vector__basisdependent__BasisDependentMul(): from sympy.vector.basisdependent import BasisDependentMul #These classes have been created to maintain an OOP hierarchy #for Vectors and Dyadics. Are NOT meant to be initialized def test_sympy__vector__basisdependent__BasisDependentAdd(): from sympy.vector.basisdependent import BasisDependentAdd #These classes have been created to maintain an OOP hierarchy #for Vectors and Dyadics. Are NOT meant to be initialized def test_sympy__vector__basisdependent__BasisDependentZero(): from sympy.vector.basisdependent import BasisDependentZero #These classes have been created to maintain an OOP hierarchy #for Vectors and Dyadics. 
Are NOT meant to be initialized def test_sympy__vector__vector__BaseVector(): from sympy.vector.vector import BaseVector from sympy.vector.coordsysrect import CoordSysCartesian C = CoordSysCartesian('C') assert _test_args(BaseVector('Ci', 0, C, ' ', ' ')) def test_sympy__vector__vector__VectorAdd(): from sympy.vector.vector import VectorAdd, VectorMul from sympy.vector.coordsysrect import CoordSysCartesian C = CoordSysCartesian('C') from sympy.abc import a, b, c, x, y, z v1 = a*C.i + b*C.j + c*C.k v2 = x*C.i + y*C.j + z*C.k assert _test_args(VectorAdd(v1, v2)) assert _test_args(VectorMul(x, v1)) def test_sympy__vector__vector__VectorMul(): from sympy.vector.vector import VectorMul from sympy.vector.coordsysrect import CoordSysCartesian C = CoordSysCartesian('C') from sympy.abc import a assert _test_args(VectorMul(a, C.i)) def test_sympy__vector__vector__VectorZero(): from sympy.vector.vector import VectorZero assert _test_args(VectorZero()) def test_sympy__vector__vector__Vector(): from sympy.vector.vector import Vector #Vector is never to be initialized using args pass def test_sympy__vector__dyadic__Dyadic(): from sympy.vector.dyadic import Dyadic #Dyadic is never to be initialized using args pass def test_sympy__vector__dyadic__BaseDyadic(): from sympy.vector.dyadic import BaseDyadic from sympy.vector.coordsysrect import CoordSysCartesian C = CoordSysCartesian('C') assert _test_args(BaseDyadic(C.i, C.j)) def test_sympy__vector__dyadic__DyadicMul(): from sympy.vector.dyadic import BaseDyadic, DyadicMul from sympy.vector.coordsysrect import CoordSysCartesian C = CoordSysCartesian('C') assert _test_args(DyadicMul(3, BaseDyadic(C.i, C.j))) def test_sympy__vector__dyadic__DyadicAdd(): from sympy.vector.dyadic import BaseDyadic, DyadicAdd from sympy.vector.coordsysrect import CoordSysCartesian C = CoordSysCartesian('C') assert _test_args(2 * DyadicAdd(BaseDyadic(C.i, C.i), BaseDyadic(C.i, C.j))) def test_sympy__vector__dyadic__DyadicZero(): from sympy.vector.dyadic import DyadicZero assert _test_args(DyadicZero()) def test_sympy__vector__deloperator__Del(): from sympy.vector.deloperator import Del from sympy.vector.coordsysrect import CoordSysCartesian C = CoordSysCartesian('C') assert _test_args(Del(C)) def test_sympy__vector__orienters__Orienter(): from sympy.vector.orienters import Orienter #Not to be initialized def test_sympy__vector__orienters__ThreeAngleOrienter(): from sympy.vector.orienters import ThreeAngleOrienter #Not to be initialized def test_sympy__vector__orienters__AxisOrienter(): from sympy.vector.orienters import AxisOrienter from sympy.vector.coordsysrect import CoordSysCartesian C = CoordSysCartesian('C') assert _test_args(AxisOrienter(x, C.i)) def test_sympy__vector__orienters__BodyOrienter(): from sympy.vector.orienters import BodyOrienter assert _test_args(BodyOrienter(x, y, z, '123')) def test_sympy__vector__orienters__SpaceOrienter(): from sympy.vector.orienters import SpaceOrienter assert _test_args(SpaceOrienter(x, y, z, '123')) def test_sympy__vector__orienters__QuaternionOrienter(): from sympy.vector.orienters import QuaternionOrienter a, b, c, d = symbols('a b c d') assert _test_args(QuaternionOrienter(a, b, c, d)) def test_sympy__vector__scalar__BaseScalar(): from sympy.vector.scalar import BaseScalar from sympy.vector.coordsysrect import CoordSysCartesian C = CoordSysCartesian('C') assert _test_args(BaseScalar('Cx', 0, C, ' ', ' '))
true
true
f7029b3beb7195ba068d85097363523b962ab536
136
py
Python
ARC076/ARC076e.py
VolgaKurvar/AtCoder
21acb489f1594bbb1cdc64fbf8421d876b5b476d
[ "Unlicense" ]
null
null
null
ARC076/ARC076e.py
VolgaKurvar/AtCoder
21acb489f1594bbb1cdc64fbf8421d876b5b476d
[ "Unlicense" ]
null
null
null
ARC076/ARC076e.py
VolgaKurvar/AtCoder
21acb489f1594bbb1cdc64fbf8421d876b5b476d
[ "Unlicense" ]
null
null
null
#ARC076e
def main():
    import sys
    input=sys.stdin.readline
    sys.setrecursionlimit(10**6)


if __name__ == '__main__':
    main()
17
32
0.654412
def main():
    import sys
    input=sys.stdin.readline
    sys.setrecursionlimit(10**6)


if __name__ == '__main__':
    main()
true
true
f7029c9b1d4976a50cf98e8502601e9ea1b91175
337
py
Python
access/management/commands/access_create_internal_aliases.py
darkismus/kompassi
35dea2c7af2857a69cae5c5982b48f01ba56da1f
[ "CC-BY-3.0" ]
13
2015-11-29T12:19:12.000Z
2021-02-21T15:42:11.000Z
access/management/commands/access_create_internal_aliases.py
darkismus/kompassi
35dea2c7af2857a69cae5c5982b48f01ba56da1f
[ "CC-BY-3.0" ]
23
2015-04-29T19:43:34.000Z
2021-02-10T05:50:17.000Z
access/management/commands/access_create_internal_aliases.py
darkismus/kompassi
35dea2c7af2857a69cae5c5982b48f01ba56da1f
[ "CC-BY-3.0" ]
11
2015-09-20T18:59:00.000Z
2020-02-07T08:47:34.000Z
from django.core.management.base import BaseCommand
from django.utils.timezone import now


class Command(BaseCommand):
    args = '[event_slug...]'
    help = 'Create missing email aliases'

    def handle(*args, **opts):
        from access.models import InternalEmailAlias

        InternalEmailAlias.ensure_internal_email_aliases()
24.071429
58
0.732938
from django.core.management.base import BaseCommand
from django.utils.timezone import now


class Command(BaseCommand):
    args = '[event_slug...]'
    help = 'Create missing email aliases'

    def handle(*args, **opts):
        from access.models import InternalEmailAlias

        InternalEmailAlias.ensure_internal_email_aliases()
true
true
f7029d2c344fe7b18326f6b65133b21dcec1e3e7
956
py
Python
django_server/feedback_map/rest/tests/base.py
ForumViriumHelsinki/FVHFeedbackMap
cfbf3dd8715e34351fb1ba628ebb0c5eb82e78e0
[ "MIT" ]
null
null
null
django_server/feedback_map/rest/tests/base.py
ForumViriumHelsinki/FVHFeedbackMap
cfbf3dd8715e34351fb1ba628ebb0c5eb82e78e0
[ "MIT" ]
39
2020-12-30T11:03:49.000Z
2021-11-10T12:16:29.000Z
django_server/feedback_map/rest/tests/base.py
ForumViriumHelsinki/FVHFeedbackMap
cfbf3dd8715e34351fb1ba628ebb0c5eb82e78e0
[ "MIT" ]
1
2020-12-08T13:19:33.000Z
2020-12-08T13:19:33.000Z
from django.contrib.auth.models import User
from rest_framework.test import APITestCase


class FVHAPITestCase(APITestCase):
    def assert_dict_contains(self, superset, subset, path=''):
        for key, expected in subset.items():
            full_path = path + key
            received = superset.get(key, None)
            if isinstance(expected, dict) and isinstance(received, dict):
                self.assert_dict_contains(superset[key], expected, full_path + '.')
            else:
                assert received == expected, 'Value mismatch for key {}: {} != {}'.format(
                    full_path, expected, received
                )

    def create_user(self):
        return User.objects.create(
            username='courier', first_name='Coranne', last_name='Courier', email='coranne@couriersrus.com')

    def create_and_login_user(self):
        user = self.create_user()
        self.client.force_login(user)
        return user
38.24
111
0.621339
from django.contrib.auth.models import User
from rest_framework.test import APITestCase


class FVHAPITestCase(APITestCase):
    def assert_dict_contains(self, superset, subset, path=''):
        for key, expected in subset.items():
            full_path = path + key
            received = superset.get(key, None)
            if isinstance(expected, dict) and isinstance(received, dict):
                self.assert_dict_contains(superset[key], expected, full_path + '.')
            else:
                assert received == expected, 'Value mismatch for key {}: {} != {}'.format(
                    full_path, expected, received
                )

    def create_user(self):
        return User.objects.create(
            username='courier', first_name='Coranne', last_name='Courier', email='coranne@couriersrus.com')

    def create_and_login_user(self):
        user = self.create_user()
        self.client.force_login(user)
        return user
true
true
f7029dd1805bb86b5c26cdc6d17720b337162e8a
20,089
bzl
Python
packages/typescript/internal/ts_project.bzl
JiaLiPassion/rules_nodejs
2424d1e32b564fcc37b57d593b871461a62f3237
[ "Apache-2.0" ]
null
null
null
packages/typescript/internal/ts_project.bzl
JiaLiPassion/rules_nodejs
2424d1e32b564fcc37b57d593b871461a62f3237
[ "Apache-2.0" ]
null
null
null
packages/typescript/internal/ts_project.bzl
JiaLiPassion/rules_nodejs
2424d1e32b564fcc37b57d593b871461a62f3237
[ "Apache-2.0" ]
null
null
null
"ts_project rule" load("@build_bazel_rules_nodejs//:providers.bzl", "DeclarationInfo", "NpmPackageInfo", "declaration_info", "js_module_info", "run_node") _DEFAULT_TSC = ( # BEGIN-INTERNAL "@npm" + # END-INTERNAL "//typescript/bin:tsc" ) _ATTRS = { "args": attr.string_list(), "declaration_dir": attr.string(), "deps": attr.label_list(providers = [DeclarationInfo]), "extends": attr.label_list(allow_files = [".json"]), "out_dir": attr.string(), "root_dir": attr.string(), # NB: no restriction on extensions here, because tsc sometimes adds type-check support # for more file kinds (like require('some.json')) and also # if you swap out the `compiler` attribute (like with ngtsc) # that compiler might allow more sources than tsc does. "srcs": attr.label_list(allow_files = True, mandatory = True), "tsc": attr.label(default = Label(_DEFAULT_TSC), executable = True, cfg = "host"), "tsconfig": attr.label(mandatory = True, allow_single_file = [".json"]), } # tsc knows how to produce the following kinds of output files. # NB: the macro `ts_project_macro` will set these outputs based on user # telling us which settings are enabled in the tsconfig for this project. _OUTPUTS = { "buildinfo_out": attr.output(), "js_outs": attr.output_list(), "map_outs": attr.output_list(), "typing_maps_outs": attr.output_list(), "typings_outs": attr.output_list(), } _TsConfigInfo = provider( doc = """Passes tsconfig.json files to downstream compilations so that TypeScript can read them. This is needed to support Project References""", fields = { "tsconfigs": "depset of tsconfig.json files", }, ) def _join(*elements): return "/".join([f for f in elements if f]) def _ts_project_impl(ctx): arguments = ctx.actions.args() # Add user specified arguments *before* rule supplied arguments arguments.add_all(ctx.attr.args) arguments.add_all([ "--project", ctx.file.tsconfig.path, "--outDir", _join(ctx.bin_dir.path, ctx.label.package, ctx.attr.out_dir), "--rootDir", _join(ctx.label.package, ctx.attr.root_dir) if ctx.label.package else ".", ]) if len(ctx.outputs.typings_outs) > 0: declaration_dir = ctx.attr.declaration_dir if ctx.attr.declaration_dir else ctx.attr.out_dir arguments.add_all([ "--declarationDir", _join(ctx.bin_dir.path, ctx.label.package, declaration_dir), ]) # When users report problems, we can ask them to re-build with # --define=VERBOSE_LOGS=1 # so anything that's useful to diagnose rule failures belongs here if "VERBOSE_LOGS" in ctx.var.keys(): arguments.add_all([ # What files were in the ts.Program "--listFiles", # Did tsc write all outputs to the place we expect to find them? "--listEmittedFiles", # Why did module resolution fail? "--traceResolution", # Why was the build slow? "--diagnostics", "--extendedDiagnostics", ]) deps_depsets = [] for dep in ctx.attr.deps: if _TsConfigInfo in dep: deps_depsets.append(dep[_TsConfigInfo].tsconfigs) if NpmPackageInfo in dep: # TODO: we could maybe filter these to be tsconfig.json or *.d.ts only # we don't expect tsc wants to read any other files from npm packages. deps_depsets.append(dep[NpmPackageInfo].sources) if DeclarationInfo in dep: deps_depsets.append(dep[DeclarationInfo].transitive_declarations) inputs = ctx.files.srcs + depset(transitive = deps_depsets).to_list() + [ctx.file.tsconfig] if ctx.attr.extends: inputs.extend(ctx.files.extends) # We do not try to predeclare json_outs, because their output locations generally conflict with their path in the source tree. # (The exception is when out_dir is used, then the .json output is a different path than the input.) 
# However tsc will copy .json srcs to the output tree so we want to declare these outputs to include along with .js Default outs # NB: We don't have emit_declaration_only setting here, so use presence of any JS outputs as an equivalent. # tsc will only produce .json if it also produces .js if len(ctx.outputs.js_outs): json_outs = [ ctx.actions.declare_file(_join(ctx.attr.out_dir, src.short_path[len(ctx.label.package) + 1:])) for src in ctx.files.srcs if src.basename.endswith(".json") ] else: json_outs = [] outputs = json_outs + ctx.outputs.js_outs + ctx.outputs.map_outs + ctx.outputs.typings_outs + ctx.outputs.typing_maps_outs if ctx.outputs.buildinfo_out: outputs.append(ctx.outputs.buildinfo_out) runtime_outputs = depset(json_outs + ctx.outputs.js_outs + ctx.outputs.map_outs) typings_outputs = ctx.outputs.typings_outs + ctx.outputs.typing_maps_outs + [s for s in ctx.files.srcs if s.path.endswith(".d.ts")] if len(outputs) > 0: run_node( ctx, inputs = inputs, arguments = [arguments], outputs = outputs, executable = "tsc", progress_message = "Compiling TypeScript project %s [tsc -p %s]" % ( ctx.label, ctx.file.tsconfig.short_path, ), ) providers = [ # DefaultInfo is what you see on the command-line for a built library, # and determines what files are used by a simple non-provider-aware # downstream library. # Only the JavaScript outputs are intended for use in non-TS-aware # dependents. DefaultInfo( files = runtime_outputs, runfiles = ctx.runfiles( transitive_files = runtime_outputs, collect_default = True, ), ), js_module_info( sources = runtime_outputs, deps = ctx.attr.deps, ), _TsConfigInfo(tsconfigs = depset([ctx.file.tsconfig] + ctx.files.extends, transitive = [ dep[_TsConfigInfo].tsconfigs for dep in ctx.attr.deps if _TsConfigInfo in dep ])), ] # Don't provide DeclarationInfo if there are no typings to provide. # Improves error messaging if a ts_project needs declaration = True if len(typings_outputs) or len(ctx.attr.deps): providers.append(declaration_info(depset(typings_outputs), ctx.attr.deps)) providers.append(OutputGroupInfo(types = depset(typings_outputs))) return providers ts_project = rule( implementation = _ts_project_impl, attrs = dict(_ATTRS, **_OUTPUTS), ) def _validate_options_impl(ctx): # Bazel won't run our action unless its output is needed, so make a marker file # We make it a .d.ts file so we can plumb it to the deps of the ts_project compile. 
marker = ctx.actions.declare_file("%s.optionsvalid.d.ts" % ctx.label.name) arguments = ctx.actions.args() arguments.add_all([ctx.file.tsconfig.path, marker.path, ctx.attr.target, struct( declaration = ctx.attr.declaration, declaration_map = ctx.attr.declaration_map, composite = ctx.attr.composite, emit_declaration_only = ctx.attr.emit_declaration_only, source_map = ctx.attr.source_map, incremental = ctx.attr.incremental, ).to_json()]) run_node( ctx, inputs = [ctx.file.tsconfig] + ctx.files.extends, outputs = [marker], arguments = [arguments], executable = "validator", ) return [ DeclarationInfo( transitive_declarations = depset([marker]), ), ] validate_options = rule( implementation = _validate_options_impl, attrs = { "composite": attr.bool(), "declaration": attr.bool(), "declaration_map": attr.bool(), "emit_declaration_only": attr.bool(), "extends": attr.label_list(allow_files = [".json"]), "incremental": attr.bool(), "source_map": attr.bool(), "target": attr.string(), "tsconfig": attr.label(mandatory = True, allow_single_file = [".json"]), "validator": attr.label(default = Label("//packages/typescript/bin:ts_project_options_validator"), executable = True, cfg = "host"), }, ) def _out_paths(srcs, outdir, rootdir, ext): rootdir_replace_pattern = rootdir + "/" if rootdir else "" return [_join(outdir, f[:f.rindex(".")].replace(rootdir_replace_pattern, "") + ext) for f in srcs if not f.endswith(".d.ts") and not f.endswith(".json")] def ts_project_macro( name = "tsconfig", tsconfig = None, srcs = None, args = [], deps = [], extends = None, declaration = False, source_map = False, declaration_map = False, composite = False, incremental = False, emit_declaration_only = False, tsc = None, validate = True, declaration_dir = None, out_dir = None, root_dir = None, **kwargs): """Compiles one TypeScript project using `tsc --project` This is a drop-in replacement for the `tsc` rule automatically generated for the "typescript" package, typically loaded from `@npm//typescript:index.bzl`. Unlike bare `tsc`, this rule understands the Bazel interop mechanism (Providers) so that this rule works with others that produce or consume TypeScript typings (`.d.ts` files). Unlike `ts_library`, this rule is the thinnest possible layer of Bazel interoperability on top of the TypeScript compiler. It shifts the burden of configuring TypeScript into the tsconfig.json file. See https://github.com/bazelbuild/rules_nodejs/blob/master/docs/TypeScript.md#alternatives for more details about the trade-offs between the two rules. Some TypeScript options affect which files are emitted, and Bazel wants to know these ahead-of-time. So several options from the tsconfig file must be mirrored as attributes to ts_project. See https://www.typescriptlang.org/v2/en/tsconfig for a listing of the TypeScript options. Any code that works with `tsc` should work with `ts_project` with a few caveats: - Bazel requires that the `outDir` (and `declarationDir`) be set to `bazel-out/[target architecture]/bin/path/to/package` so we override whatever settings appear in your tsconfig. - Bazel expects that each output is produced by a single rule. Thus if you have two `ts_project` rules with overlapping sources (the same `.ts` file appears in more than one) then you get an error about conflicting `.js` output files if you try to build both together. Worse, if you build them separately then the output directory will contain whichever one you happened to build most recently. This is highly discouraged. 
> Note: in order for TypeScript to resolve relative references to the bazel-out folder, > we recommend that the base tsconfig contain a rootDirs section that includes all > possible locations they may appear. > > We hope this will not be needed in some future release of TypeScript. > Follow https://github.com/microsoft/TypeScript/issues/37257 for more info. > > For example, if the base tsconfig file relative to the workspace root is > `path/to/tsconfig.json` then you should configure like: > > ``` > "compilerOptions": { > "rootDirs": [ > ".", > "../../bazel-out/darwin-fastbuild/bin/path/to", > "../../bazel-out/k8-fastbuild/bin/path/to", > "../../bazel-out/x64_windows-fastbuild/bin/path/to", > "../../bazel-out/darwin-dbg/bin/path/to", > "../../bazel-out/k8-dbg/bin/path/to", > "../../bazel-out/x64_windows-dbg/bin/path/to", > ] > } > ``` ### Issues when running non-sandboxed When using a non-sandboxed spawn strategy (which is the default on Windows), you may observe these problems which require workarounds: 1) Bazel deletes outputs from the previous execution before running `tsc`. This causes a problem with TypeScript's incremental mode: if the `.tsbuildinfo` file is not known to be an output of the rule, then Bazel will leave it in the output directory, and when `tsc` runs, it may see that the outputs written by the prior invocation are up-to-date and skip the emit of these files. This will cause Bazel to intermittently fail with an error that some outputs were not written. This is why we depend on `composite` and/or `incremental` attributes to be provided, so we can tell Bazel to expect a `.tsbuildinfo` output to ensure it is deleted before a subsequent compilation. At present, we don't do anything useful with the `.tsbuildinfo` output, and this rule does not actually have incremental behavior. Deleting the file is actually counter-productive in terms of TypeScript compile performance. Follow https://github.com/bazelbuild/rules_nodejs/issues/1726 2) When using Project References, TypeScript will expect to verify that the outputs of referenced projects are up-to-date with respect to their inputs. (This is true even without using the `--build` option). When using a non-sandboxed spawn strategy, `tsc` can read the sources from other `ts_project` rules in your project, and will expect that the `tsconfig.json` file for those references will indicate where the outputs were written. However the `outDir` is determined by this Bazel rule so it cannot be known from reading the `tsconfig.json` file. This problem is manifested as a TypeScript diagnostic like `error TS6305: Output file '/path/to/execroot/a.d.ts' has not been built from source file '/path/to/execroot/a.ts'.` As a workaround, you can give the Windows "fastbuild" output directory as the `outDir` in your tsconfig file. On other platforms, the value isn't read so it does no harm. See https://github.com/bazelbuild/rules_nodejs/tree/stable/packages/typescript/test/ts_project as an example. We hope this will be fixed in a future release of TypeScript; follow https://github.com/microsoft/TypeScript/issues/37378 3) When TypeScript encounters an import statement, it adds the source file resolved by that reference to the program. However you may have included that source file in a different project, so this causes the problem mentioned above where a source file is in multiple programs. (Note, if you use Project References this is not the case, TS will know the referenced file is part of the other program.) 
This will result in duplicate emit for the same file, which produces an error since the files written to the output tree are read-only. Workarounds include using using Project References, or simply grouping the whole compilation into one program (if this doesn't exceed your time budget). Args: name: A name for the target. We recommend you use the basename (no `.json` extension) of the tsconfig file that should be compiled. srcs: List of labels of TypeScript source files to be provided to the compiler. If absent, defaults to `**/*.ts[x]` (all TypeScript files in the package). deps: List of labels of other rules that produce TypeScript typings (.d.ts files) tsconfig: Label of the tsconfig.json file to use for the compilation. By default, we add `.json` to the `name` attribute. extends: List of labels of tsconfig file(s) referenced in `extends` section of tsconfig. Must include any tsconfig files "chained" by extends clauses. args: List of strings of additional command-line arguments to pass to tsc. tsc: Label of the TypeScript compiler binary to run. For example, `tsc = "@my_deps//typescript/bin:tsc"` Or you can pass a custom compiler binary instead. validate: boolean; whether to check that the tsconfig settings match the attributes. root_dir: a string specifying a subdirectory under the input package which should be consider the root directory of all the input files. Equivalent to the TypeScript --rootDir option. By default it is '.', meaning the source directory where the BUILD file lives. out_dir: a string specifying a subdirectory under the bazel-out folder where outputs are written. Equivalent to the TypeScript --outDir option. Note that Bazel always requires outputs be written under a subdirectory matching the input package, so if your rule appears in path/to/my/package/BUILD.bazel and out_dir = "foo" then the .js files will appear in bazel-out/[arch]/bin/path/to/my/package/foo/*.js. By default the out_dir is '.', meaning the packages folder in bazel-out. declaration_dir: a string specifying a subdirectory under the bazel-out folder where generated declaration outputs are written. Equivalent to the TypeScript --declarationDir option. By default declarations are written to the out_dir. declaration: if the `declaration` bit is set in the tsconfig. Instructs Bazel to expect a `.d.ts` output for each `.ts` source. source_map: if the `sourceMap` bit is set in the tsconfig. Instructs Bazel to expect a `.js.map` output for each `.ts` source. declaration_map: if the `declarationMap` bit is set in the tsconfig. Instructs Bazel to expect a `.d.ts.map` output for each `.ts` source. composite: if the `composite` bit is set in the tsconfig. Instructs Bazel to expect a `.tsbuildinfo` output and a `.d.ts` output for each `.ts` source. incremental: if the `incremental` bit is set in the tsconfig. Instructs Bazel to expect a `.tsbuildinfo` output. emit_declaration_only: if the `emitDeclarationOnly` bit is set in the tsconfig. Instructs Bazel *not* to expect `.js` or `.js.map` outputs for `.ts` sources. **kwargs: passed through to underlying rule, allows eg. 
visibility, tags """ if srcs == None: srcs = native.glob(["**/*.ts", "**/*.tsx"]) if tsconfig == None: tsconfig = name + ".json" extra_deps = [] if validate: validate_options( name = "_validate_%s_options" % name, target = "//%s:%s" % (native.package_name(), name), declaration = declaration, source_map = source_map, declaration_map = declaration_map, composite = composite, incremental = incremental, emit_declaration_only = emit_declaration_only, tsconfig = tsconfig, extends = extends, ) extra_deps.append("_validate_%s_options" % name) typings_out_dir = declaration_dir if declaration_dir else out_dir ts_project( name = name, srcs = srcs, args = args, deps = deps + extra_deps, tsconfig = tsconfig, extends = extends, declaration_dir = declaration_dir, out_dir = out_dir, root_dir = root_dir, js_outs = _out_paths(srcs, out_dir, root_dir, ".js") if not emit_declaration_only else [], map_outs = _out_paths(srcs, out_dir, root_dir, ".js.map") if source_map and not emit_declaration_only else [], typings_outs = _out_paths(srcs, typings_out_dir, root_dir, ".d.ts") if declaration or composite else [], typing_maps_outs = _out_paths(srcs, typings_out_dir, root_dir, ".d.ts.map") if declaration_map else [], buildinfo_out = tsconfig[:-5] + ".tsbuildinfo" if composite or incremental else None, tsc = tsc, **kwargs )
45.865297
157
0.662104
load("@build_bazel_rules_nodejs//:providers.bzl", "DeclarationInfo", "NpmPackageInfo", "declaration_info", "js_module_info", "run_node") _DEFAULT_TSC = ( "@npm" + "//typescript/bin:tsc" ) _ATTRS = { "args": attr.string_list(), "declaration_dir": attr.string(), "deps": attr.label_list(providers = [DeclarationInfo]), "extends": attr.label_list(allow_files = [".json"]), "out_dir": attr.string(), "root_dir": attr.string(), "srcs": attr.label_list(allow_files = True, mandatory = True), "tsc": attr.label(default = Label(_DEFAULT_TSC), executable = True, cfg = "host"), "tsconfig": attr.label(mandatory = True, allow_single_file = [".json"]), } _OUTPUTS = { "buildinfo_out": attr.output(), "js_outs": attr.output_list(), "map_outs": attr.output_list(), "typing_maps_outs": attr.output_list(), "typings_outs": attr.output_list(), } _TsConfigInfo = provider( doc = """Passes tsconfig.json files to downstream compilations so that TypeScript can read them. This is needed to support Project References""", fields = { "tsconfigs": "depset of tsconfig.json files", }, ) def _join(*elements): return "/".join([f for f in elements if f]) def _ts_project_impl(ctx): arguments = ctx.actions.args() arguments.add_all(ctx.attr.args) arguments.add_all([ "--project", ctx.file.tsconfig.path, "--outDir", _join(ctx.bin_dir.path, ctx.label.package, ctx.attr.out_dir), "--rootDir", _join(ctx.label.package, ctx.attr.root_dir) if ctx.label.package else ".", ]) if len(ctx.outputs.typings_outs) > 0: declaration_dir = ctx.attr.declaration_dir if ctx.attr.declaration_dir else ctx.attr.out_dir arguments.add_all([ "--declarationDir", _join(ctx.bin_dir.path, ctx.label.package, declaration_dir), ]) if "VERBOSE_LOGS" in ctx.var.keys(): arguments.add_all([ # What files were in the ts.Program "--listFiles", # Did tsc write all outputs to the place we expect to find them? "--listEmittedFiles", # Why did module resolution fail? "--traceResolution", # Why was the build slow? "--diagnostics", "--extendedDiagnostics", ]) deps_depsets = [] for dep in ctx.attr.deps: if _TsConfigInfo in dep: deps_depsets.append(dep[_TsConfigInfo].tsconfigs) if NpmPackageInfo in dep: # TODO: we could maybe filter these to be tsconfig.json or *.d.ts only # we don't expect tsc wants to read any other files from npm packages. 
deps_depsets.append(dep[NpmPackageInfo].sources) if DeclarationInfo in dep: deps_depsets.append(dep[DeclarationInfo].transitive_declarations) inputs = ctx.files.srcs + depset(transitive = deps_depsets).to_list() + [ctx.file.tsconfig] if ctx.attr.extends: inputs.extend(ctx.files.extends) # tsc will only produce .json if it also produces .js if len(ctx.outputs.js_outs): json_outs = [ ctx.actions.declare_file(_join(ctx.attr.out_dir, src.short_path[len(ctx.label.package) + 1:])) for src in ctx.files.srcs if src.basename.endswith(".json") ] else: json_outs = [] outputs = json_outs + ctx.outputs.js_outs + ctx.outputs.map_outs + ctx.outputs.typings_outs + ctx.outputs.typing_maps_outs if ctx.outputs.buildinfo_out: outputs.append(ctx.outputs.buildinfo_out) runtime_outputs = depset(json_outs + ctx.outputs.js_outs + ctx.outputs.map_outs) typings_outputs = ctx.outputs.typings_outs + ctx.outputs.typing_maps_outs + [s for s in ctx.files.srcs if s.path.endswith(".d.ts")] if len(outputs) > 0: run_node( ctx, inputs = inputs, arguments = [arguments], outputs = outputs, executable = "tsc", progress_message = "Compiling TypeScript project %s [tsc -p %s]" % ( ctx.label, ctx.file.tsconfig.short_path, ), ) providers = [ # DefaultInfo is what you see on the command-line for a built library, # and determines what files are used by a simple non-provider-aware # downstream library. # Only the JavaScript outputs are intended for use in non-TS-aware # dependents. DefaultInfo( files = runtime_outputs, runfiles = ctx.runfiles( transitive_files = runtime_outputs, collect_default = True, ), ), js_module_info( sources = runtime_outputs, deps = ctx.attr.deps, ), _TsConfigInfo(tsconfigs = depset([ctx.file.tsconfig] + ctx.files.extends, transitive = [ dep[_TsConfigInfo].tsconfigs for dep in ctx.attr.deps if _TsConfigInfo in dep ])), ] # Don't provide DeclarationInfo if there are no typings to provide. if len(typings_outputs) or len(ctx.attr.deps): providers.append(declaration_info(depset(typings_outputs), ctx.attr.deps)) providers.append(OutputGroupInfo(types = depset(typings_outputs))) return providers ts_project = rule( implementation = _ts_project_impl, attrs = dict(_ATTRS, **_OUTPUTS), ) def _validate_options_impl(ctx): # We make it a .d.ts file so we can plumb it to the deps of the ts_project compile. 
marker = ctx.actions.declare_file("%s.optionsvalid.d.ts" % ctx.label.name) arguments = ctx.actions.args() arguments.add_all([ctx.file.tsconfig.path, marker.path, ctx.attr.target, struct( declaration = ctx.attr.declaration, declaration_map = ctx.attr.declaration_map, composite = ctx.attr.composite, emit_declaration_only = ctx.attr.emit_declaration_only, source_map = ctx.attr.source_map, incremental = ctx.attr.incremental, ).to_json()]) run_node( ctx, inputs = [ctx.file.tsconfig] + ctx.files.extends, outputs = [marker], arguments = [arguments], executable = "validator", ) return [ DeclarationInfo( transitive_declarations = depset([marker]), ), ] validate_options = rule( implementation = _validate_options_impl, attrs = { "composite": attr.bool(), "declaration": attr.bool(), "declaration_map": attr.bool(), "emit_declaration_only": attr.bool(), "extends": attr.label_list(allow_files = [".json"]), "incremental": attr.bool(), "source_map": attr.bool(), "target": attr.string(), "tsconfig": attr.label(mandatory = True, allow_single_file = [".json"]), "validator": attr.label(default = Label("//packages/typescript/bin:ts_project_options_validator"), executable = True, cfg = "host"), }, ) def _out_paths(srcs, outdir, rootdir, ext): rootdir_replace_pattern = rootdir + "/" if rootdir else "" return [_join(outdir, f[:f.rindex(".")].replace(rootdir_replace_pattern, "") + ext) for f in srcs if not f.endswith(".d.ts") and not f.endswith(".json")] def ts_project_macro( name = "tsconfig", tsconfig = None, srcs = None, args = [], deps = [], extends = None, declaration = False, source_map = False, declaration_map = False, composite = False, incremental = False, emit_declaration_only = False, tsc = None, validate = True, declaration_dir = None, out_dir = None, root_dir = None, **kwargs): if srcs == None: srcs = native.glob(["**/*.ts", "**/*.tsx"]) if tsconfig == None: tsconfig = name + ".json" extra_deps = [] if validate: validate_options( name = "_validate_%s_options" % name, target = "//%s:%s" % (native.package_name(), name), declaration = declaration, source_map = source_map, declaration_map = declaration_map, composite = composite, incremental = incremental, emit_declaration_only = emit_declaration_only, tsconfig = tsconfig, extends = extends, ) extra_deps.append("_validate_%s_options" % name) typings_out_dir = declaration_dir if declaration_dir else out_dir ts_project( name = name, srcs = srcs, args = args, deps = deps + extra_deps, tsconfig = tsconfig, extends = extends, declaration_dir = declaration_dir, out_dir = out_dir, root_dir = root_dir, js_outs = _out_paths(srcs, out_dir, root_dir, ".js") if not emit_declaration_only else [], map_outs = _out_paths(srcs, out_dir, root_dir, ".js.map") if source_map and not emit_declaration_only else [], typings_outs = _out_paths(srcs, typings_out_dir, root_dir, ".d.ts") if declaration or composite else [], typing_maps_outs = _out_paths(srcs, typings_out_dir, root_dir, ".d.ts.map") if declaration_map else [], buildinfo_out = tsconfig[:-5] + ".tsbuildinfo" if composite or incremental else None, tsc = tsc, **kwargs )
true
true
f7029df8f7bce93b2ecff06eff3a6b32d27d9d3d
140
py
Python
marrow/dsl/exc.py
marrow/dsl
79899531361aace5741a79de79304e1f7aaa600d
[ "MIT" ]
10
2018-02-23T07:13:29.000Z
2021-11-08T14:53:47.000Z
marrow/dsl/exc.py
marrow/dsl
79899531361aace5741a79de79304e1f7aaa600d
[ "MIT" ]
null
null
null
marrow/dsl/exc.py
marrow/dsl
79899531361aace5741a79de79304e1f7aaa600d
[ "MIT" ]
null
null
null
# encoding: utf-8

from __future__ import unicode_literals


class TranslationError(Exception):
	"""Failure to translate source."""
	
	pass
14
39
0.757143
from __future__ import unicode_literals


class TranslationError(Exception):
	pass
true
true
f7029e2387e943ad5f298a67c17e2e40f52d5567
362
py
Python
sample_problems/problems_with_solution57.py
adi01trip01/adi_workspace
f493b3ba84645eec3a57607243760a826880d1a3
[ "MIT" ]
null
null
null
sample_problems/problems_with_solution57.py
adi01trip01/adi_workspace
f493b3ba84645eec3a57607243760a826880d1a3
[ "MIT" ]
null
null
null
sample_problems/problems_with_solution57.py
adi01trip01/adi_workspace
f493b3ba84645eec3a57607243760a826880d1a3
[ "MIT" ]
null
null
null
# Write a Python program to get execution time for a Python method.
import time


def sum_of_n_numbers(x):
    start_time = time.time()
    s = 0
    for i in range(1, x + 1):
        s = s + i
    end_time = time.time()
    return s, end_time - start_time


n = 5
print("\nTime to sum of 1 to ", n, " and required time to calculate is :", sum_of_n_numbers(n))
21.294118
95
0.632597
import time


def sum_of_n_numbers(x):
    start_time = time.time()
    s = 0
    for i in range(1, x + 1):
        s = s + i
    end_time = time.time()
    return s, end_time - start_time


n = 5
print("\nTime to sum of 1 to ", n, " and required time to calculate is :", sum_of_n_numbers(n))
true
true
f7029e2981a4619967429d8979bc4eb77eb2ef8d
13,759
py
Python
kivy/input/providers/mtdev.py
DaleEMoore/kivy
3fc5a455d8f50c857866082091f09b834ecf64af
[ "MIT" ]
1
2017-10-25T03:23:36.000Z
2017-10-25T03:23:36.000Z
kivy/input/providers/mtdev.py
DaleEMoore/kivy
3fc5a455d8f50c857866082091f09b834ecf64af
[ "MIT" ]
null
null
null
kivy/input/providers/mtdev.py
DaleEMoore/kivy
3fc5a455d8f50c857866082091f09b834ecf64af
[ "MIT" ]
null
null
null
''' Native support for Multitouch devices on Linux, using libmtdev. =============================================================== The Mtdev project is a part of the Ubuntu Maverick multitouch architecture. You can read more on http://wiki.ubuntu.com/Multitouch To configure MTDev, it's preferable to use probesysfs providers. Check :py:class:`~kivy.input.providers.probesysfs` for more information. Otherwise, add this to your configuration:: [input] # devicename = hidinput,/dev/input/eventXX acert230h = mtdev,/dev/input/event2 .. note:: You must have read access to the input event. You can use a custom range for the X, Y and pressure values. On some drivers, the range reported is invalid. To fix that, you can add these options to the argument line: * invert_x : 1 to invert X axis * invert_y : 1 to invert Y axis * min_position_x : X minimum * max_position_x : X maximum * min_position_y : Y minimum * max_position_y : Y maximum * min_pressure : pressure minimum * max_pressure : pressure maximum * min_touch_major : width shape minimum * max_touch_major : width shape maximum * min_touch_minor : width shape minimum * max_touch_minor : height shape maximum * rotation : 0,90,180 or 270 to rotate ''' __all__ = ('MTDMotionEventProvider', 'MTDMotionEvent') import os from kivy.input.motionevent import MotionEvent from kivy.input.shape import ShapeRect class MTDMotionEvent(MotionEvent): def depack(self, args): self.is_touch = True if 'x' in args: self.sx = args['x'] else: self.sx = -1 if 'y' in args: self.sy = args['y'] else: self.sy = -1 self.profile = ['pos'] if 'size_w' in args and 'size_h' in args: self.shape = ShapeRect() self.shape.width = args['size_w'] self.shape.height = args['size_h'] self.profile.append('shape') if 'pressure' in args: self.pressure = args['pressure'] self.profile.append('pressure') super(MTDMotionEvent, self).depack(args) def __str__(self): i, sx, sy, d = (self.id, self.sx, self.sy, self.device) return '<MTDMotionEvent id=%d pos=(%f, %f) device=%s>' % (i, sx, sy, d) if 'KIVY_DOC' in os.environ: # documentation hack MTDMotionEventProvider = None else: import threading import collections from kivy.lib.mtdev import Device, \ MTDEV_TYPE_EV_ABS, MTDEV_CODE_SLOT, MTDEV_CODE_POSITION_X, \ MTDEV_CODE_POSITION_Y, MTDEV_CODE_PRESSURE, \ MTDEV_CODE_TOUCH_MAJOR, MTDEV_CODE_TOUCH_MINOR, \ MTDEV_CODE_TRACKING_ID, MTDEV_ABS_POSITION_X, \ MTDEV_ABS_POSITION_Y, MTDEV_ABS_TOUCH_MINOR, \ MTDEV_ABS_TOUCH_MAJOR from kivy.input.provider import MotionEventProvider from kivy.input.factory import MotionEventFactory from kivy.logger import Logger class MTDMotionEventProvider(MotionEventProvider): options = ('min_position_x', 'max_position_x', 'min_position_y', 'max_position_y', 'min_pressure', 'max_pressure', 'min_touch_major', 'max_touch_major', 'min_touch_minor', 'max_touch_minor', 'invert_x', 'invert_y', 'rotation') def __init__(self, device, args): super(MTDMotionEventProvider, self).__init__(device, args) self._device = None self.input_fn = None self.default_ranges = dict() # split arguments args = args.split(',') if not args: Logger.error('MTD: No filename pass to MTD configuration') Logger.error('MTD: Use /dev/input/event0 for example') return # read filename self.input_fn = args[0] Logger.info('MTD: Read event from <%s>' % self.input_fn) # read parameters for arg in args[1:]: if arg == '': continue arg = arg.split('=') # ensure it's a key = value if len(arg) != 2: err = 'MTD: Bad parameter %s: Not in key=value format' %\ arg Logger.error(err) continue # ensure the key exist key, value = arg if 
key not in MTDMotionEventProvider.options: Logger.error('MTD: unknown %s option' % key) continue # ensure the value try: self.default_ranges[key] = int(value) except ValueError: err = 'MTD: invalid value %s for option %s' % (key, value) Logger.error(err) continue # all good! Logger.info('MTD: Set custom %s to %d' % (key, int(value))) if 'rotation' not in self.default_ranges: self.default_ranges['rotation'] = 0 elif self.default_ranges['rotation'] not in (0, 90, 180, 270): Logger.error('HIDInput: invalid rotation value ({})'.format( self.default_ranges['rotation'])) self.default_ranges['rotation'] = 0 def start(self): if self.input_fn is None: return self.uid = 0 self.queue = collections.deque() self.thread = threading.Thread( target=self._thread_run, kwargs=dict( queue=self.queue, input_fn=self.input_fn, device=self.device, default_ranges=self.default_ranges)) self.thread.daemon = True self.thread.start() def _thread_run(self, **kwargs): input_fn = kwargs.get('input_fn') queue = kwargs.get('queue') device = kwargs.get('device') drs = kwargs.get('default_ranges').get touches = {} touches_sent = [] point = {} l_points = {} def assign_coord(point, value, invert, coords): cx, cy = coords if invert: value = 1. - value if rotation == 0: point[cx] = value elif rotation == 90: point[cy] = value elif rotation == 180: point[cx] = 1. - value elif rotation == 270: point[cy] = 1. - value def process(points): for args in points: # this can happen if we have a touch going on already at # the start of the app if 'id' not in args: continue tid = args['id'] try: touch = touches[tid] except KeyError: touch = MTDMotionEvent(device, tid, args) touches[touch.id] = touch touch.move(args) action = 'update' if tid not in touches_sent: action = 'begin' touches_sent.append(tid) if 'delete' in args: action = 'end' del args['delete'] del touches[touch.id] touches_sent.remove(tid) touch.update_time_end() queue.append((action, touch)) def normalize(value, vmin, vmax): return (value - vmin) / float(vmax - vmin) # open mtdev device _fn = input_fn _slot = 0 try: _device = Device(_fn) except OSError as e: if e.errno == 13: # Permission denied Logger.warn( 'MTD: Unable to open device "{0}". 
Please ensure you' ' have the appropriate permissions.'.format(_fn)) return else: raise _changes = set() # prepare some vars to get limit of some component ab = _device.get_abs(MTDEV_ABS_POSITION_X) range_min_position_x = drs('min_position_x', ab.minimum) range_max_position_x = drs('max_position_x', ab.maximum) Logger.info('MTD: <%s> range position X is %d - %d' % (_fn, range_min_position_x, range_max_position_x)) ab = _device.get_abs(MTDEV_ABS_POSITION_Y) range_min_position_y = drs('min_position_y', ab.minimum) range_max_position_y = drs('max_position_y', ab.maximum) Logger.info('MTD: <%s> range position Y is %d - %d' % (_fn, range_min_position_y, range_max_position_y)) ab = _device.get_abs(MTDEV_ABS_TOUCH_MAJOR) range_min_major = drs('min_touch_major', ab.minimum) range_max_major = drs('max_touch_major', ab.maximum) Logger.info('MTD: <%s> range touch major is %d - %d' % (_fn, range_min_major, range_max_major)) ab = _device.get_abs(MTDEV_ABS_TOUCH_MINOR) range_min_minor = drs('min_touch_minor', ab.minimum) range_max_minor = drs('max_touch_minor', ab.maximum) Logger.info('MTD: <%s> range touch minor is %d - %d' % (_fn, range_min_minor, range_max_minor)) range_min_pressure = drs('min_pressure', 0) range_max_pressure = drs('max_pressure', 255) Logger.info('MTD: <%s> range pressure is %d - %d' % (_fn, range_min_pressure, range_max_pressure)) invert_x = int(bool(drs('invert_x', 0))) invert_y = int(bool(drs('invert_y', 0))) Logger.info('MTD: <%s> axes invertion: X is %d, Y is %d' % (_fn, invert_x, invert_y)) rotation = drs('rotation', 0) Logger.info('MTD: <%s> rotation set to %d' % (_fn, rotation)) while _device: # idle as much as we can. while _device.idle(1000): continue # got data, read all without redoing idle while True: data = _device.get() if data is None: break # set the working slot if data.type == MTDEV_TYPE_EV_ABS and \ data.code == MTDEV_CODE_SLOT: _slot = data.value continue # fill the slot if _slot not in l_points: l_points[_slot] = dict() point = l_points[_slot] ev_value = data.value ev_code = data.code if ev_code == MTDEV_CODE_POSITION_X: val = normalize(ev_value, range_min_position_x, range_max_position_x) assign_coord(point, val, invert_x, 'xy') elif ev_code == MTDEV_CODE_POSITION_Y: val = 1. - normalize(ev_value, range_min_position_y, range_max_position_y) assign_coord(point, val, invert_y, 'yx') elif ev_code == MTDEV_CODE_PRESSURE: point['pressure'] = normalize(ev_value, range_min_pressure, range_max_pressure) elif ev_code == MTDEV_CODE_TOUCH_MAJOR: point['size_w'] = normalize(ev_value, range_min_major, range_max_major) elif ev_code == MTDEV_CODE_TOUCH_MINOR: point['size_h'] = normalize(ev_value, range_min_minor, range_max_minor) elif ev_code == MTDEV_CODE_TRACKING_ID: if ev_value == -1: point['delete'] = True # force process of changes here, as the slot can be # reused. _changes.add(_slot) process([l_points[x] for x in _changes]) _changes.clear() continue else: point['id'] = ev_value else: # unrecognized command, ignore. continue _changes.add(_slot) # push all changes if _changes: process([l_points[x] for x in _changes]) _changes.clear() def update(self, dispatch_fn): # dispatch all event from threads try: while True: event_type, touch = self.queue.popleft() dispatch_fn(event_type, touch) except: pass MotionEventFactory.register('mtdev', MTDMotionEventProvider)
38.977337
79
0.49226
__all__ = ('MTDMotionEventProvider', 'MTDMotionEvent') import os from kivy.input.motionevent import MotionEvent from kivy.input.shape import ShapeRect class MTDMotionEvent(MotionEvent): def depack(self, args): self.is_touch = True if 'x' in args: self.sx = args['x'] else: self.sx = -1 if 'y' in args: self.sy = args['y'] else: self.sy = -1 self.profile = ['pos'] if 'size_w' in args and 'size_h' in args: self.shape = ShapeRect() self.shape.width = args['size_w'] self.shape.height = args['size_h'] self.profile.append('shape') if 'pressure' in args: self.pressure = args['pressure'] self.profile.append('pressure') super(MTDMotionEvent, self).depack(args) def __str__(self): i, sx, sy, d = (self.id, self.sx, self.sy, self.device) return '<MTDMotionEvent id=%d pos=(%f, %f) device=%s>' % (i, sx, sy, d) if 'KIVY_DOC' in os.environ: MTDMotionEventProvider = None else: import threading import collections from kivy.lib.mtdev import Device, \ MTDEV_TYPE_EV_ABS, MTDEV_CODE_SLOT, MTDEV_CODE_POSITION_X, \ MTDEV_CODE_POSITION_Y, MTDEV_CODE_PRESSURE, \ MTDEV_CODE_TOUCH_MAJOR, MTDEV_CODE_TOUCH_MINOR, \ MTDEV_CODE_TRACKING_ID, MTDEV_ABS_POSITION_X, \ MTDEV_ABS_POSITION_Y, MTDEV_ABS_TOUCH_MINOR, \ MTDEV_ABS_TOUCH_MAJOR from kivy.input.provider import MotionEventProvider from kivy.input.factory import MotionEventFactory from kivy.logger import Logger class MTDMotionEventProvider(MotionEventProvider): options = ('min_position_x', 'max_position_x', 'min_position_y', 'max_position_y', 'min_pressure', 'max_pressure', 'min_touch_major', 'max_touch_major', 'min_touch_minor', 'max_touch_minor', 'invert_x', 'invert_y', 'rotation') def __init__(self, device, args): super(MTDMotionEventProvider, self).__init__(device, args) self._device = None self.input_fn = None self.default_ranges = dict() args = args.split(',') if not args: Logger.error('MTD: No filename pass to MTD configuration') Logger.error('MTD: Use /dev/input/event0 for example') return self.input_fn = args[0] Logger.info('MTD: Read event from <%s>' % self.input_fn) for arg in args[1:]: if arg == '': continue arg = arg.split('=') if len(arg) != 2: err = 'MTD: Bad parameter %s: Not in key=value format' %\ arg Logger.error(err) continue # ensure the key exist key, value = arg if key not in MTDMotionEventProvider.options: Logger.error('MTD: unknown %s option' % key) continue # ensure the value try: self.default_ranges[key] = int(value) except ValueError: err = 'MTD: invalid value %s for option %s' % (key, value) Logger.error(err) continue # all good! Logger.info('MTD: Set custom %s to %d' % (key, int(value))) if 'rotation' not in self.default_ranges: self.default_ranges['rotation'] = 0 elif self.default_ranges['rotation'] not in (0, 90, 180, 270): Logger.error('HIDInput: invalid rotation value ({})'.format( self.default_ranges['rotation'])) self.default_ranges['rotation'] = 0 def start(self): if self.input_fn is None: return self.uid = 0 self.queue = collections.deque() self.thread = threading.Thread( target=self._thread_run, kwargs=dict( queue=self.queue, input_fn=self.input_fn, device=self.device, default_ranges=self.default_ranges)) self.thread.daemon = True self.thread.start() def _thread_run(self, **kwargs): input_fn = kwargs.get('input_fn') queue = kwargs.get('queue') device = kwargs.get('device') drs = kwargs.get('default_ranges').get touches = {} touches_sent = [] point = {} l_points = {} def assign_coord(point, value, invert, coords): cx, cy = coords if invert: value = 1. 
- value if rotation == 0: point[cx] = value elif rotation == 90: point[cy] = value elif rotation == 180: point[cx] = 1. - value elif rotation == 270: point[cy] = 1. - value def process(points): for args in points: # this can happen if we have a touch going on already at # the start of the app if 'id' not in args: continue tid = args['id'] try: touch = touches[tid] except KeyError: touch = MTDMotionEvent(device, tid, args) touches[touch.id] = touch touch.move(args) action = 'update' if tid not in touches_sent: action = 'begin' touches_sent.append(tid) if 'delete' in args: action = 'end' del args['delete'] del touches[touch.id] touches_sent.remove(tid) touch.update_time_end() queue.append((action, touch)) def normalize(value, vmin, vmax): return (value - vmin) / float(vmax - vmin) # open mtdev device _fn = input_fn _slot = 0 try: _device = Device(_fn) except OSError as e: if e.errno == 13: # Permission denied Logger.warn( 'MTD: Unable to open device "{0}". Please ensure you' ' have the appropriate permissions.'.format(_fn)) return else: raise _changes = set() # prepare some vars to get limit of some component ab = _device.get_abs(MTDEV_ABS_POSITION_X) range_min_position_x = drs('min_position_x', ab.minimum) range_max_position_x = drs('max_position_x', ab.maximum) Logger.info('MTD: <%s> range position X is %d - %d' % (_fn, range_min_position_x, range_max_position_x)) ab = _device.get_abs(MTDEV_ABS_POSITION_Y) range_min_position_y = drs('min_position_y', ab.minimum) range_max_position_y = drs('max_position_y', ab.maximum) Logger.info('MTD: <%s> range position Y is %d - %d' % (_fn, range_min_position_y, range_max_position_y)) ab = _device.get_abs(MTDEV_ABS_TOUCH_MAJOR) range_min_major = drs('min_touch_major', ab.minimum) range_max_major = drs('max_touch_major', ab.maximum) Logger.info('MTD: <%s> range touch major is %d - %d' % (_fn, range_min_major, range_max_major)) ab = _device.get_abs(MTDEV_ABS_TOUCH_MINOR) range_min_minor = drs('min_touch_minor', ab.minimum) range_max_minor = drs('max_touch_minor', ab.maximum) Logger.info('MTD: <%s> range touch minor is %d - %d' % (_fn, range_min_minor, range_max_minor)) range_min_pressure = drs('min_pressure', 0) range_max_pressure = drs('max_pressure', 255) Logger.info('MTD: <%s> range pressure is %d - %d' % (_fn, range_min_pressure, range_max_pressure)) invert_x = int(bool(drs('invert_x', 0))) invert_y = int(bool(drs('invert_y', 0))) Logger.info('MTD: <%s> axes invertion: X is %d, Y is %d' % (_fn, invert_x, invert_y)) rotation = drs('rotation', 0) Logger.info('MTD: <%s> rotation set to %d' % (_fn, rotation)) while _device: # idle as much as we can. while _device.idle(1000): continue # got data, read all without redoing idle while True: data = _device.get() if data is None: break # set the working slot if data.type == MTDEV_TYPE_EV_ABS and \ data.code == MTDEV_CODE_SLOT: _slot = data.value continue # fill the slot if _slot not in l_points: l_points[_slot] = dict() point = l_points[_slot] ev_value = data.value ev_code = data.code if ev_code == MTDEV_CODE_POSITION_X: val = normalize(ev_value, range_min_position_x, range_max_position_x) assign_coord(point, val, invert_x, 'xy') elif ev_code == MTDEV_CODE_POSITION_Y: val = 1. 
- normalize(ev_value, range_min_position_y, range_max_position_y) assign_coord(point, val, invert_y, 'yx') elif ev_code == MTDEV_CODE_PRESSURE: point['pressure'] = normalize(ev_value, range_min_pressure, range_max_pressure) elif ev_code == MTDEV_CODE_TOUCH_MAJOR: point['size_w'] = normalize(ev_value, range_min_major, range_max_major) elif ev_code == MTDEV_CODE_TOUCH_MINOR: point['size_h'] = normalize(ev_value, range_min_minor, range_max_minor) elif ev_code == MTDEV_CODE_TRACKING_ID: if ev_value == -1: point['delete'] = True # force process of changes here, as the slot can be # reused. _changes.add(_slot) process([l_points[x] for x in _changes]) _changes.clear() continue else: point['id'] = ev_value else: # unrecognized command, ignore. continue _changes.add(_slot) # push all changes if _changes: process([l_points[x] for x in _changes]) _changes.clear() def update(self, dispatch_fn): # dispatch all event from threads try: while True: event_type, touch = self.queue.popleft() dispatch_fn(event_type, touch) except: pass MotionEventFactory.register('mtdev', MTDMotionEventProvider)
true
true
f7029ebf8b275c2961b1fda13c18da5f443bfc24
825
py
Python
backend/data_export/tests/test_dataset.py
arcada-uas/doccano
c29aece3dd4504eeaaa3466af0663bfe18b90dc1
[ "MIT" ]
2,082
2018-05-09T07:16:21.000Z
2019-12-01T16:41:50.000Z
backend/data_export/tests/test_dataset.py
arcada-uas/doccano
c29aece3dd4504eeaaa3466af0663bfe18b90dc1
[ "MIT" ]
365
2018-07-31T13:49:05.000Z
2019-11-29T11:25:17.000Z
backend/data_export/tests/test_dataset.py
arcada-uas/doccano
c29aece3dd4504eeaaa3466af0663bfe18b90dc1
[ "MIT" ]
476
2018-08-17T06:43:57.000Z
2019-12-01T09:47:08.000Z
import unittest
from unittest.mock import MagicMock

import pandas as pd
from pandas.testing import assert_frame_equal

from data_export.pipeline.dataset import Dataset


class TestDataset(unittest.TestCase):
    def setUp(self):
        example = MagicMock()
        example.to_dict.return_value = {"data": "example"}
        self.examples = MagicMock()
        self.examples.__iter__.return_value = [example]
        label = MagicMock()
        label.find_by.return_value = {"labels": ["label"]}
        self.labels = MagicMock()
        self.labels.__iter__.return_value = [label]

    def test_to_dataframe(self):
        dataset = Dataset(self.examples, self.labels)
        df = dataset.to_dataframe()
        expected = pd.DataFrame([{"data": "example", "labels": ["label"]}])
        assert_frame_equal(df, expected)
31.730769
75
0.672727
import unittest
from unittest.mock import MagicMock

import pandas as pd
from pandas.testing import assert_frame_equal

from data_export.pipeline.dataset import Dataset


class TestDataset(unittest.TestCase):
    def setUp(self):
        example = MagicMock()
        example.to_dict.return_value = {"data": "example"}
        self.examples = MagicMock()
        self.examples.__iter__.return_value = [example]
        label = MagicMock()
        label.find_by.return_value = {"labels": ["label"]}
        self.labels = MagicMock()
        self.labels.__iter__.return_value = [label]

    def test_to_dataframe(self):
        dataset = Dataset(self.examples, self.labels)
        df = dataset.to_dataframe()
        expected = pd.DataFrame([{"data": "example", "labels": ["label"]}])
        assert_frame_equal(df, expected)
true
true
f7029fff48454ff996b3fa84325dfe291ea89943
706
py
Python
src/python/grpcio_channelz/grpc_version.py
xeno14/grpc
ceb12c6be5f801997db976b564ddb14f95e46ce9
[ "Apache-2.0" ]
2
2019-08-15T18:29:13.000Z
2020-11-03T20:18:42.000Z
src/python/grpcio_channelz/grpc_version.py
xeno14/grpc
ceb12c6be5f801997db976b564ddb14f95e46ce9
[ "Apache-2.0" ]
2
2017-03-07T22:54:36.000Z
2017-04-14T15:17:36.000Z
src/python/grpcio_channelz/grpc_version.py
xeno14/grpc
ceb12c6be5f801997db976b564ddb14f95e46ce9
[ "Apache-2.0" ]
1
2021-08-03T19:12:54.000Z
2021-08-03T19:12:54.000Z
# Copyright 2018 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_channelz/grpc_version.py.template`!!!

VERSION = '1.23.0.dev0'
39.222222
99
0.763456
VERSION = '1.23.0.dev0'
true
true
f702a06791c1c7d80d4cc9c623916b3a47b3c85a
15,279
py
Python
tools/external_converter_v2/parser/kill_fluid/fluid_helper.py
kiritigowda/Anakin
4ba2329153163590e11875dc6b4150031066915d
[ "Apache-2.0" ]
null
null
null
tools/external_converter_v2/parser/kill_fluid/fluid_helper.py
kiritigowda/Anakin
4ba2329153163590e11875dc6b4150031066915d
[ "Apache-2.0" ]
3
2018-06-22T09:08:44.000Z
2018-07-04T08:38:30.000Z
tools/external_converter_v2/parser/kill_fluid/fluid_helper.py
kiritigowda/Anakin
4ba2329153163590e11875dc6b4150031066915d
[ "Apache-2.0" ]
null
null
null
from ..proto import * from ..graph_io import * import paddle.fluid as fluid import numpy as np from paddle.fluid.core import VarDesc, AttrType def union(list_a, list_b): return list(set(list_a).union(set(list_b))) def difference(list_a, list_b): return list(set(list_a).difference(set(list_b))) class Edge_for_fluid: def __init__(self, param, target, var): self.param = param self.target = target self.var = var class Fluid_edger: def __init__(self, param = None, target = None, var = None): self.edges = [] if param is not None and target is not None: edge = Edge_for_fluid(param, target, var) self.edges.append(edge) def __call__(self): return self.all_targets() def add(self, param, target, var = None): edge = Edge_for_fluid(param, target, var) self.edges.append(edge) def rm_edges_by_param(self, param): for edge in self.edges: if edge.param == param: edge_idx = self.edges.index(edge) del self.edges[edge_idx] def rm(self, target): res = -1 for edge in self.edges: if target == edge.target: edge_idx = self.edges.index(edge) del self.edges[edge_idx] res = res + 1 if res != 0: pass def mv(self, old_target, new_target): res = -1 for edge in self.edges: if old_target == edge.target: edge.target = new_target res = res + 1 if res != 0: pass def all_params(self): params = [] for edge in self.edges: if edge.param not in params: params.append(edge.param) return params def all_targets(self): targets = [] for edge in self.edges: targets.append(edge.target) return targets def targets(self, param): targets = [] for edge in self.edges: if edge.param == param: targets.append(edge.target) return targets def target(self, param, idx = 0): return self.targets(param)[idx] def clear(self): targets_list = self.all_targets() for target in targets_list: self.rm(target) def targets_with_params(self): list_of_targets_and_params = [] for edge in self.edges: target_and_param = [edge.target, edge.param] list_of_targets_and_params.append(target_and_param) return list_of_targets_and_params def vars_by_target(self, target): vars = [] for edge in self.edges: if edge.target == target and edge.var is not None: vars.append(edge.var) return vars def __getitem__(self, idx): if idx < len(self.edges): return self.edges[idx] return None class Fluid_helper: def __init__(self, scope, block): self.scope = scope self.block = block def args_by_input_param(self, op, param_name): if param_name in op.input_names: return op.input(param_name) else: raise NameError('ERROR: param_name %s is not exists.' % ( param_name ) ) def args_by_output_param(self, op, param_name): if param_name in op.output_names: return op.output(param_name) else: raise NameError('ERROR: param_name %s is not exists.' % ( param_name ) ) def var_by_input_param(self, op, param_name, var_idx = 0): var_name = self.args_by_input_param(op, param_name)[var_idx] var = self.block.var(var_name) return var def var_by_output_param(self, op, param_name, var_idx = 0): var_name = self.args_by_output_param(op, param_name)[var_idx] var = self.block.var(var_name) return var def var_name_by_param(self, op, param_name, var_idx = 0): if param_name not in op.input_names + op.output_names: raise NameError('ERROR: param_name %s is not exists.' % ( param_name ) ) elif param_name in op.input_names: if len(op.input(param_name)) > 0: var_name_unicode = op.input(param_name)[var_idx] else: raise NameError('ERROR: param %s has not var.' 
% ( param_name ) ) elif param_name in op.output_names: if len(op.output(param_name)) > 0: var_name_unicode = op.output(param_name)[var_idx] else: raise NameError('ERROR: param %s has not var.' % ( param_name ) ) var = self.block.var(var_name_unicode) var_name = var.name return var_name def var_by_param(self, op, param_name, var_idx = 0): var_name = self.var_name_by_param(op, param_name, var_idx) var = self.block.var(var_name) return var def shape_by_var_name(self, var_name, layout = 'NCHW'): var = self.block.var(var_name) long_tuple = var.shape long_list = list(long_tuple) if layout == 'NCHW': int_list_4d = map(int, [1]*(4-len(long_list)) + long_list) return int_list_4d elif layout == 'UNMODIFIED': return long_list else: raise NameError('ERROR: layout %s is not implemented yet.' % ( layout ) ) def np_data_by_var_name(self, var_name): numpy_array = fluid.executor.fetch_var(var_name, self.scope, True) return numpy_array def dtype_by_var_name(self, var_name): var = self.block.var(var_name) fluid_var_type = var.dtype dtype = ANAKIN_TENSOR_DTYPE[fluid_var_type] return dtype def is_persistable_param(self, op, param_name, var_idx = 0): var = self.var_by_param(op, param_name, var_idx) is_persistable_var = var.persistable return is_persistable_var def var_shape_by_param(self, transpose, op, param_name, var_idx = 0, layout = 'NCHW'): if transpose is True: raise NameError('ERROR: var_shape transpose is not implemented yet.') else: var_name = self.var_name_by_param(op, param_name, var_idx) shape = self.shape_by_var_name(var_name, layout) return shape def data_with_shape_by_param(self, op, param_name, transpose = False, axes = None, var_idx = 0, is_flat_list = True, layout = 'NCHW'): np.set_printoptions(threshold=np.inf, suppress=True) var_name = self.var_name_by_param(op, param_name, var_idx) np_array = self.np_data_by_var_name(var_name) if transpose is True: np_array = np.transpose(np_array, axes) np_shape = np.shape(np_array) if layout == 'NCHW': np_shape = map(int, [1]*(4-len(np_shape)) + list(np_shape)) if is_flat_list is True: flat_list = list(np_array.flatten()) return [flat_list, np_shape] else: return [np_array, np_shape] def np_param(self, op, param_name, transpose = False, axes = None, var_idx = 0): [data, np_shape] = self.data_with_shape_by_param(op, param_name, transpose, \ axes, var_idx, False) return data def dtype_by_param(self, op, param_name, var_idx = 0): var_name = self.var_name_by_param(op, param_name, var_idx) dtype = self.dtype_by_var_name(var_name) return dtype def is_list_type(self, op, attr_name): if op.has_attr(attr_name): fluid_attr_type = op.attr_type(attr_name) if fluid_attr_type in ANAKIN_ATTR_IS_LIST.keys(): return ANAKIN_ATTR_IS_LIST[fluid_attr_type] else: return False # AttrType.LONG else: raise NameError('ERROR: attr_name %s is not exists.' % ( attr_name ) ) def dtype_of_attr(self, op, attr_name): if op.has_attr(attr_name): fluid_attr_type = op.attr_type(attr_name) if fluid_attr_type in ANAKIN_ATTR_DTYPE.keys(): return ANAKIN_ATTR_DTYPE[fluid_attr_type] else: return INT32 # AttrType.LONG else: raise NameError('ERROR: attr_name %s is not exists.' 
% ( attr_name ) ) def attr_data_required(self, op, attr_name): data = op.attr(attr_name) is_list = self.is_list_type(op, attr_name) dtype = self.dtype_of_attr(op, attr_name) if dtype not in [INT32, FLOAT, STR]: return data elif dtype == INT32: return map(int, data) if is_list else int(data) elif dtype == FLOAT: return map(float, data) if is_list else float(data) elif dtype == STR: return bytes(data) def attr_data(self, op, attr_name, default_value = 0, type = None): if op.has_attr(attr_name): return self.attr_data_required(op, attr_name) else: #raise NameError('ERROR: attr_name %s is not exists.' % ( attr_name ) ) return default_value def param_tensor_sh(self, op, param_name, transpose = False, axes = None, reshape = None, var_idx = 0, layout = 'NCHW'): tensor = TensorProtoIO() [flat_data, shape] = self.data_with_shape_by_param(op, param_name, transpose, \ axes, var_idx, True, layout) dtype = self.dtype_by_param(op, param_name, var_idx) tensor.set_data_type(dtype) if dtype in ANAKIN_TENSOR_DTYPESTR.keys(): tensor.set_data(flat_data, ANAKIN_TENSOR_DTYPESTR[dtype]) #pass #debug else: raise NameError('ERROR: Unknown data type (%s)' % ( dtype ) ) if reshape is not None: tensor.set_shape(reshape) else: tensor.set_shape(shape) return [tensor, shape] def param_tensor(self, op, param_name, transpose = False, axes = None, reshape = None, var_idx = 0, layout = 'NCHW'): [tensor, shape] = self.param_tensor_sh(op, param_name, transpose, axes, \ reshape, var_idx, layout) return tensor def create_tensor(self, data_list, data_shape, dtype): tensor = TensorProtoIO() tensor.set_data_type(dtype) tensor.set_data(data_list, ANAKIN_TENSOR_DTYPESTR[dtype]) tensor.set_shape(data_shape) return tensor def gru_tensor_convert(self, origin_h2h, origin_i2h, origin_b, offset=[2, 1, 0]): hidden_size = int(origin_b.size // 3) word_size = int(origin_i2h.size // hidden_size // 3) tar_h2h=np.array(origin_h2h.flatten().tolist()[2*hidden_size*hidden_size:]\ +np.array(origin_h2h.flatten().tolist()[:2*hidden_size*hidden_size])\ .reshape(hidden_size,2,hidden_size)[:,[1,0],:].flatten().tolist())\ .reshape(1,1,hidden_size,3*hidden_size) tar_i2h=origin_i2h.reshape(word_size,3,hidden_size)[:,offset,:]\ .reshape(1,1,word_size,3*hidden_size) tar_b=origin_b.reshape(3, hidden_size)[offset, :].reshape(1,1,1,3 * hidden_size) tar_i2h_h2h=np.concatenate([tar_i2h.flatten(),tar_h2h.flatten()])\ .reshape(1,1,1,3*hidden_size*hidden_size+3*word_size*hidden_size) return tar_i2h_h2h, tar_b def lstm_fc_tensor_merge_convert(self, origin_hidden_size, origin_lstm_w, origin_lstm_b, origin_fc_w, origin_fc_b): layer_size = int (origin_hidden_size // 4) input_size = int (origin_fc_w.size // origin_hidden_size) lstm_bias_num = int (origin_lstm_b.size // layer_size) tar_w = np.vstack((np.hstack((origin_fc_w[:, 1 * layer_size : 2 * layer_size], origin_fc_w[:, 2 * layer_size : 3 * layer_size], origin_fc_w[:, : 1 * layer_size], origin_fc_w[:, 3 * layer_size :])), np.hstack((origin_lstm_w[:, 1 * layer_size : 2 * layer_size], origin_lstm_w[:, 2 * layer_size : 3 * layer_size], origin_lstm_w[:, : 1 * layer_size], origin_lstm_w[:, 3 * layer_size : ])))) if origin_fc_b is not None: split_fc_bc = origin_fc_b.flatten()[: 1 * layer_size] split_fc_bi = origin_fc_b.flatten()[1 * layer_size : 2 * layer_size] split_fc_bf = origin_fc_b.flatten()[2 * layer_size : 3 * layer_size] split_fc_bo = origin_fc_b.flatten()[3 * layer_size : 4 * layer_size] else: split_fc_bc = np.zeros(layer_size) split_fc_bi = np.zeros(layer_size) split_fc_bf = np.zeros(layer_size) split_fc_bo = 
np.zeros(layer_size) split_lstm_bc = origin_lstm_b.flatten()[: 1 * layer_size] split_lstm_bi = origin_lstm_b.flatten()[1 * layer_size: 2 * layer_size] split_lstm_bf = origin_lstm_b.flatten()[2 * layer_size: 3 * layer_size] split_lstm_bo = origin_lstm_b.flatten()[3 * layer_size: 4 * layer_size] split_lstm_bc = np.add(split_lstm_bc, split_fc_bc) split_lstm_bi = np.add(split_lstm_bi, split_fc_bi) split_lstm_bf = np.add(split_lstm_bf, split_fc_bf) split_lstm_bo = np.add(split_lstm_bo, split_fc_bo) if lstm_bias_num == 4: tar_b = np.array(split_lstm_bi.flatten().tolist() + split_lstm_bf.flatten().tolist() + split_lstm_bc.flatten().tolist() + split_lstm_bo.flatten().tolist()) else: split_lstm_wic = origin_lstm_b.flatten()[4 * layer_size : 5 * layer_size] split_lstm_wfc = origin_lstm_b.flatten()[5 * layer_size : 6 * layer_size] split_lstm_woc = origin_lstm_b.flatten()[6 * layer_size :] tar_b = np.array(split_lstm_bi.flatten().tolist() + split_lstm_bf.flatten().tolist() + split_lstm_bc.flatten().tolist() + split_lstm_bo.flatten().tolist() + split_lstm_wic.flatten().tolist() + split_lstm_wfc.flatten().tolist() + split_lstm_woc.flatten().tolist()) return tar_w.reshape(input_size+ layer_size, 4 * layer_size, 1, 1),\ tar_b.reshape(1, origin_lstm_b.size, 1, 1) class Fluid_comparator: def __init__(self, helper): self.helper = helper self.only_list = ['feed', 'fetch'] def compare_by_param(self, op_a, op_b, param): is_weight_a = self.helper.is_persistable_param(op_a, param) is_weight_b = self.helper.is_persistable_param(op_b, param) if is_weight_a and is_weight_b: np_a = self.helper.np_param(op_a, param) np_b = self.helper.np_param(op_b, param) if (np_a == np_b).all() == True: return True else: return False elif is_weight_a is is_weight_b: return True else: return False def have_same_weights(self, op_a, op_b): is_same = True if op_a.input_names == op_b.input_names: params = op_a.input_names for param in params: if self.compare_by_param(op_a, op_b, param) is False: is_same = False return is_same else: return False def compare_by_attr(self, op_a, op_b, attr_name): data_a = self.helper.attr_data(op_a, attr_name) data_b = self.helper.attr_data(op_b, attr_name) return data_a == data_b def have_same_attrs(self, op_a, op_b): is_same = True if op_a.attr_names == op_b.attr_names: attrs = op_a.attr_names for attr in attrs: if self.compare_by_attr(op_a, op_b, attr) is False: is_same = False return is_same else: return False def brothers(self, op_list): is_same = True if len(op_list) > 1: idx = 0 for op_b in op_list[1:]: if op_b.type not in self.only_list: idx = op_list.index(op_b) op_a = op_list[idx - 1] if op_a.type not in self.only_list: same_weights = self.have_same_weights(op_a, op_b) same_attrs = self.have_same_attrs(op_a, op_b) if (same_weights and same_attrs) is False: is_same = False else: raise NameError('ERROR: %s is in only_list.' % ( op_a.type )) else: raise NameError('ERROR: %s is in only_list.' 
% ( op_b.type )) return is_same else: raise NameError('ERROR: Members of op_list must be greater than 2.') ANAKIN_TENSOR_DTYPE = { VarDesc.VarType.BOOL: BOOLEN, VarDesc.VarType.INT32: INT32, VarDesc.VarType.FP16: FLOAT16, VarDesc.VarType.FP32: FLOAT, VarDesc.VarType.FP64: DOUBLE, } ANAKIN_TENSOR_DTYPESTR = { STR: "string", INT32: "int", FLOAT: "float", BOOLEN: "bool", } ANAKIN_ATTR_DTYPE = { AttrType.INT: INT32, AttrType.INTS: INT32, AttrType.FLOAT: FLOAT, AttrType.FLOATS: FLOAT, AttrType.STRING: STR, AttrType.STRINGS: STR, AttrType.BOOL: BOOLEN, AttrType.BOOLS: BOOLEN, } ANAKIN_ATTR_IS_LIST = { AttrType.INT: False, AttrType.INTS: True, AttrType.FLOAT: False, AttrType.FLOATS: True, AttrType.STRING: False, AttrType.STRINGS: True, AttrType.BOOL: False, AttrType.BOOLS: True, } APPEND_BIAS_OP_TYPE = [ 'FC', 'mul', 'sequence_conv', 'conv2d', 'conv2d_transpose', 'depthwise_conv2d', 'elementwise_mul', ] APPEND_ACT_OP_TYPE = [ 'FC', 'mul', 'sequence_conv', 'conv2d', 'conv2d_transpose', 'batch_norm', 'layer_norm', 'row_conv', 'reshape', ]
29.553191
116
0.698344
from ..proto import * from ..graph_io import * import paddle.fluid as fluid import numpy as np from paddle.fluid.core import VarDesc, AttrType def union(list_a, list_b): return list(set(list_a).union(set(list_b))) def difference(list_a, list_b): return list(set(list_a).difference(set(list_b))) class Edge_for_fluid: def __init__(self, param, target, var): self.param = param self.target = target self.var = var class Fluid_edger: def __init__(self, param = None, target = None, var = None): self.edges = [] if param is not None and target is not None: edge = Edge_for_fluid(param, target, var) self.edges.append(edge) def __call__(self): return self.all_targets() def add(self, param, target, var = None): edge = Edge_for_fluid(param, target, var) self.edges.append(edge) def rm_edges_by_param(self, param): for edge in self.edges: if edge.param == param: edge_idx = self.edges.index(edge) del self.edges[edge_idx] def rm(self, target): res = -1 for edge in self.edges: if target == edge.target: edge_idx = self.edges.index(edge) del self.edges[edge_idx] res = res + 1 if res != 0: pass def mv(self, old_target, new_target): res = -1 for edge in self.edges: if old_target == edge.target: edge.target = new_target res = res + 1 if res != 0: pass def all_params(self): params = [] for edge in self.edges: if edge.param not in params: params.append(edge.param) return params def all_targets(self): targets = [] for edge in self.edges: targets.append(edge.target) return targets def targets(self, param): targets = [] for edge in self.edges: if edge.param == param: targets.append(edge.target) return targets def target(self, param, idx = 0): return self.targets(param)[idx] def clear(self): targets_list = self.all_targets() for target in targets_list: self.rm(target) def targets_with_params(self): list_of_targets_and_params = [] for edge in self.edges: target_and_param = [edge.target, edge.param] list_of_targets_and_params.append(target_and_param) return list_of_targets_and_params def vars_by_target(self, target): vars = [] for edge in self.edges: if edge.target == target and edge.var is not None: vars.append(edge.var) return vars def __getitem__(self, idx): if idx < len(self.edges): return self.edges[idx] return None class Fluid_helper: def __init__(self, scope, block): self.scope = scope self.block = block def args_by_input_param(self, op, param_name): if param_name in op.input_names: return op.input(param_name) else: raise NameError('ERROR: param_name %s is not exists.' % ( param_name ) ) def args_by_output_param(self, op, param_name): if param_name in op.output_names: return op.output(param_name) else: raise NameError('ERROR: param_name %s is not exists.' % ( param_name ) ) def var_by_input_param(self, op, param_name, var_idx = 0): var_name = self.args_by_input_param(op, param_name)[var_idx] var = self.block.var(var_name) return var def var_by_output_param(self, op, param_name, var_idx = 0): var_name = self.args_by_output_param(op, param_name)[var_idx] var = self.block.var(var_name) return var def var_name_by_param(self, op, param_name, var_idx = 0): if param_name not in op.input_names + op.output_names: raise NameError('ERROR: param_name %s is not exists.' % ( param_name ) ) elif param_name in op.input_names: if len(op.input(param_name)) > 0: var_name_unicode = op.input(param_name)[var_idx] else: raise NameError('ERROR: param %s has not var.' 
% ( param_name ) ) elif param_name in op.output_names: if len(op.output(param_name)) > 0: var_name_unicode = op.output(param_name)[var_idx] else: raise NameError('ERROR: param %s has not var.' % ( param_name ) ) var = self.block.var(var_name_unicode) var_name = var.name return var_name def var_by_param(self, op, param_name, var_idx = 0): var_name = self.var_name_by_param(op, param_name, var_idx) var = self.block.var(var_name) return var def shape_by_var_name(self, var_name, layout = 'NCHW'): var = self.block.var(var_name) long_tuple = var.shape long_list = list(long_tuple) if layout == 'NCHW': int_list_4d = map(int, [1]*(4-len(long_list)) + long_list) return int_list_4d elif layout == 'UNMODIFIED': return long_list else: raise NameError('ERROR: layout %s is not implemented yet.' % ( layout ) ) def np_data_by_var_name(self, var_name): numpy_array = fluid.executor.fetch_var(var_name, self.scope, True) return numpy_array def dtype_by_var_name(self, var_name): var = self.block.var(var_name) fluid_var_type = var.dtype dtype = ANAKIN_TENSOR_DTYPE[fluid_var_type] return dtype def is_persistable_param(self, op, param_name, var_idx = 0): var = self.var_by_param(op, param_name, var_idx) is_persistable_var = var.persistable return is_persistable_var def var_shape_by_param(self, transpose, op, param_name, var_idx = 0, layout = 'NCHW'): if transpose is True: raise NameError('ERROR: var_shape transpose is not implemented yet.') else: var_name = self.var_name_by_param(op, param_name, var_idx) shape = self.shape_by_var_name(var_name, layout) return shape def data_with_shape_by_param(self, op, param_name, transpose = False, axes = None, var_idx = 0, is_flat_list = True, layout = 'NCHW'): np.set_printoptions(threshold=np.inf, suppress=True) var_name = self.var_name_by_param(op, param_name, var_idx) np_array = self.np_data_by_var_name(var_name) if transpose is True: np_array = np.transpose(np_array, axes) np_shape = np.shape(np_array) if layout == 'NCHW': np_shape = map(int, [1]*(4-len(np_shape)) + list(np_shape)) if is_flat_list is True: flat_list = list(np_array.flatten()) return [flat_list, np_shape] else: return [np_array, np_shape] def np_param(self, op, param_name, transpose = False, axes = None, var_idx = 0): [data, np_shape] = self.data_with_shape_by_param(op, param_name, transpose, \ axes, var_idx, False) return data def dtype_by_param(self, op, param_name, var_idx = 0): var_name = self.var_name_by_param(op, param_name, var_idx) dtype = self.dtype_by_var_name(var_name) return dtype def is_list_type(self, op, attr_name): if op.has_attr(attr_name): fluid_attr_type = op.attr_type(attr_name) if fluid_attr_type in ANAKIN_ATTR_IS_LIST.keys(): return ANAKIN_ATTR_IS_LIST[fluid_attr_type] else: return False else: raise NameError('ERROR: attr_name %s is not exists.' % ( attr_name ) ) def dtype_of_attr(self, op, attr_name): if op.has_attr(attr_name): fluid_attr_type = op.attr_type(attr_name) if fluid_attr_type in ANAKIN_ATTR_DTYPE.keys(): return ANAKIN_ATTR_DTYPE[fluid_attr_type] else: return INT32 else: raise NameError('ERROR: attr_name %s is not exists.' 
% ( attr_name ) ) def attr_data_required(self, op, attr_name): data = op.attr(attr_name) is_list = self.is_list_type(op, attr_name) dtype = self.dtype_of_attr(op, attr_name) if dtype not in [INT32, FLOAT, STR]: return data elif dtype == INT32: return map(int, data) if is_list else int(data) elif dtype == FLOAT: return map(float, data) if is_list else float(data) elif dtype == STR: return bytes(data) def attr_data(self, op, attr_name, default_value = 0, type = None): if op.has_attr(attr_name): return self.attr_data_required(op, attr_name) else: return default_value def param_tensor_sh(self, op, param_name, transpose = False, axes = None, reshape = None, var_idx = 0, layout = 'NCHW'): tensor = TensorProtoIO() [flat_data, shape] = self.data_with_shape_by_param(op, param_name, transpose, \ axes, var_idx, True, layout) dtype = self.dtype_by_param(op, param_name, var_idx) tensor.set_data_type(dtype) if dtype in ANAKIN_TENSOR_DTYPESTR.keys(): tensor.set_data(flat_data, ANAKIN_TENSOR_DTYPESTR[dtype]) e: raise NameError('ERROR: Unknown data type (%s)' % ( dtype ) ) if reshape is not None: tensor.set_shape(reshape) else: tensor.set_shape(shape) return [tensor, shape] def param_tensor(self, op, param_name, transpose = False, axes = None, reshape = None, var_idx = 0, layout = 'NCHW'): [tensor, shape] = self.param_tensor_sh(op, param_name, transpose, axes, \ reshape, var_idx, layout) return tensor def create_tensor(self, data_list, data_shape, dtype): tensor = TensorProtoIO() tensor.set_data_type(dtype) tensor.set_data(data_list, ANAKIN_TENSOR_DTYPESTR[dtype]) tensor.set_shape(data_shape) return tensor def gru_tensor_convert(self, origin_h2h, origin_i2h, origin_b, offset=[2, 1, 0]): hidden_size = int(origin_b.size // 3) word_size = int(origin_i2h.size // hidden_size // 3) tar_h2h=np.array(origin_h2h.flatten().tolist()[2*hidden_size*hidden_size:]\ +np.array(origin_h2h.flatten().tolist()[:2*hidden_size*hidden_size])\ .reshape(hidden_size,2,hidden_size)[:,[1,0],:].flatten().tolist())\ .reshape(1,1,hidden_size,3*hidden_size) tar_i2h=origin_i2h.reshape(word_size,3,hidden_size)[:,offset,:]\ .reshape(1,1,word_size,3*hidden_size) tar_b=origin_b.reshape(3, hidden_size)[offset, :].reshape(1,1,1,3 * hidden_size) tar_i2h_h2h=np.concatenate([tar_i2h.flatten(),tar_h2h.flatten()])\ .reshape(1,1,1,3*hidden_size*hidden_size+3*word_size*hidden_size) return tar_i2h_h2h, tar_b def lstm_fc_tensor_merge_convert(self, origin_hidden_size, origin_lstm_w, origin_lstm_b, origin_fc_w, origin_fc_b): layer_size = int (origin_hidden_size // 4) input_size = int (origin_fc_w.size // origin_hidden_size) lstm_bias_num = int (origin_lstm_b.size // layer_size) tar_w = np.vstack((np.hstack((origin_fc_w[:, 1 * layer_size : 2 * layer_size], origin_fc_w[:, 2 * layer_size : 3 * layer_size], origin_fc_w[:, : 1 * layer_size], origin_fc_w[:, 3 * layer_size :])), np.hstack((origin_lstm_w[:, 1 * layer_size : 2 * layer_size], origin_lstm_w[:, 2 * layer_size : 3 * layer_size], origin_lstm_w[:, : 1 * layer_size], origin_lstm_w[:, 3 * layer_size : ])))) if origin_fc_b is not None: split_fc_bc = origin_fc_b.flatten()[: 1 * layer_size] split_fc_bi = origin_fc_b.flatten()[1 * layer_size : 2 * layer_size] split_fc_bf = origin_fc_b.flatten()[2 * layer_size : 3 * layer_size] split_fc_bo = origin_fc_b.flatten()[3 * layer_size : 4 * layer_size] else: split_fc_bc = np.zeros(layer_size) split_fc_bi = np.zeros(layer_size) split_fc_bf = np.zeros(layer_size) split_fc_bo = np.zeros(layer_size) split_lstm_bc = origin_lstm_b.flatten()[: 1 * layer_size] split_lstm_bi = 
origin_lstm_b.flatten()[1 * layer_size: 2 * layer_size] split_lstm_bf = origin_lstm_b.flatten()[2 * layer_size: 3 * layer_size] split_lstm_bo = origin_lstm_b.flatten()[3 * layer_size: 4 * layer_size] split_lstm_bc = np.add(split_lstm_bc, split_fc_bc) split_lstm_bi = np.add(split_lstm_bi, split_fc_bi) split_lstm_bf = np.add(split_lstm_bf, split_fc_bf) split_lstm_bo = np.add(split_lstm_bo, split_fc_bo) if lstm_bias_num == 4: tar_b = np.array(split_lstm_bi.flatten().tolist() + split_lstm_bf.flatten().tolist() + split_lstm_bc.flatten().tolist() + split_lstm_bo.flatten().tolist()) else: split_lstm_wic = origin_lstm_b.flatten()[4 * layer_size : 5 * layer_size] split_lstm_wfc = origin_lstm_b.flatten()[5 * layer_size : 6 * layer_size] split_lstm_woc = origin_lstm_b.flatten()[6 * layer_size :] tar_b = np.array(split_lstm_bi.flatten().tolist() + split_lstm_bf.flatten().tolist() + split_lstm_bc.flatten().tolist() + split_lstm_bo.flatten().tolist() + split_lstm_wic.flatten().tolist() + split_lstm_wfc.flatten().tolist() + split_lstm_woc.flatten().tolist()) return tar_w.reshape(input_size+ layer_size, 4 * layer_size, 1, 1),\ tar_b.reshape(1, origin_lstm_b.size, 1, 1) class Fluid_comparator: def __init__(self, helper): self.helper = helper self.only_list = ['feed', 'fetch'] def compare_by_param(self, op_a, op_b, param): is_weight_a = self.helper.is_persistable_param(op_a, param) is_weight_b = self.helper.is_persistable_param(op_b, param) if is_weight_a and is_weight_b: np_a = self.helper.np_param(op_a, param) np_b = self.helper.np_param(op_b, param) if (np_a == np_b).all() == True: return True else: return False elif is_weight_a is is_weight_b: return True else: return False def have_same_weights(self, op_a, op_b): is_same = True if op_a.input_names == op_b.input_names: params = op_a.input_names for param in params: if self.compare_by_param(op_a, op_b, param) is False: is_same = False return is_same else: return False def compare_by_attr(self, op_a, op_b, attr_name): data_a = self.helper.attr_data(op_a, attr_name) data_b = self.helper.attr_data(op_b, attr_name) return data_a == data_b def have_same_attrs(self, op_a, op_b): is_same = True if op_a.attr_names == op_b.attr_names: attrs = op_a.attr_names for attr in attrs: if self.compare_by_attr(op_a, op_b, attr) is False: is_same = False return is_same else: return False def brothers(self, op_list): is_same = True if len(op_list) > 1: idx = 0 for op_b in op_list[1:]: if op_b.type not in self.only_list: idx = op_list.index(op_b) op_a = op_list[idx - 1] if op_a.type not in self.only_list: same_weights = self.have_same_weights(op_a, op_b) same_attrs = self.have_same_attrs(op_a, op_b) if (same_weights and same_attrs) is False: is_same = False else: raise NameError('ERROR: %s is in only_list.' % ( op_a.type )) else: raise NameError('ERROR: %s is in only_list.' 
% ( op_b.type )) return is_same else: raise NameError('ERROR: Members of op_list must be greater than 2.') ANAKIN_TENSOR_DTYPE = { VarDesc.VarType.BOOL: BOOLEN, VarDesc.VarType.INT32: INT32, VarDesc.VarType.FP16: FLOAT16, VarDesc.VarType.FP32: FLOAT, VarDesc.VarType.FP64: DOUBLE, } ANAKIN_TENSOR_DTYPESTR = { STR: "string", INT32: "int", FLOAT: "float", BOOLEN: "bool", } ANAKIN_ATTR_DTYPE = { AttrType.INT: INT32, AttrType.INTS: INT32, AttrType.FLOAT: FLOAT, AttrType.FLOATS: FLOAT, AttrType.STRING: STR, AttrType.STRINGS: STR, AttrType.BOOL: BOOLEN, AttrType.BOOLS: BOOLEN, } ANAKIN_ATTR_IS_LIST = { AttrType.INT: False, AttrType.INTS: True, AttrType.FLOAT: False, AttrType.FLOATS: True, AttrType.STRING: False, AttrType.STRINGS: True, AttrType.BOOL: False, AttrType.BOOLS: True, } APPEND_BIAS_OP_TYPE = [ 'FC', 'mul', 'sequence_conv', 'conv2d', 'conv2d_transpose', 'depthwise_conv2d', 'elementwise_mul', ] APPEND_ACT_OP_TYPE = [ 'FC', 'mul', 'sequence_conv', 'conv2d', 'conv2d_transpose', 'batch_norm', 'layer_norm', 'row_conv', 'reshape', ]
true
true
f702a16a4c7e987eab3c8054b87c6ed9a6452980
10,100
py
Python
myven/lib/python3.8/site-packages/ansible/modules/cloud/vmware/vcenter_folder.py
baltham/dne-dna-code
4a13309a790a670d2f07e635c9264a0c29976c6a
[ "MIT" ]
1
2021-04-02T08:08:39.000Z
2021-04-02T08:08:39.000Z
myven/lib/python3.8/site-packages/ansible/modules/cloud/vmware/vcenter_folder.py
baltham/dne-dna-code
4a13309a790a670d2f07e635c9264a0c29976c6a
[ "MIT" ]
null
null
null
myven/lib/python3.8/site-packages/ansible/modules/cloud/vmware/vcenter_folder.py
baltham/dne-dna-code
4a13309a790a670d2f07e635c9264a0c29976c6a
[ "MIT" ]
1
2020-05-03T01:13:16.000Z
2020-05-03T01:13:16.000Z
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}

DOCUMENTATION = r'''
---
module: vcenter_folder
short_description: Manage folders on given datacenter
description:
- This module can be used to create, delete, move and rename folder on then given datacenter.
version_added: '2.5'
author:
- Abhijeet Kasurde (@akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
  datacenter:
    description:
    - Name of the datacenter.
    required: True
  folder_name:
    description:
    - Name of folder to be managed.
    - This is case sensitive parameter.
    - Folder name should be under 80 characters. This is a VMware restriction.
    required: True
  parent_folder:
    description:
    - Name of the parent folder under which new folder needs to be created.
    - This is case sensitive parameter.
    - Please specify unique folder name as there is no way to detect duplicate names.
    - "If user wants to create a folder under '/DC0/vm/vm_folder', this value will be 'vm_folder'."
    required: False
  folder_type:
    description:
    - This is type of folder.
    - "If set to C(vm), then 'VM and Template Folder' is created under datacenter."
    - "If set to C(host), then 'Host and Cluster Folder' is created under datacenter."
    - "If set to C(datastore), then 'Storage Folder' is created under datacenter."
    - "If set to C(network), then 'Network Folder' is created under datacenter."
    - This parameter is required, if C(state) is set to C(present) and parent_folder is absent.
    - This option is ignored, if C(parent_folder) is set.
    default: vm
    required: False
    choices: [ datastore, host, network, vm ]
  state:
    description:
    - State of folder.
    - If set to C(present) without parent folder parameter, then folder with C(folder_type) is created.
    - If set to C(present) with parent folder parameter, then folder in created under parent folder. C(folder_type) is ignored.
    - If set to C(absent), then folder is unregistered and destroyed.
    default: present
    choices: [ present, absent ]
extends_documentation_fragment: vmware.documentation
'''

EXAMPLES = r'''
- name: Create a VM folder on given datacenter
  vcenter_folder:
    hostname: '{{ vcenter_hostname }}'
    username: '{{ vcenter_username }}'
    password: '{{ vcenter_password }}'
    datacenter: datacenter_name
    folder_name: sample_vm_folder
    folder_type: vm
    state: present
  register: vm_folder_creation_result

- name: Create a datastore folder on given datacenter
  vcenter_folder:
    hostname: '{{ vcenter_hostname }}'
    username: '{{ vcenter_username }}'
    password: '{{ vcenter_password }}'
    datacenter: datacenter_name
    folder_name: sample_datastore_folder
    folder_type: datastore
    state: present
  register: datastore_folder_creation_result

- name: Create a sub folder under VM folder on given datacenter
  vcenter_folder:
    hostname: '{{ vcenter_hostname }}'
    username: '{{ vcenter_username }}'
    password: '{{ vcenter_password }}'
    datacenter: datacenter_name
    folder_name: sample_sub_folder
    parent_folder: vm_folder
    state: present
  register: sub_folder_creation_result

- name: Delete a VM folder on given datacenter
  vcenter_folder:
    hostname: '{{ vcenter_hostname }}'
    username: '{{ vcenter_username }}'
    password: '{{ vcenter_password }}'
    datacenter: datacenter_name
    folder_name: sample_vm_folder
    folder_type: vm
    state: absent
  register: vm_folder_deletion_result
'''

RETURN = r'''
result:
    description:
    - string stating about result
    returned: success
    type: string
    sample: "Folder 'sub_network_folder' of type 'vm' created under vm_folder successfully."
'''

try:
    from pyVmomi import vim, vmodl
except ImportError as e:
    pass

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi, find_datacenter_by_name, wait_for_task, get_all_objs
from ansible.module_utils._text import to_native


class VmwareFolderManager(PyVmomi):
    def __init__(self, module):
        super(VmwareFolderManager, self).__init__(module)
        datacenter_name = self.params.get('datacenter', None)
        self.datacenter_obj = find_datacenter_by_name(self.content, datacenter_name=datacenter_name)
        if self.datacenter_obj is None:
            self.module.fail_json(msg="Failed to find datacenter %s" % datacenter_name)

    def ensure(self):
        """
        Function to manage internal state management
        Returns:

        """
        state = self.module.params.get('state')
        folder_type = self.module.params.get('folder_type')
        folder_name = self.module.params.get('folder_name')
        parent_folder = self.module.params.get('parent_folder', None)
        results = dict(changed=False, result=dict())
        if state == 'present':
            # Create a new folder
            try:
                if parent_folder:
                    folder = self.get_folder_by_name(folder_name=parent_folder)
                    if folder:
                        folder.CreateFolder(folder_name)
                        results['changed'] = True
                        results['result'] = "Folder '%s' of type '%s' created under %s" \
                                            " successfully." % (folder_name, folder_type, parent_folder)
                    else:
                        self.module.fail_json(msg="Failed to find the parent folder %s"
                                                  " for folder %s" % (parent_folder, folder_name))
                else:
                    datacenter_folder_type = {
                        'vm': self.datacenter_obj.vmFolder,
                        'host': self.datacenter_obj.hostFolder,
                        'datastore': self.datacenter_obj.datastoreFolder,
                        'network': self.datacenter_obj.networkFolder,
                    }
                    datacenter_folder_type[folder_type].CreateFolder(folder_name)
                    results['changed'] = True
                    results['result'] = "Folder '%s' of type '%s' created successfully" % (folder_name, folder_type)
            except vim.fault.DuplicateName as duplicate_name:
                # To be consistent with the other vmware modules, We decided to accept this error
                # and the playbook should simply carry on with other tasks.
                # User will have to take care of this exception
                # https://github.com/ansible/ansible/issues/35388#issuecomment-362283078
                results['changed'] = False
                results['result'] = "Failed to create folder as another object has same name" \
                                    " in the same target folder : %s" % to_native(duplicate_name.msg)
            except vim.fault.InvalidName as invalid_name:
                self.module.fail_json(msg="Failed to create folder as folder name is not a valid "
                                          "entity name : %s" % to_native(invalid_name.msg))
            except Exception as general_exc:
                self.module.fail_json(msg="Failed to create folder due to generic"
                                          " exception : %s " % to_native(general_exc))
            self.module.exit_json(**results)
        elif state == 'absent':
            folder_obj = self.get_folder_by_name(folder_name=folder_name)
            if folder_obj:
                try:
                    task = folder_obj.UnregisterAndDestroy()
                    results['changed'], results['result'] = wait_for_task(task=task)
                except vim.fault.ConcurrentAccess as concurrent_access:
                    self.module.fail_json(msg="Failed to remove folder as another client"
                                              " modified folder before this operation : %s" % to_native(concurrent_access.msg))
                except vim.fault.InvalidState as invalid_state:
                    self.module.fail_json(msg="Failed to remove folder as folder is in"
                                              " invalid state" % to_native(invalid_state.msg))
                except Exception as e:
                    self.module.fail_json(msg="Failed to remove folder due to generic"
                                              " exception %s " % to_native(e))
            self.module.exit_json(**results)

    def get_folder_by_name(self, folder_name):
        """
        Function to get managed object of folder by name
        Returns: Managed object of folder by name

        """
        folder_objs = get_all_objs(self.content, [vim.Folder])
        for folder in folder_objs:
            if folder.name == folder_name:
                return folder
        return None


def main():
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        datacenter=dict(type='str', required=True),
        folder_name=dict(type='str', required=True),
        parent_folder=dict(type='str', required=False),
        state=dict(type='str', choices=['present', 'absent'], default='present'),
        folder_type=dict(type='str', default='vm', choices=['datastore', 'host', 'network', 'vm'], required=False),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=False,
    )

    if len(module.params.get('folder_name')) > 79:
        module.fail_json(msg="Failed to manage folder as folder_name can only contain 80 characters.")

    vcenter_folder_mgr = VmwareFolderManager(module)
    vcenter_folder_mgr.ensure()


if __name__ == "__main__":
    main()
39.76378
128
0.629307
from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = r''' --- module: vcenter_folder short_description: Manage folders on given datacenter description: - This module can be used to create, delete, move and rename folder on then given datacenter. version_added: '2.5' author: - Abhijeet Kasurde (@akasurde) notes: - Tested on vSphere 6.5 requirements: - python >= 2.6 - PyVmomi options: datacenter: description: - Name of the datacenter. required: True folder_name: description: - Name of folder to be managed. - This is case sensitive parameter. - Folder name should be under 80 characters. This is a VMware restriction. required: True parent_folder: description: - Name of the parent folder under which new folder needs to be created. - This is case sensitive parameter. - Please specify unique folder name as there is no way to detect duplicate names. - "If user wants to create a folder under '/DC0/vm/vm_folder', this value will be 'vm_folder'." required: False folder_type: description: - This is type of folder. - "If set to C(vm), then 'VM and Template Folder' is created under datacenter." - "If set to C(host), then 'Host and Cluster Folder' is created under datacenter." - "If set to C(datastore), then 'Storage Folder' is created under datacenter." - "If set to C(network), then 'Network Folder' is created under datacenter." - This parameter is required, if C(state) is set to C(present) and parent_folder is absent. - This option is ignored, if C(parent_folder) is set. default: vm required: False choices: [ datastore, host, network, vm ] state: description: - State of folder. - If set to C(present) without parent folder parameter, then folder with C(folder_type) is created. - If set to C(present) with parent folder parameter, then folder in created under parent folder. C(folder_type) is ignored. - If set to C(absent), then folder is unregistered and destroyed. 
default: present choices: [ present, absent ] extends_documentation_fragment: vmware.documentation ''' EXAMPLES = r''' - name: Create a VM folder on given datacenter vcenter_folder: hostname: '{{ vcenter_hostname }}' username: '{{ vcenter_username }}' password: '{{ vcenter_password }}' datacenter: datacenter_name folder_name: sample_vm_folder folder_type: vm state: present register: vm_folder_creation_result - name: Create a datastore folder on given datacenter vcenter_folder: hostname: '{{ vcenter_hostname }}' username: '{{ vcenter_username }}' password: '{{ vcenter_password }}' datacenter: datacenter_name folder_name: sample_datastore_folder folder_type: datastore state: present register: datastore_folder_creation_result - name: Create a sub folder under VM folder on given datacenter vcenter_folder: hostname: '{{ vcenter_hostname }}' username: '{{ vcenter_username }}' password: '{{ vcenter_password }}' datacenter: datacenter_name folder_name: sample_sub_folder parent_folder: vm_folder state: present register: sub_folder_creation_result - name: Delete a VM folder on given datacenter vcenter_folder: hostname: '{{ vcenter_hostname }}' username: '{{ vcenter_username }}' password: '{{ vcenter_password }}' datacenter: datacenter_name folder_name: sample_vm_folder folder_type: vm state: absent register: vm_folder_deletion_result ''' RETURN = r''' result: description: - string stating about result returned: success type: string sample: "Folder 'sub_network_folder' of type 'vm' created under vm_folder successfully." ''' try: from pyVmomi import vim, vmodl except ImportError as e: pass from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi, find_datacenter_by_name, wait_for_task, get_all_objs from ansible.module_utils._text import to_native class VmwareFolderManager(PyVmomi): def __init__(self, module): super(VmwareFolderManager, self).__init__(module) datacenter_name = self.params.get('datacenter', None) self.datacenter_obj = find_datacenter_by_name(self.content, datacenter_name=datacenter_name) if self.datacenter_obj is None: self.module.fail_json(msg="Failed to find datacenter %s" % datacenter_name) def ensure(self): state = self.module.params.get('state') folder_type = self.module.params.get('folder_type') folder_name = self.module.params.get('folder_name') parent_folder = self.module.params.get('parent_folder', None) results = dict(changed=False, result=dict()) if state == 'present': try: if parent_folder: folder = self.get_folder_by_name(folder_name=parent_folder) if folder: folder.CreateFolder(folder_name) results['changed'] = True results['result'] = "Folder '%s' of type '%s' created under %s" \ " successfully." 
% (folder_name, folder_type, parent_folder) else: self.module.fail_json(msg="Failed to find the parent folder %s" " for folder %s" % (parent_folder, folder_name)) else: datacenter_folder_type = { 'vm': self.datacenter_obj.vmFolder, 'host': self.datacenter_obj.hostFolder, 'datastore': self.datacenter_obj.datastoreFolder, 'network': self.datacenter_obj.networkFolder, } datacenter_folder_type[folder_type].CreateFolder(folder_name) results['changed'] = True results['result'] = "Folder '%s' of type '%s' created successfully" % (folder_name, folder_type) except vim.fault.DuplicateName as duplicate_name: s['changed'] = False results['result'] = "Failed to create folder as another object has same name" \ " in the same target folder : %s" % to_native(duplicate_name.msg) except vim.fault.InvalidName as invalid_name: self.module.fail_json(msg="Failed to create folder as folder name is not a valid " "entity name : %s" % to_native(invalid_name.msg)) except Exception as general_exc: self.module.fail_json(msg="Failed to create folder due to generic" " exception : %s " % to_native(general_exc)) self.module.exit_json(**results) elif state == 'absent': folder_obj = self.get_folder_by_name(folder_name=folder_name) if folder_obj: try: task = folder_obj.UnregisterAndDestroy() results['changed'], results['result'] = wait_for_task(task=task) except vim.fault.ConcurrentAccess as concurrent_access: self.module.fail_json(msg="Failed to remove folder as another client" " modified folder before this operation : %s" % to_native(concurrent_access.msg)) except vim.fault.InvalidState as invalid_state: self.module.fail_json(msg="Failed to remove folder as folder is in" " invalid state" % to_native(invalid_state.msg)) except Exception as e: self.module.fail_json(msg="Failed to remove folder due to generic" " exception %s " % to_native(e)) self.module.exit_json(**results) def get_folder_by_name(self, folder_name): folder_objs = get_all_objs(self.content, [vim.Folder]) for folder in folder_objs: if folder.name == folder_name: return folder return None def main(): argument_spec = vmware_argument_spec() argument_spec.update( datacenter=dict(type='str', required=True), folder_name=dict(type='str', required=True), parent_folder=dict(type='str', required=False), state=dict(type='str', choices=['present', 'absent'], default='present'), folder_type=dict(type='str', default='vm', choices=['datastore', 'host', 'network', 'vm'], required=False), ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=False, ) if len(module.params.get('folder_name')) > 79: module.fail_json(msg="Failed to manage folder as folder_name can only contain 80 characters.") vcenter_folder_mgr = VmwareFolderManager(module) vcenter_folder_mgr.ensure() if __name__ == "__main__": main()
true
true
f702a18d54a46af8e2dc11e1153545101f920c8d
10,971
py
Python
disturbance/components/approvals/email.py
KellyThomas/disturbance
d2096d39a199dc615b430dc43b1d0e05fab4c35f
[ "Apache-2.0" ]
null
null
null
disturbance/components/approvals/email.py
KellyThomas/disturbance
d2096d39a199dc615b430dc43b1d0e05fab4c35f
[ "Apache-2.0" ]
null
null
null
disturbance/components/approvals/email.py
KellyThomas/disturbance
d2096d39a199dc615b430dc43b1d0e05fab4c35f
[ "Apache-2.0" ]
null
null
null
import logging

from django.core.mail import EmailMultiAlternatives, EmailMessage
from django.utils.encoding import smart_text
from django.core.urlresolvers import reverse
from django.conf import settings

from disturbance.components.emails.emails import TemplateEmailBase
from ledger.accounts.models import EmailUser

logger = logging.getLogger(__name__)

SYSTEM_NAME = settings.SYSTEM_NAME_SHORT + ' Automated Message'


class ApprovalExpireNotificationEmail(TemplateEmailBase):
    subject = 'Your Approval has expired.'
    html_template = 'disturbance/emails/approval_expire_notification.html'
    txt_template = 'disturbance/emails/approval_expire_notification.txt'


class ApprovalCancelNotificationEmail(TemplateEmailBase):
    subject = 'Your Approval has been cancelled.'
    html_template = 'disturbance/emails/approval_cancel_notification.html'
    txt_template = 'disturbance/emails/approval_cancel_notification.txt'


class ApprovalSuspendNotificationEmail(TemplateEmailBase):
    subject = 'Your Approval has been suspended.'
    html_template = 'disturbance/emails/approval_suspend_notification.html'
    txt_template = 'disturbance/emails/approval_suspend_notification.txt'


class ApprovalSurrenderNotificationEmail(TemplateEmailBase):
    subject = 'Your Approval has been surrendered.'
    html_template = 'disturbance/emails/approval_surrender_notification.html'
    txt_template = 'disturbance/emails/approval_surrender_notification.txt'


class ApprovalReinstateNotificationEmail(TemplateEmailBase):
    subject = 'Your Approval has been reinstated.'
    html_template = 'disturbance/emails/approval_reinstate_notification.html'
    txt_template = 'disturbance/emails/approval_reinstate_notification.txt'


class ApprovalRenewalNotificationEmail(TemplateEmailBase):
    subject = 'Your Approval is due for renewal.'
    html_template = 'disturbance/emails/approval_renewal_notification.html'
    txt_template = 'disturbance/emails/approval_renewal_notification.txt'


def send_approval_expire_email_notification(approval):
    email = ApprovalExpireNotificationEmail()
    proposal = approval.current_proposal

    context = {
        'approval': approval,
        'proposal': proposal
    }
    all_ccs = []
    if proposal.applicant.email:
        cc_list = proposal.applicant.email
        if cc_list:
            all_ccs = [cc_list]
    msg = email.send(proposal.submitter.email, cc=all_ccs, context=context)
    sender = settings.DEFAULT_FROM_EMAIL
    try:
        sender_user = EmailUser.objects.get(email__icontains=sender)
    except:
        EmailUser.objects.create(email=sender, password='')
        sender_user = EmailUser.objects.get(email__icontains=sender)
    _log_approval_email(msg, approval, sender=sender_user)
    _log_org_email(msg, proposal.applicant, proposal.submitter, sender=sender_user)


def send_approval_cancel_email_notification(approval, future_cancel=False):
    email = ApprovalCancelNotificationEmail()
    proposal = approval.current_proposal

    context = {
        'approval': approval,
        'future_cancel': future_cancel
    }
    all_ccs = []
    if proposal.applicant.email:
        cc_list = proposal.applicant.email
        if cc_list:
            all_ccs = [cc_list]
    sender = settings.DEFAULT_FROM_EMAIL
    try:
        sender_user = EmailUser.objects.get(email__icontains=sender)
    except:
        EmailUser.objects.create(email=sender, password='')
        sender_user = EmailUser.objects.get(email__icontains=sender)
    msg = email.send(proposal.submitter.email, cc=all_ccs, context=context)
    sender = settings.DEFAULT_FROM_EMAIL
    _log_approval_email(msg, approval, sender=sender_user)
    _log_org_email(msg, proposal.applicant, proposal.submitter, sender=sender_user)


def send_approval_suspend_email_notification(approval, future_suspend=False):
    email = ApprovalSuspendNotificationEmail()
    proposal = approval.current_proposal

    context = {
        'approval': approval,
        'details': approval.suspension_details['details'],
        'from_date': approval.suspension_details['from_date'],
        'to_date': approval.suspension_details['to_date'],
        'future_suspend': future_suspend
    }
    all_ccs = []
    if proposal.applicant.email:
        cc_list = proposal.applicant.email
        if cc_list:
            all_ccs = [cc_list]
    sender = settings.DEFAULT_FROM_EMAIL
    try:
        sender_user = EmailUser.objects.get(email__icontains=sender)
    except:
        EmailUser.objects.create(email=sender, password='')
        sender_user = EmailUser.objects.get(email__icontains=sender)
    msg = email.send(proposal.submitter.email, cc=all_ccs, context=context)
    sender = settings.DEFAULT_FROM_EMAIL
    _log_approval_email(msg, approval, sender=sender_user)
    _log_org_email(msg, proposal.applicant, proposal.submitter, sender=sender_user)


def send_approval_surrender_email_notification(approval, future_surrender=False):
    email = ApprovalSurrenderNotificationEmail()
    proposal = approval.current_proposal

    context = {
        'approval': approval,
        'details': approval.surrender_details['details'],
        'surrender_date': approval.surrender_details['surrender_date'],
        'future_surrender': future_surrender
    }
    all_ccs = []
    if proposal.applicant.email:
        cc_list = proposal.applicant.email
        if cc_list:
            all_ccs = [cc_list]
    sender = settings.DEFAULT_FROM_EMAIL
    try:
        sender_user = EmailUser.objects.get(email__icontains=sender)
    except:
        EmailUser.objects.create(email=sender, password='')
        sender_user = EmailUser.objects.get(email__icontains=sender)
    msg = email.send(proposal.submitter.email, cc=all_ccs, context=context)
    _log_approval_email(msg, approval, sender=sender_user)
    _log_org_email(msg, proposal.applicant, proposal.submitter, sender=sender_user)


#approval renewal notice
def send_approval_renewal_email_notification(approval):
    email = ApprovalRenewalNotificationEmail()
    proposal = approval.current_proposal

    context = {
        'approval': approval,
        'proposal': approval.current_proposal
    }
    all_ccs = []
    if proposal.applicant.email:
        cc_list = proposal.applicant.email
        if cc_list:
            all_ccs = [cc_list]
    sender = settings.DEFAULT_FROM_EMAIL
    try:
        sender_user = EmailUser.objects.get(email__icontains=sender)
    except:
        EmailUser.objects.create(email=sender, password='')
        sender_user = EmailUser.objects.get(email__icontains=sender)

    #attach renewal notice
    renewal_document = approval.renewal_document._file
    if renewal_document is not None:
        file_name = approval.renewal_document.name
        attachment = (file_name, renewal_document.file.read(), 'application/pdf')
        attachment = [attachment]
    else:
        attachment = []
    msg = email.send(proposal.submitter.email, cc=all_ccs, attachments=attachment, context=context)
    sender = settings.DEFAULT_FROM_EMAIL
    _log_approval_email(msg, approval, sender=sender_user)
    _log_org_email(msg, proposal.applicant, proposal.submitter, sender=sender_user)


def send_approval_reinstate_email_notification(approval, request):
    email = ApprovalReinstateNotificationEmail()
    proposal = approval.current_proposal

    context = {
        'approval': approval,
    }
    all_ccs = []
    if proposal.applicant.email:
        cc_list = proposal.applicant.email
        if cc_list:
            all_ccs = [cc_list]
    msg = email.send(proposal.submitter.email, cc=all_ccs, context=context)
    sender = request.user if request else settings.DEFAULT_FROM_EMAIL
    _log_approval_email(msg, approval, sender=sender)
    _log_org_email(msg, proposal.applicant, proposal.submitter, sender=sender)


def _log_approval_email(email_message, approval, sender=None):
    from disturbance.components.approvals.models import ApprovalLogEntry
    if isinstance(email_message, (EmailMultiAlternatives, EmailMessage,)):
        # TODO this will log the plain text body, should we log the html instead
        text = email_message.body
        subject = email_message.subject
        fromm = smart_text(sender) if sender else smart_text(email_message.from_email)
        # the to email is normally a list
        if isinstance(email_message.to, list):
            to = ','.join(email_message.to)
        else:
            to = smart_text(email_message.to)
        # we log the cc and bcc in the same cc field of the log entry as a ',' comma separated string
        all_ccs = []
        if email_message.cc:
            all_ccs += list(email_message.cc)
        if email_message.bcc:
            all_ccs += list(email_message.bcc)
        all_ccs = ','.join(all_ccs)
    else:
        text = smart_text(email_message)
        subject = ''
        to = approval.current_proposal.submitter.email
        fromm = smart_text(sender) if sender else SYSTEM_NAME
        all_ccs = ''

    customer = approval.current_proposal.submitter
    staff = sender

    kwargs = {
        'subject': subject,
        'text': text,
        'approval': approval,
        'customer': customer,
        'staff': staff,
        'to': to,
        'fromm': fromm,
        'cc': all_ccs
    }

    email_entry = ApprovalLogEntry.objects.create(**kwargs)

    return email_entry


def _log_org_email(email_message, organisation, customer, sender=None):
    from disturbance.components.organisations.models import OrganisationLogEntry
    if isinstance(email_message, (EmailMultiAlternatives, EmailMessage,)):
        # TODO this will log the plain text body, should we log the html instead
        text = email_message.body
        subject = email_message.subject
        fromm = smart_text(sender) if sender else smart_text(email_message.from_email)
        # the to email is normally a list
        if isinstance(email_message.to, list):
            to = ','.join(email_message.to)
        else:
            to = smart_text(email_message.to)
        # we log the cc and bcc in the same cc field of the log entry as a ',' comma separated string
        all_ccs = []
        if email_message.cc:
            all_ccs += list(email_message.cc)
        if email_message.bcc:
            all_ccs += list(email_message.bcc)
        all_ccs = ','.join(all_ccs)
    else:
        text = smart_text(email_message)
        subject = ''
        to = customer
        fromm = smart_text(sender) if sender else SYSTEM_NAME
        all_ccs = ''

    customer = customer
    staff = sender

    kwargs = {
        'subject': subject,
        'text': text,
        'organisation': organisation,
        'customer': customer,
        'staff': staff,
        'to': to,
        'fromm': fromm,
        'cc': all_ccs
    }

    email_entry = OrganisationLogEntry.objects.create(**kwargs)

    return email_entry
35.970492
101
0.700939
import logging from django.core.mail import EmailMultiAlternatives, EmailMessage from django.utils.encoding import smart_text from django.core.urlresolvers import reverse from django.conf import settings from disturbance.components.emails.emails import TemplateEmailBase from ledger.accounts.models import EmailUser logger = logging.getLogger(__name__) SYSTEM_NAME = settings.SYSTEM_NAME_SHORT + ' Automated Message' class ApprovalExpireNotificationEmail(TemplateEmailBase): subject = 'Your Approval has expired.' html_template = 'disturbance/emails/approval_expire_notification.html' txt_template = 'disturbance/emails/approval_expire_notification.txt' class ApprovalCancelNotificationEmail(TemplateEmailBase): subject = 'Your Approval has been cancelled.' html_template = 'disturbance/emails/approval_cancel_notification.html' txt_template = 'disturbance/emails/approval_cancel_notification.txt' class ApprovalSuspendNotificationEmail(TemplateEmailBase): subject = 'Your Approval has been suspended.' html_template = 'disturbance/emails/approval_suspend_notification.html' txt_template = 'disturbance/emails/approval_suspend_notification.txt' class ApprovalSurrenderNotificationEmail(TemplateEmailBase): subject = 'Your Approval has been surrendered.' html_template = 'disturbance/emails/approval_surrender_notification.html' txt_template = 'disturbance/emails/approval_surrender_notification.txt' class ApprovalReinstateNotificationEmail(TemplateEmailBase): subject = 'Your Approval has been reinstated.' html_template = 'disturbance/emails/approval_reinstate_notification.html' txt_template = 'disturbance/emails/approval_reinstate_notification.txt' class ApprovalRenewalNotificationEmail(TemplateEmailBase): subject = 'Your Approval is due for renewal.' html_template = 'disturbance/emails/approval_renewal_notification.html' txt_template = 'disturbance/emails/approval_renewal_notification.txt' def send_approval_expire_email_notification(approval): email = ApprovalExpireNotificationEmail() proposal = approval.current_proposal context = { 'approval': approval, 'proposal': proposal } all_ccs = [] if proposal.applicant.email: cc_list = proposal.applicant.email if cc_list: all_ccs = [cc_list] msg = email.send(proposal.submitter.email,cc=all_ccs, context=context) sender = settings.DEFAULT_FROM_EMAIL try: sender_user = EmailUser.objects.get(email__icontains=sender) except: EmailUser.objects.create(email=sender, password='') sender_user = EmailUser.objects.get(email__icontains=sender) _log_approval_email(msg, approval, sender=sender_user) _log_org_email(msg, proposal.applicant, proposal.submitter, sender=sender_user) def send_approval_cancel_email_notification(approval, future_cancel=False): email = ApprovalCancelNotificationEmail() proposal = approval.current_proposal context = { 'approval': approval, 'future_cancel': future_cancel } all_ccs = [] if proposal.applicant.email: cc_list = proposal.applicant.email if cc_list: all_ccs = [cc_list] sender = settings.DEFAULT_FROM_EMAIL try: sender_user = EmailUser.objects.get(email__icontains=sender) except: EmailUser.objects.create(email=sender, password='') sender_user = EmailUser.objects.get(email__icontains=sender) msg = email.send(proposal.submitter.email, cc=all_ccs, context=context) sender = settings.DEFAULT_FROM_EMAIL _log_approval_email(msg, approval, sender=sender_user) _log_org_email(msg, proposal.applicant, proposal.submitter, sender=sender_user) def send_approval_suspend_email_notification(approval, future_suspend=False): email = ApprovalSuspendNotificationEmail() 
proposal = approval.current_proposal context = { 'approval': approval, 'details': approval.suspension_details['details'], 'from_date': approval.suspension_details['from_date'], 'to_date': approval.suspension_details['to_date'], 'future_suspend': future_suspend } all_ccs = [] if proposal.applicant.email: cc_list = proposal.applicant.email if cc_list: all_ccs = [cc_list] sender = settings.DEFAULT_FROM_EMAIL try: sender_user = EmailUser.objects.get(email__icontains=sender) except: EmailUser.objects.create(email=sender, password='') sender_user = EmailUser.objects.get(email__icontains=sender) msg = email.send(proposal.submitter.email, cc=all_ccs, context=context) sender = settings.DEFAULT_FROM_EMAIL _log_approval_email(msg, approval, sender=sender_user) _log_org_email(msg, proposal.applicant, proposal.submitter, sender=sender_user) def send_approval_surrender_email_notification(approval, future_surrender=False): email = ApprovalSurrenderNotificationEmail() proposal = approval.current_proposal context = { 'approval': approval, 'details': approval.surrender_details['details'], 'surrender_date': approval.surrender_details['surrender_date'], 'future_surrender': future_surrender } all_ccs = [] if proposal.applicant.email: cc_list = proposal.applicant.email if cc_list: all_ccs = [cc_list] sender = settings.DEFAULT_FROM_EMAIL try: sender_user = EmailUser.objects.get(email__icontains=sender) except: EmailUser.objects.create(email=sender, password='') sender_user = EmailUser.objects.get(email__icontains=sender) msg = email.send(proposal.submitter.email, cc=all_ccs, context=context) _log_approval_email(msg, approval, sender=sender_user) _log_org_email(msg, proposal.applicant, proposal.submitter, sender=sender_user) def send_approval_renewal_email_notification(approval): email = ApprovalRenewalNotificationEmail() proposal = approval.current_proposal context = { 'approval': approval, 'proposal': approval.current_proposal } all_ccs = [] if proposal.applicant.email: cc_list = proposal.applicant.email if cc_list: all_ccs = [cc_list] sender = settings.DEFAULT_FROM_EMAIL try: sender_user = EmailUser.objects.get(email__icontains=sender) except: EmailUser.objects.create(email=sender, password='') sender_user = EmailUser.objects.get(email__icontains=sender) renewal_document= approval.renewal_document._file if renewal_document is not None: file_name = approval.renewal_document.name attachment = (file_name, renewal_document.file.read(), 'application/pdf') attachment = [attachment] else: attachment = [] msg = email.send(proposal.submitter.email, cc=all_ccs, attachments=attachment, context=context) sender = settings.DEFAULT_FROM_EMAIL _log_approval_email(msg, approval, sender=sender_user) _log_org_email(msg, proposal.applicant, proposal.submitter, sender=sender_user) def send_approval_reinstate_email_notification(approval, request): email = ApprovalReinstateNotificationEmail() proposal = approval.current_proposal context = { 'approval': approval, } all_ccs = [] if proposal.applicant.email: cc_list = proposal.applicant.email if cc_list: all_ccs = [cc_list] msg = email.send(proposal.submitter.email,cc=all_ccs, context=context) sender = request.user if request else settings.DEFAULT_FROM_EMAIL _log_approval_email(msg, approval, sender=sender) _log_org_email(msg, proposal.applicant, proposal.submitter, sender=sender) def _log_approval_email(email_message, approval, sender=None): from disturbance.components.approvals.models import ApprovalLogEntry if isinstance(email_message, (EmailMultiAlternatives, EmailMessage,)): text 
= email_message.body subject = email_message.subject fromm = smart_text(sender) if sender else smart_text(email_message.from_email) if isinstance(email_message.to, list): to = ','.join(email_message.to) else: to = smart_text(email_message.to) all_ccs = [] if email_message.cc: all_ccs += list(email_message.cc) if email_message.bcc: all_ccs += list(email_message.bcc) all_ccs = ','.join(all_ccs) else: text = smart_text(email_message) subject = '' to = approval.current_proposal.submitter.email fromm = smart_text(sender) if sender else SYSTEM_NAME all_ccs = '' customer = approval.current_proposal.submitter staff = sender kwargs = { 'subject': subject, 'text': text, 'approval': approval, 'customer': customer, 'staff': staff, 'to': to, 'fromm': fromm, 'cc': all_ccs } email_entry = ApprovalLogEntry.objects.create(**kwargs) return email_entry def _log_org_email(email_message, organisation, customer ,sender=None): from disturbance.components.organisations.models import OrganisationLogEntry if isinstance(email_message, (EmailMultiAlternatives, EmailMessage,)): text = email_message.body subject = email_message.subject fromm = smart_text(sender) if sender else smart_text(email_message.from_email) if isinstance(email_message.to, list): to = ','.join(email_message.to) else: to = smart_text(email_message.to) all_ccs = [] if email_message.cc: all_ccs += list(email_message.cc) if email_message.bcc: all_ccs += list(email_message.bcc) all_ccs = ','.join(all_ccs) else: text = smart_text(email_message) subject = '' to = customer fromm = smart_text(sender) if sender else SYSTEM_NAME all_ccs = '' customer = customer staff = sender kwargs = { 'subject': subject, 'text': text, 'organisation': organisation, 'customer': customer, 'staff': staff, 'to': to, 'fromm': fromm, 'cc': all_ccs } email_entry = OrganisationLogEntry.objects.create(**kwargs) return email_entry
true
true
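The approvals-email record above closes with the _log_approval_email and _log_org_email helpers, which flatten an outgoing message (or a bare-string fallback) into the subject/text/to/fromm/cc kwargs of a log entry. Below is a minimal, stdlib-only sketch of that normalisation step; it is illustrative only, SimpleMessage stands in for Django's EmailMessage, and SYSTEM_NAME is a placeholder for the project constant the original helpers assume.

# Illustrative sketch only -- not part of the dataset record above.
from dataclasses import dataclass, field
from typing import List, Union

SYSTEM_NAME = "system"  # assumed default "from" label when no sender is given

@dataclass
class SimpleMessage:
    # Stand-in for django.core.mail.EmailMessage.
    subject: str
    body: str
    from_email: str
    to: Union[str, List[str]]
    cc: List[str] = field(default_factory=list)
    bcc: List[str] = field(default_factory=list)

def normalise_for_log(email_message, fallback_to="", sender=None):
    """Flatten a message (or a bare string) into the kwargs a log entry needs."""
    if isinstance(email_message, SimpleMessage):
        to = ",".join(email_message.to) if isinstance(email_message.to, list) \
            else str(email_message.to)
        all_ccs = ",".join(list(email_message.cc) + list(email_message.bcc))
        return {
            "subject": email_message.subject,
            "text": email_message.body,
            "to": to,
            "fromm": str(sender) if sender else email_message.from_email,
            "cc": all_ccs,
        }
    # Plain-string fallback, mirroring the else branch of the original helpers.
    return {
        "subject": "",
        "text": str(email_message),
        "to": fallback_to,
        "fromm": str(sender) if sender else SYSTEM_NAME,
        "cc": "",
    }

if __name__ == "__main__":
    msg = SimpleMessage("Approval suspended", "details...", "noreply@example.com",
                        to=["applicant@example.com"], cc=["org@example.com"])
    print(normalise_for_log(msg, sender="officer@example.com"))
    print(normalise_for_log("plain text body", fallback_to="applicant@example.com"))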
f702a1d2e380b80b1174c5cef1e503b3d1000993
13,542
py
Python
etender_service.py
Bodyan37/robot_tests.broker.etender
be35364965d886312e0b6d803f985f2cfc981a94
[ "Apache-2.0" ]
null
null
null
etender_service.py
Bodyan37/robot_tests.broker.etender
be35364965d886312e0b6d803f985f2cfc981a94
[ "Apache-2.0" ]
null
null
null
etender_service.py
Bodyan37/robot_tests.broker.etender
be35364965d886312e0b6d803f985f2cfc981a94
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 - from iso8601 import parse_date from datetime import datetime, date, time, timedelta import dateutil.parser from pytz import timezone import os from decimal import Decimal import re TZ = timezone(os.environ['TZ'] if 'TZ' in os.environ else 'Europe/Kiev') def get_all_etender_dates(initial_tender_data): tender_period = initial_tender_data.tenderPeriod start_dt = dateutil.parser.parse(tender_period['startDate']) end_dt = dateutil.parser.parse(tender_period['endDate']) data = type('periods', (), { # dynamically creating objects instead of another dict 'tenderStart': type('date', (), {'date': start_dt.strftime("%d-%m-%Y"), 'time': start_dt.strftime("%H:%M")}), 'tenderEnd': type('date', (), {'date': end_dt.strftime("%d-%m-%Y"), 'time': end_dt.strftime("%H:%M")})}) if 'enquiryPeriod' in initial_tender_data: end_period = dateutil.parser.parse(initial_tender_data.enquiryPeriod['endDate']) data.enquiryEnd = type('date', (), {'date': end_period.strftime("%d-%m-%Y"), 'time': end_period.strftime("%H:%M")}) return data def get_procedure_type(methodType): return { 'aboveThresholdUA': 'Відкриті торги', 'belowThreshold': 'Допорогові закупівлі', 'negotiation': 'Переговорна процедура', 'aboveThresholdEU': 'Відкриті торги з публікацією англійською мовою', 'aboveThresholdUA.defense': 'Переговорна процедура для потреб оборони', 'reporting': 'Звіт про укладений договір', 'competitiveDialogueEU': 'Конкурентний діалог з публікацією англійською мовою 1-ий етап', 'competitiveDialogueUA': 'Конкурентний діалог 1-ий етап', 'open_esco': 'Відкриті торги для закупівлі енергосервісу', 'esco': 'Відкриті торги для закупівлі енергосервісу', 'closeFrameworkAgreementUA': 'Відкриті торги для укладання рамкової угоди', 'open_framework': 'Відкриті торгии для укладання рамкової угоди' }[methodType].decode('utf-8') def get_method_type(procedure_name): return { u'переговорна процедура для потреб оборони': 'aboveThresholdUA.defense', u'допорогові закупівлі': 'belowThreshold', u'відкриті торги з публікацією англійською мовою': 'aboveThresholdEU', u'переговорна процедура': 'negotiation', u'відкриті торги': 'aboveThresholdUA', u'конкурентний діалог 1-ий етап': 'competitiveDialogueUA', u'конкурентний діалог 2-ий етап': 'competitiveDialogueUA.stage2', u'звіт про укладений договір': 'reporting', u'відкриті торги для закупівлі енергосервісу': 'open_esco', u'відкриті торги для закупівлі енергосервісу': 'esco', u'конкурентний діалог з публікацією англійською мовою 1-ий етап': 'competitiveDialogueEU', u'конкурентний діалог з публікацією англійською мовою 2-ий етап': 'competitiveDialogueEU.stage2', u'відкриті торги для укладання рамкової угоди': 'closeFrameworkAgreementUA', u'відкриті торгии для укладання рамкової угоди': 'open_framework' }[procedure_name] def parse_etender_date(date, as_string=False): # converts date from ui to datetime d = datetime.strptime(date, '%d-%m-%Y, %H:%M') if as_string: return str(d) return d def cut_letters_and_parse_etender_date(date, as_string=True): # converts date from ui d = datetime.strptime(date.split(' ')[1], '%d-%m-%Y') if as_string: return str(d) return d def prepare_locator_to_scroll(locator): if locator[:3] == 'id=': return '//*[@id="{}"]'.format(locator[3:]) return locator[6:].replace("'", '"') # 6 for xpath= def to_iso(date): return date.isoformat() def convert_etender_date_to_iso_format(date): return TZ.localize(parse_etender_date(date)).isoformat() def convet_fra_to_variable(raw): b = re.findall(r'P(\d+)Y(\d+)M(\d+)D.*', raw) c, d, e = b[0] return c, d, e def 
convet_raw_to_chack(raw): raw = raw.replace(' ', '') b = re.findall(r'(\d+)р(\d+)м(\d+)д', raw) c, d, e = b[0] return c, d, e def get_year_from_full_date(string): data_as_str = string.split('T')[0] data_as_datetime = datetime.strptime(data_as_str, '%Y-%m-%d') return str(data_as_datetime.year) def convert_date_to_etender_format(isodate): iso_dt = parse_date(isodate) date_string = iso_dt.strftime("%d-%m-%Y") return date_string def convert_datetime_for_delivery(isodate): iso_dt = parse_date(isodate) date_string = iso_dt.strftime("%Y-%m-%d %H:%M") return date_string def convert_time_to_etender_format(isodate): iso_dt = parse_date(isodate) time_string = iso_dt.strftime("%H:%M") return time_string def float_to_string_2f(value): return '{:.2f}'.format(value) def float_to_string_3f(value): return '{:.3f}'.format(value) def string_to_float(string): return float(string) def change_data(initial_data): #TODO: remove redundant hardcoded values # initial_data['data']['procuringEntity']['identifier']['legalName'] = u"TenderOwner#" # initial_data['data']['procuringEntity']['identifier']['id'] = u"88008800" # initial_data['data']['procuringEntity']['name'] = u"TenderOwner#" initial_data['data']['items'][0]['deliveryAddress']['locality'] = u"м. Київ" initial_data['data']['items'][0]['deliveryAddress']['region'] = u"Київська область" initial_data['data']['procuringEntity']['address']['locality'] = u"Алупка" initial_data['data']['procuringEntity']['address']['postalCode'] = u"13531" initial_data['data']['procuringEntity']['address']['region'] = u"АР Крим" initial_data['data']['procuringEntity']['address']['streetAddress'] = u"Фрунзе, 666" initial_data['data']['procuringEntity']['contactPoint']['name'] = u"Владелец Этого Тендера" initial_data['data']['procuringEntity']['contactPoint']['telephone'] = u"613371488228" initial_data['data']['procuringEntity']['contactPoint']['url'] = u"http://e-tender.ua/" return initial_data def change_data_for_tender_owner(initial_data): initial_data['data']['procuringEntity']['identifier']['legalName'] = u"TenderOwner#" initial_data['data']['procuringEntity']['identifier']['id'] = u"88008800" initial_data['data']['procuringEntity']['name'] = u"TenderOwner#" return initial_data def change_buyers_data(initial_data): initial_data['data']['buyers'][0]['name'] = u"TenderOwner#" initial_data['data']['buyers'][0]['identifier']['id'] = u"88008800" initial_data['data']['buyers'][0]['identifier']['legalName'] = u"TenderOwner#" initial_data['data']['procuringEntity']['name'] = initial_data['data']['buyers'][0]['name'] initial_data['data']['procuringEntity']['identifier']['id'] = initial_data['data']['buyers'][0]['identifier']['id'] initial_data['data']['procuringEntity']['identifier']['legalName'] = \ initial_data['data']['buyers'][0]['identifier']['legalName'] return initial_data def convert_etender_date_to_iso_format_and_add_timezone(date): return TZ.localize(parse_etender_date(date)).isoformat() def get_time_now(): time_string = datetime.now().strftime("%H:%M") return time_string def get_date_now(): date_string = datetime.now().strftime("%d-%m-%Y") return date_string def get_date_10d_future(): date_string = (datetime.now() + timedelta(days=10)).strftime("%d-%m-%Y") return date_string def get_time_offset(add_minutes=17): _now = datetime.now() + timedelta(minutes=add_minutes) return _now.time().strftime('%H:%M') def convert_common_string_to_etender_string(string): dict = get_helper_dictionary() for key, val in dict.iteritems(): if val == string: return key return string def 
parse_currency_value_with_spaces(raw): # to convert raw values like '2 216 162,83 UAH' to string which is ready for conversion to float return ''.join(raw.split(' ')[:-1]).replace(',', '.') def get_minimalStep_currency(raw_value): # to get currency 'UAH' from raw values like '2 216 162,83 UAH' result_dic = raw_value.split(' ') result = result_dic[-1] return result def parse_currency_value_with_spaces_percentage(raw): # to convert raw values like '1,3244%' to string which is ready for conversion to float result = raw.replace('%', '') result = Decimal(result) result = (result / 100) result = float(result) return result def parse_currency_value_with_spaces_percentage_NBU(raw): # to convert raw values like 'Hi – 1,3244%' to string which is ready for conversion to float result = raw.split(' ', 4)[4] result = result.replace('%', '') result = Decimal(result) result = (result / 100) result = float(result) return result def convert_etender_string_to_common_string(string): return get_helper_dictionary().get(string, string) def get_helper_dictionary(): return { u"КЛАСИФІКАТОР ДК 021:2015 (CPV)": u"ДК021", u"кг.": u"кілограм", u"грн.": u"UAH", u"(з ПДВ)": True, u"з ПДВ": True, u"без ПДВ": False, # TODO: remove this temporary workaround, consult with quinta team about input data u"Дніпро": u"Дніпропетровськ", #tender statuses u'період уточнень': u'active.enquiries', u'очікування пропозицій': u'active.tendering', u'прекваліфікація': u'active.pre-qualification', u'оцінка пропозицій': u'active.pre-qualification', u'блокування перед аукціоном': u'active.pre-qualification.stand-still', u'проведення переговорів': u'active.pre-qualification.stand-still', u'перший проміжний етап': u'active.stage2.pending', u'період аукціону': u'active.auction', u'кваліфікація переможця': u'active.qualification', u'пропозиції розглянуто': u'active.awarded', u'завершена закупівля': u'complete', u'перший етап завершено': u'complete', u'закупівля не відбулась': u'unsuccessful', u'відмінена закупівля': u'cancelled', #bid statuses u'Пропозиція не дійсна': u'invalid', u"ст.35 ч. 2 п. 1": u"artContestIP", u"ст.35 ч. 2 п. 2": u"noCompetition", u"ст.35 ч. 2 п. 4": u"twiceUnsuccessful", u"ст.35 ч. 2 п. 5": u"additionalPurchase", u"ст.35 ч. 2 п. 6": u"additionalConstruction", u"ст.35 ч. 2 п. 
7": u"stateLegalServices", u"Договір поки що не опубліковано": u"pending", u"Договір опубліковано": u"active", u"Переможець торгів": u"active", u"учасник виграв закупівлю": u"active", u'вимога': u'claim', u'відповідь надана': u'answered', u'задоволено': u'resolved', u'не задоволено': u'declined', u'скасована скаржником': u'cancelled', u'відхилено': u'invalid', u'залишена без відповіді': u'ignored', u'очікується кваліфікація': u'pending', u'відкликається скаржником': u'stopping', u'очікує розгляду органом оскарження': u'pending', u'Співфінансування з бюджетних коштів': u'budget', u'на розгляді': u'pending', u'Пропозиція не активована': u'invalid' } def get_feature_index(i): return {0.05: '1', 0.01: '2', 0: '3'}[i] def get_doc_type_index(i): return {'financial_documents': '1', 'qualification_documents': '2', 'eligibility_documents': '3'}.get(i, i) def convert_unit_name_to_unit_code(string): return { u"блок": u"D64", u"гектар": u"HAR", u"кілограми": u"KGM", u"кілометри": u"KMT", u"літр": u"LTR", u"лот": u"LO", u"метри квадратні": u"MTK", u"метри кубічні": u"MTQ", u"метри": u"MTR", u"місяць": u"MON", u"набір": u"SET", u"пара": u"PR", u"пачка": u"RM", u"пачок": u"NMP", u"послуга": u"E48", u"рейс": u"E54", u"тони": u"TNE", u"упаковка": u"PK", u"Флакон": u"VI", u"штуки": u"H87", u"ящик": u"BX", }.get(string, string) def convert_milestone_from_text_to_code(string): return { u"Аванс": u"prepayment", u"Пiсляоплата": u"postpayment" }.get(string, string) def convert_milestone_from_text_to_title(string): return { u"Виконання робіт": "executionOfWorks", u"Поставка товару": "deliveryOfGoods", u"Надання послуг": "submittingServices", u"Підписання договору": "signingTheContract", u"Дата подання заявки": "submissionDateOfApplications", u"Дата виставлення рахунку": "dateOfInvoicing", u"Дата закінчення звітного періоду": "endDateOfTheReportingPeriod", u"Інша подія": "anotherEvent", }.get(string, string) def convert_milestone_from_text_to_day_type(string): return { u"Робочі": "working", u"Банківські": "banking", u"Календарні": "calendar" }.get(string, string) def convert_main_procurement_category(string): return { u"Товари": "goods", u"Послуги": "services", u"Роботи": "works" }.get(string, string) def get_modulus_from_number(number): if isinstance(number, int): pass elif isinstance(number, str): number = int(number) elif isinstance(number, unicode): number = int(number) return abs(number)
35.920424
119
0.645326
from iso8601 import parse_date from datetime import datetime, date, time, timedelta import dateutil.parser from pytz import timezone import os from decimal import Decimal import re TZ = timezone(os.environ['TZ'] if 'TZ' in os.environ else 'Europe/Kiev') def get_all_etender_dates(initial_tender_data): tender_period = initial_tender_data.tenderPeriod start_dt = dateutil.parser.parse(tender_period['startDate']) end_dt = dateutil.parser.parse(tender_period['endDate']) data = type('periods', (), { 'tenderStart': type('date', (), {'date': start_dt.strftime("%d-%m-%Y"), 'time': start_dt.strftime("%H:%M")}), 'tenderEnd': type('date', (), {'date': end_dt.strftime("%d-%m-%Y"), 'time': end_dt.strftime("%H:%M")})}) if 'enquiryPeriod' in initial_tender_data: end_period = dateutil.parser.parse(initial_tender_data.enquiryPeriod['endDate']) data.enquiryEnd = type('date', (), {'date': end_period.strftime("%d-%m-%Y"), 'time': end_period.strftime("%H:%M")}) return data def get_procedure_type(methodType): return { 'aboveThresholdUA': 'Відкриті торги', 'belowThreshold': 'Допорогові закупівлі', 'negotiation': 'Переговорна процедура', 'aboveThresholdEU': 'Відкриті торги з публікацією англійською мовою', 'aboveThresholdUA.defense': 'Переговорна процедура для потреб оборони', 'reporting': 'Звіт про укладений договір', 'competitiveDialogueEU': 'Конкурентний діалог з публікацією англійською мовою 1-ий етап', 'competitiveDialogueUA': 'Конкурентний діалог 1-ий етап', 'open_esco': 'Відкриті торги для закупівлі енергосервісу', 'esco': 'Відкриті торги для закупівлі енергосервісу', 'closeFrameworkAgreementUA': 'Відкриті торги для укладання рамкової угоди', 'open_framework': 'Відкриті торгии для укладання рамкової угоди' }[methodType].decode('utf-8') def get_method_type(procedure_name): return { u'переговорна процедура для потреб оборони': 'aboveThresholdUA.defense', u'допорогові закупівлі': 'belowThreshold', u'відкриті торги з публікацією англійською мовою': 'aboveThresholdEU', u'переговорна процедура': 'negotiation', u'відкриті торги': 'aboveThresholdUA', u'конкурентний діалог 1-ий етап': 'competitiveDialogueUA', u'конкурентний діалог 2-ий етап': 'competitiveDialogueUA.stage2', u'звіт про укладений договір': 'reporting', u'відкриті торги для закупівлі енергосервісу': 'open_esco', u'відкриті торги для закупівлі енергосервісу': 'esco', u'конкурентний діалог з публікацією англійською мовою 1-ий етап': 'competitiveDialogueEU', u'конкурентний діалог з публікацією англійською мовою 2-ий етап': 'competitiveDialogueEU.stage2', u'відкриті торги для укладання рамкової угоди': 'closeFrameworkAgreementUA', u'відкриті торгии для укладання рамкової угоди': 'open_framework' }[procedure_name] def parse_etender_date(date, as_string=False): d = datetime.strptime(date, '%d-%m-%Y, %H:%M') if as_string: return str(d) return d def cut_letters_and_parse_etender_date(date, as_string=True): d = datetime.strptime(date.split(' ')[1], '%d-%m-%Y') if as_string: return str(d) return d def prepare_locator_to_scroll(locator): if locator[:3] == 'id=': return '//*[@id="{}"]'.format(locator[3:]) return locator[6:].replace("'", '"') # 6 for xpath= def to_iso(date): return date.isoformat() def convert_etender_date_to_iso_format(date): return TZ.localize(parse_etender_date(date)).isoformat() def convet_fra_to_variable(raw): b = re.findall(r'P(\d+)Y(\d+)M(\d+)D.*', raw) c, d, e = b[0] return c, d, e def convet_raw_to_chack(raw): raw = raw.replace(' ', '') b = re.findall(r'(\d+)р(\d+)м(\d+)д', raw) c, d, e = b[0] return c, d, e def 
get_year_from_full_date(string): data_as_str = string.split('T')[0] data_as_datetime = datetime.strptime(data_as_str, '%Y-%m-%d') return str(data_as_datetime.year) def convert_date_to_etender_format(isodate): iso_dt = parse_date(isodate) date_string = iso_dt.strftime("%d-%m-%Y") return date_string def convert_datetime_for_delivery(isodate): iso_dt = parse_date(isodate) date_string = iso_dt.strftime("%Y-%m-%d %H:%M") return date_string def convert_time_to_etender_format(isodate): iso_dt = parse_date(isodate) time_string = iso_dt.strftime("%H:%M") return time_string def float_to_string_2f(value): return '{:.2f}'.format(value) def float_to_string_3f(value): return '{:.3f}'.format(value) def string_to_float(string): return float(string) def change_data(initial_data): #TODO: remove redundant hardcoded values # initial_data['data']['procuringEntity']['identifier']['legalName'] = u"TenderOwner#" # initial_data['data']['procuringEntity']['identifier']['id'] = u"88008800" # initial_data['data']['procuringEntity']['name'] = u"TenderOwner#" initial_data['data']['items'][0]['deliveryAddress']['locality'] = u"м. Київ" initial_data['data']['items'][0]['deliveryAddress']['region'] = u"Київська область" initial_data['data']['procuringEntity']['address']['locality'] = u"Алупка" initial_data['data']['procuringEntity']['address']['postalCode'] = u"13531" initial_data['data']['procuringEntity']['address']['region'] = u"АР Крим" initial_data['data']['procuringEntity']['address']['streetAddress'] = u"Фрунзе, 666" initial_data['data']['procuringEntity']['contactPoint']['name'] = u"Владелец Этого Тендера" initial_data['data']['procuringEntity']['contactPoint']['telephone'] = u"613371488228" initial_data['data']['procuringEntity']['contactPoint']['url'] = u"http://e-tender.ua/" return initial_data def change_data_for_tender_owner(initial_data): initial_data['data']['procuringEntity']['identifier']['legalName'] = u"TenderOwner#" initial_data['data']['procuringEntity']['identifier']['id'] = u"88008800" initial_data['data']['procuringEntity']['name'] = u"TenderOwner#" return initial_data def change_buyers_data(initial_data): initial_data['data']['buyers'][0]['name'] = u"TenderOwner#" initial_data['data']['buyers'][0]['identifier']['id'] = u"88008800" initial_data['data']['buyers'][0]['identifier']['legalName'] = u"TenderOwner#" initial_data['data']['procuringEntity']['name'] = initial_data['data']['buyers'][0]['name'] initial_data['data']['procuringEntity']['identifier']['id'] = initial_data['data']['buyers'][0]['identifier']['id'] initial_data['data']['procuringEntity']['identifier']['legalName'] = \ initial_data['data']['buyers'][0]['identifier']['legalName'] return initial_data def convert_etender_date_to_iso_format_and_add_timezone(date): return TZ.localize(parse_etender_date(date)).isoformat() def get_time_now(): time_string = datetime.now().strftime("%H:%M") return time_string def get_date_now(): date_string = datetime.now().strftime("%d-%m-%Y") return date_string def get_date_10d_future(): date_string = (datetime.now() + timedelta(days=10)).strftime("%d-%m-%Y") return date_string def get_time_offset(add_minutes=17): _now = datetime.now() + timedelta(minutes=add_minutes) return _now.time().strftime('%H:%M') def convert_common_string_to_etender_string(string): dict = get_helper_dictionary() for key, val in dict.iteritems(): if val == string: return key return string def parse_currency_value_with_spaces(raw): # to convert raw values like '2 216 162,83 UAH' to string which is ready for conversion to float return 
''.join(raw.split(' ')[:-1]).replace(',', '.') def get_minimalStep_currency(raw_value): # to get currency 'UAH' from raw values like '2 216 162,83 UAH' result_dic = raw_value.split(' ') result = result_dic[-1] return result def parse_currency_value_with_spaces_percentage(raw): # to convert raw values like '1,3244%' to string which is ready for conversion to float result = raw.replace('%', '') result = Decimal(result) result = (result / 100) result = float(result) return result def parse_currency_value_with_spaces_percentage_NBU(raw): # to convert raw values like 'Hi – 1,3244%' to string which is ready for conversion to float result = raw.split(' ', 4)[4] result = result.replace('%', '') result = Decimal(result) result = (result / 100) result = float(result) return result def convert_etender_string_to_common_string(string): return get_helper_dictionary().get(string, string) def get_helper_dictionary(): return { u"КЛАСИФІКАТОР ДК 021:2015 (CPV)": u"ДК021", u"кг.": u"кілограм", u"грн.": u"UAH", u"(з ПДВ)": True, u"з ПДВ": True, u"без ПДВ": False, # TODO: remove this temporary workaround, consult with quinta team about input data u"Дніпро": u"Дніпропетровськ", #tender statuses u'період уточнень': u'active.enquiries', u'очікування пропозицій': u'active.tendering', u'прекваліфікація': u'active.pre-qualification', u'оцінка пропозицій': u'active.pre-qualification', u'блокування перед аукціоном': u'active.pre-qualification.stand-still', u'проведення переговорів': u'active.pre-qualification.stand-still', u'перший проміжний етап': u'active.stage2.pending', u'період аукціону': u'active.auction', u'кваліфікація переможця': u'active.qualification', u'пропозиції розглянуто': u'active.awarded', u'завершена закупівля': u'complete', u'перший етап завершено': u'complete', u'закупівля не відбулась': u'unsuccessful', u'відмінена закупівля': u'cancelled', #bid statuses u'Пропозиція не дійсна': u'invalid', u"ст.35 ч. 2 п. 1": u"artContestIP", u"ст.35 ч. 2 п. 2": u"noCompetition", u"ст.35 ч. 2 п. 4": u"twiceUnsuccessful", u"ст.35 ч. 2 п. 5": u"additionalPurchase", u"ст.35 ч. 2 п. 6": u"additionalConstruction", u"ст.35 ч. 2 п. 
7": u"stateLegalServices", u"Договір поки що не опубліковано": u"pending", u"Договір опубліковано": u"active", u"Переможець торгів": u"active", u"учасник виграв закупівлю": u"active", u'вимога': u'claim', u'відповідь надана': u'answered', u'задоволено': u'resolved', u'не задоволено': u'declined', u'скасована скаржником': u'cancelled', u'відхилено': u'invalid', u'залишена без відповіді': u'ignored', u'очікується кваліфікація': u'pending', u'відкликається скаржником': u'stopping', u'очікує розгляду органом оскарження': u'pending', u'Співфінансування з бюджетних коштів': u'budget', u'на розгляді': u'pending', u'Пропозиція не активована': u'invalid' } def get_feature_index(i): return {0.05: '1', 0.01: '2', 0: '3'}[i] def get_doc_type_index(i): return {'financial_documents': '1', 'qualification_documents': '2', 'eligibility_documents': '3'}.get(i, i) def convert_unit_name_to_unit_code(string): return { u"блок": u"D64", u"гектар": u"HAR", u"кілограми": u"KGM", u"кілометри": u"KMT", u"літр": u"LTR", u"лот": u"LO", u"метри квадратні": u"MTK", u"метри кубічні": u"MTQ", u"метри": u"MTR", u"місяць": u"MON", u"набір": u"SET", u"пара": u"PR", u"пачка": u"RM", u"пачок": u"NMP", u"послуга": u"E48", u"рейс": u"E54", u"тони": u"TNE", u"упаковка": u"PK", u"Флакон": u"VI", u"штуки": u"H87", u"ящик": u"BX", }.get(string, string) def convert_milestone_from_text_to_code(string): return { u"Аванс": u"prepayment", u"Пiсляоплата": u"postpayment" }.get(string, string) def convert_milestone_from_text_to_title(string): return { u"Виконання робіт": "executionOfWorks", u"Поставка товару": "deliveryOfGoods", u"Надання послуг": "submittingServices", u"Підписання договору": "signingTheContract", u"Дата подання заявки": "submissionDateOfApplications", u"Дата виставлення рахунку": "dateOfInvoicing", u"Дата закінчення звітного періоду": "endDateOfTheReportingPeriod", u"Інша подія": "anotherEvent", }.get(string, string) def convert_milestone_from_text_to_day_type(string): return { u"Робочі": "working", u"Банківські": "banking", u"Календарні": "calendar" }.get(string, string) def convert_main_procurement_category(string): return { u"Товари": "goods", u"Послуги": "services", u"Роботи": "works" }.get(string, string) def get_modulus_from_number(number): if isinstance(number, int): pass elif isinstance(number, str): number = int(number) elif isinstance(number, unicode): number = int(number) return abs(number)
true
true
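The etender_service record above repeatedly parses UI currency strings such as '2 216 162,83 UAH' (space-grouped thousands, comma decimal separator) and percentage strings such as '1,3244%'. The sketch below is a loose, stdlib-only rendition of that pattern; the helper names here are hypothetical, and the original module works with Decimal and pytz rather than plain float arithmetic.

# Illustrative sketch only -- not part of the dataset record above.

def split_ui_currency(raw):
    """Return (amount as float, currency code) for a UI string like '2 216 162,83 UAH'."""
    parts = raw.split(' ')
    currency = parts[-1]                             # last token is the code, e.g. 'UAH'
    amount = ''.join(parts[:-1]).replace(',', '.')   # drop grouping spaces, comma -> dot
    return float(amount), currency

def percent_to_fraction(raw):
    """Convert a UI percentage such as '1,3244%' into a float fraction (0.013244)."""
    return float(raw.replace('%', '').replace(',', '.')) / 100

if __name__ == "__main__":
    assert split_ui_currency('2 216 162,83 UAH') == (2216162.83, 'UAH')
    assert abs(percent_to_fraction('1,3244%') - 0.013244) < 1e-12
    print("ok")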
f702a1da97b07980eb0080e9eb0cd536cdc7c88d
28,785
py
Python
python/ray/tests/test_runtime_env.py
RaphaelCS/ray
5f4d9085d2452186bff563fb2856e643c4c82095
[ "Apache-2.0" ]
1
2022-02-24T02:38:04.000Z
2022-02-24T02:38:04.000Z
python/ray/tests/test_runtime_env.py
swag1ong/ray
fdbeef604692aa308973988b32405ec0d70f9f40
[ "Apache-2.0" ]
null
null
null
python/ray/tests/test_runtime_env.py
swag1ong/ray
fdbeef604692aa308973988b32405ec0d70f9f40
[ "Apache-2.0" ]
null
null
null
import os import pytest import sys import random import tempfile import requests from pathlib import Path import ray from ray.test_utils import (run_string_as_driver, run_string_as_driver_nonblocking) from ray._private.utils import (get_wheel_filename, get_master_wheel_url, get_release_wheel_url) import ray.experimental.internal_kv as kv from time import sleep driver_script = """ from time import sleep import sys import logging sys.path.insert(0, "{working_dir}") import ray import ray.util import os try: import test_module except: pass try: job_config = ray.job_config.JobConfig( runtime_env={runtime_env} ) if not job_config.runtime_env: job_config=None if os.environ.get("USE_RAY_CLIENT"): ray.client("{address}").env({runtime_env}).namespace("").connect() else: ray.init(address="{address}", job_config=job_config, logging_level=logging.DEBUG, namespace="" ) except ValueError: print("ValueError") sys.exit(0) except TypeError: print("TypeError") sys.exit(0) except: print("ERROR") sys.exit(0) if os.environ.get("EXIT_AFTER_INIT"): sys.exit(0) @ray.remote def run_test(): return test_module.one() @ray.remote def check_file(name): try: with open(name) as f: return f.read() except: return "FAILED" @ray.remote class TestActor(object): @ray.method(num_returns=1) def one(self): return test_module.one() {execute_statement} if os.environ.get("USE_RAY_CLIENT"): ray.util.disconnect() else: ray.shutdown() sleep(10) """ def create_file(p): if not p.parent.exists(): p.parent.mkdir() with p.open("w") as f: f.write("Test") @pytest.fixture(scope="function") def working_dir(): with tempfile.TemporaryDirectory() as tmp_dir: path = Path(tmp_dir) module_path = path / "test_module" module_path.mkdir(parents=True) init_file = module_path / "__init__.py" test_file = module_path / "test.py" with test_file.open(mode="w") as f: f.write(""" def one(): return 1 """) with init_file.open(mode="w") as f: f.write(""" from test_module.test import one """) old_dir = os.getcwd() os.chdir(tmp_dir) yield tmp_dir os.chdir(old_dir) def start_client_server(cluster, client_mode): from ray._private.runtime_env import PKG_DIR if not client_mode: return (cluster.address, {}, PKG_DIR) ray.worker._global_node._ray_params.ray_client_server_port = "10003" ray.worker._global_node.start_ray_client_server() return ("localhost:10003", {"USE_RAY_CLIENT": "1"}, PKG_DIR) @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") def test_travel(): import uuid with tempfile.TemporaryDirectory() as tmp_dir: dir_paths = set() file_paths = set() item_num = 0 excludes = [] root = Path(tmp_dir) / "test" def construct(path, excluded=False, depth=0): nonlocal item_num path.mkdir(parents=True) if not excluded: dir_paths.add(str(path)) if depth > 8: return if item_num > 500: return dir_num = random.randint(0, 10) file_num = random.randint(0, 10) for _ in range(dir_num): uid = str(uuid.uuid4()).split("-")[0] dir_path = path / uid exclud_sub = random.randint(0, 5) == 0 if not excluded and exclud_sub: excludes.append(str(dir_path.relative_to(root))) if not excluded: construct(dir_path, exclud_sub or excluded, depth + 1) item_num += 1 if item_num > 1000: return for _ in range(file_num): uid = str(uuid.uuid4()).split("-")[0] with (path / uid).open("w") as f: v = random.randint(0, 1000) f.write(str(v)) if not excluded: if random.randint(0, 5) == 0: excludes.append( str((path / uid).relative_to(root))) else: file_paths.add((str(path / uid), str(v))) item_num += 1 construct(root) exclude_spec = ray._private.runtime_env._get_excludes(root, 
excludes) visited_dir_paths = set() visited_file_paths = set() def handler(path): if path.is_dir(): visited_dir_paths.add(str(path)) else: with open(path) as f: visited_file_paths.add((str(path), f.read())) ray._private.runtime_env._dir_travel(root, [exclude_spec], handler) assert file_paths == visited_file_paths assert dir_paths == visited_dir_paths """ The following test cases are related with runtime env. It following these steps 1) Creating a temporary dir with fixture working_dir 2) Using a template named driver_script defined globally 3) Overwrite runtime_env and execute_statement in the template 4) Execute it as a separate driver and return the result """ @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_empty_working_dir(ray_start_cluster_head, client_mode): cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, client_mode) env["EXIT_AFTER_INIT"] = "1" with tempfile.TemporaryDirectory() as working_dir: runtime_env = f"""{{ "working_dir": r"{working_dir}", "py_modules": [r"{working_dir}"] }}""" # Execute the following cmd in driver with runtime_env execute_statement = "sys.exit(0)" script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out != "ERROR" @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_invalid_working_dir(ray_start_cluster_head, working_dir, client_mode): cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, client_mode) env["EXIT_AFTER_INIT"] = "1" runtime_env = "{ 'working_dir': 10 }" # Execute the following cmd in driver with runtime_env execute_statement = "" script = driver_script.format(**locals()) out = run_string_as_driver(script, env).strip().split()[-1] assert out == "TypeError" runtime_env = "{ 'py_modules': [10] }" # Execute the following cmd in driver with runtime_env execute_statement = "" script = driver_script.format(**locals()) out = run_string_as_driver(script, env).strip().split()[-1] assert out == "TypeError" runtime_env = f"{{ 'working_dir': os.path.join(r'{working_dir}', 'na') }}" # Execute the following cmd in driver with runtime_env execute_statement = "" script = driver_script.format(**locals()) out = run_string_as_driver(script, env).strip().split()[-1] assert out == "ValueError" runtime_env = f"{{ 'py_modules': [os.path.join(r'{working_dir}', 'na')] }}" # Execute the following cmd in driver with runtime_env execute_statement = "" script = driver_script.format(**locals()) out = run_string_as_driver(script, env).strip().split()[-1] assert out == "ValueError" @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_single_node(ray_start_cluster_head, working_dir, client_mode): cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, client_mode) # Setup runtime env here runtime_env = f"""{{ "working_dir": "{working_dir}" }}""" # Execute the following cmd in driver with runtime_env execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))" script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split()[-1] == "1000" assert len(list(Path(PKG_DIR).iterdir())) == 1 assert len(kv._internal_kv_list("gcs://")) == 0 @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") 
@pytest.mark.parametrize("client_mode", [True, False]) def test_two_node(two_node_cluster, working_dir, client_mode): cluster, _ = two_node_cluster (address, env, PKG_DIR) = start_client_server(cluster, client_mode) # Testing runtime env with working_dir runtime_env = f"""{{ "working_dir": "{working_dir}" }}""" # Execute the following cmd in driver with runtime_env execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))" script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split()[-1] == "1000" assert len(list(Path(PKG_DIR).iterdir())) == 1 assert len(kv._internal_kv_list("gcs://")) == 0 @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_two_node_module(two_node_cluster, working_dir, client_mode): cluster, _ = two_node_cluster (address, env, PKG_DIR) = start_client_server(cluster, client_mode) # test runtime_env iwth py_modules runtime_env = """{ "py_modules": [test_module.__path__[0]] }""" # Execute the following cmd in driver with runtime_env execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))" script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split()[-1] == "1000" assert len(list(Path(PKG_DIR).iterdir())) == 1 @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_two_node_local_file(two_node_cluster, working_dir, client_mode): with open(os.path.join(working_dir, "test_file"), "w") as f: f.write("1") cluster, _ = two_node_cluster (address, env, PKG_DIR) = start_client_server(cluster, client_mode) # test runtime_env iwth working_dir runtime_env = f"""{{ "working_dir": "{working_dir}" }}""" # Execute the following cmd in driver with runtime_env execute_statement = """ vals = ray.get([check_file.remote('test_file')] * 1000) print(sum([int(v) for v in vals])) """ script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split()[-1] == "1000" assert len(list(Path(PKG_DIR).iterdir())) == 1 assert len(kv._internal_kv_list("gcs://")) == 0 @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_exclusion(ray_start_cluster_head, working_dir, client_mode): cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, client_mode) working_path = Path(working_dir) create_file(working_path / "tmp_dir" / "test_1") create_file(working_path / "tmp_dir" / "test_2") create_file(working_path / "tmp_dir" / "test_3") create_file(working_path / "tmp_dir" / "sub_dir" / "test_1") create_file(working_path / "tmp_dir" / "sub_dir" / "test_2") create_file(working_path / "test1") create_file(working_path / "test2") create_file(working_path / "test3") tmp_dir_test_3 = str((working_path / "tmp_dir" / "test_3").absolute()) runtime_env = f"""{{ "working_dir": r"{working_dir}", }}""" execute_statement = """ vals = ray.get([ check_file.remote('test1'), check_file.remote('test2'), check_file.remote('test3'), check_file.remote(os.path.join('tmp_dir', 'test_1')), check_file.remote(os.path.join('tmp_dir', 'test_2')), check_file.remote(os.path.join('tmp_dir', 'test_3')), check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_1')), check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_2')), ]) print(','.join(vals)) """ script = driver_script.format(**locals()) 
out = run_string_as_driver(script, env) # Test it works before assert out.strip().split("\n")[-1] == \ "Test,Test,Test,Test,Test,Test,Test,Test" runtime_env = f"""{{ "working_dir": r"{working_dir}", "excludes": [ # exclude by relative path r"test2", # exclude by dir r"{str(Path("tmp_dir") / "sub_dir")}", # exclude part of the dir r"{str(Path("tmp_dir") / "test_1")}", # exclude part of the dir r"{str(Path("tmp_dir") / "test_2")}", ] }}""" script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split("\n")[-1] == \ "Test,FAILED,Test,FAILED,FAILED,Test,FAILED,FAILED" @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_exclusion_2(ray_start_cluster_head, working_dir, client_mode): cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, client_mode) working_path = Path(working_dir) def create_file(p): if not p.parent.exists(): p.parent.mkdir(parents=True) with p.open("w") as f: f.write("Test") create_file(working_path / "tmp_dir" / "test_1") create_file(working_path / "tmp_dir" / "test_2") create_file(working_path / "tmp_dir" / "test_3") create_file(working_path / "tmp_dir" / "sub_dir" / "test_1") create_file(working_path / "tmp_dir" / "sub_dir" / "test_2") create_file(working_path / "test1") create_file(working_path / "test2") create_file(working_path / "test3") create_file(working_path / "cache" / "test_1") create_file(working_path / "tmp_dir" / "cache" / "test_1") create_file(working_path / "another_dir" / "cache" / "test_1") tmp_dir_test_3 = str((working_path / "tmp_dir" / "test_3").absolute()) runtime_env = f"""{{ "working_dir": r"{working_dir}", }}""" execute_statement = """ vals = ray.get([ check_file.remote('test1'), check_file.remote('test2'), check_file.remote('test3'), check_file.remote(os.path.join('tmp_dir', 'test_1')), check_file.remote(os.path.join('tmp_dir', 'test_2')), check_file.remote(os.path.join('tmp_dir', 'test_3')), check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_1')), check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_2')), check_file.remote(os.path.join("cache", "test_1")), check_file.remote(os.path.join("tmp_dir", "cache", "test_1")), check_file.remote(os.path.join("another_dir", "cache", "test_1")), ]) print(','.join(vals)) """ script = driver_script.format(**locals()) out = run_string_as_driver(script, env) # Test it works before assert out.strip().split("\n")[-1] == \ "Test,Test,Test,Test,Test,Test,Test,Test,Test,Test,Test" with open(f"{working_dir}/.gitignore", "w") as f: f.write(""" # Comment test_[12] /test1 !/tmp_dir/sub_dir/test_1 cache/ """) script = driver_script.format(**locals()) out = run_string_as_driver(script, env) t = out.strip().split("\n")[-1] assert out.strip().split("\n")[-1] == \ "FAILED,Test,Test,FAILED,FAILED,Test,Test,FAILED,FAILED,FAILED,FAILED" @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_runtime_env_getter(ray_start_cluster_head, working_dir, client_mode): cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, client_mode) runtime_env = f"""{{ "working_dir": "{working_dir}" }}""" # Execute the following cmd in driver with runtime_env execute_statement = """ print(ray.get_runtime_context().runtime_env["working_dir"]) """ script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert 
out.strip().split()[-1] == working_dir @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_two_node_uri(two_node_cluster, working_dir, client_mode): cluster, _ = two_node_cluster (address, env, PKG_DIR) = start_client_server(cluster, client_mode) import ray._private.runtime_env as runtime_env import tempfile with tempfile.NamedTemporaryFile(suffix="zip") as tmp_file: pkg_name = runtime_env.get_project_package_name(working_dir, [], []) pkg_uri = runtime_env.Protocol.PIN_GCS.value + "://" + pkg_name runtime_env.create_project_package(working_dir, [], [], tmp_file.name) runtime_env.push_package(pkg_uri, tmp_file.name) runtime_env = f"""{{ "uris": ["{pkg_uri}"] }}""" # Execute the following cmd in driver with runtime_env execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))" script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split()[-1] == "1000" assert len(list(Path(PKG_DIR).iterdir())) == 1 # pinned uri will not be deleted print(list(kv._internal_kv_list(""))) assert len(kv._internal_kv_list("pingcs://")) == 1 @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_regular_actors(ray_start_cluster_head, working_dir, client_mode): cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, client_mode) runtime_env = f"""{{ "working_dir": "{working_dir}" }}""" # Execute the following cmd in driver with runtime_env execute_statement = """ test_actor = TestActor.options(name="test_actor").remote() print(sum(ray.get([test_actor.one.remote()] * 1000))) """ script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split()[-1] == "1000" assert len(list(Path(PKG_DIR).iterdir())) == 1 assert len(kv._internal_kv_list("gcs://")) == 0 @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_detached_actors(ray_start_cluster_head, working_dir, client_mode): cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, client_mode) runtime_env = f"""{{ "working_dir": "{working_dir}" }}""" # Execute the following cmd in driver with runtime_env execute_statement = """ test_actor = TestActor.options(name="test_actor", lifetime="detached").remote() print(sum(ray.get([test_actor.one.remote()] * 1000))) """ script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split()[-1] == "1000" # It's a detached actors, so it should still be there assert len(kv._internal_kv_list("gcs://")) == 1 assert len(list(Path(PKG_DIR).iterdir())) == 2 pkg_dir = [f for f in Path(PKG_DIR).glob("*") if f.is_dir()][0] import sys sys.path.insert(0, str(pkg_dir)) test_actor = ray.get_actor("test_actor") assert sum(ray.get([test_actor.one.remote()] * 1000)) == 1000 ray.kill(test_actor) from time import sleep sleep(5) assert len(list(Path(PKG_DIR).iterdir())) == 1 assert len(kv._internal_kv_list("gcs://")) == 0 @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") def test_jobconfig_compatible_1(ray_start_cluster_head, working_dir): # start job_config=None # start job_config=something cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, True) runtime_env = None # To make the first one hanging there execute_statement 
= """ sleep(600) """ script = driver_script.format(**locals()) # Have one running with job config = None proc = run_string_as_driver_nonblocking(script, env) # waiting it to be up sleep(5) runtime_env = f"""{{ "working_dir": "{working_dir}" }}""" # Execute the second one which should work because Ray Client servers. execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))" script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split()[-1] == "1000" proc.kill() proc.wait() @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") def test_jobconfig_compatible_2(ray_start_cluster_head, working_dir): # start job_config=something # start job_config=None cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, True) runtime_env = """{ "py_modules": [test_module.__path__[0]] }""" # To make the first one hanging there execute_statement = """ sleep(600) """ script = driver_script.format(**locals()) proc = run_string_as_driver_nonblocking(script, env) sleep(5) runtime_env = None # Execute the following in the second one which should # succeed execute_statement = "print('OK')" script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split()[-1] == "OK", out proc.kill() proc.wait() @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") def test_jobconfig_compatible_3(ray_start_cluster_head, working_dir): # start job_config=something # start job_config=something else cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, True) runtime_env = """{ "py_modules": [test_module.__path__[0]] }""" # To make the first one hanging ther execute_statement = """ sleep(600) """ script = driver_script.format(**locals()) proc = run_string_as_driver_nonblocking(script, env) sleep(5) runtime_env = f""" {{ "working_dir": test_module.__path__[0] }}""" # noqa: F541 # Execute the following cmd in the second one and ensure that # it is able to run. 
execute_statement = "print('OK')" script = driver_script.format(**locals()) out = run_string_as_driver(script, env) proc.kill() proc.wait() assert out.strip().split()[-1] == "OK" @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") def test_util_without_job_config(shutdown_only): from ray.cluster_utils import Cluster with tempfile.TemporaryDirectory() as tmp_dir: with (Path(tmp_dir) / "lib.py").open("w") as f: f.write(""" def one(): return 1 """) old_dir = os.getcwd() os.chdir(tmp_dir) cluster = Cluster() cluster.add_node(num_cpus=1) ray.init(address=cluster.address) (address, env, PKG_DIR) = start_client_server(cluster, True) script = f""" import ray import ray.util import os ray.util.connect("{address}", job_config=None) @ray.remote def run(): from lib import one return one() print(ray.get([run.remote()])[0]) """ out = run_string_as_driver(script, env) print(out) os.chdir(old_dir) @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") def test_init(shutdown_only): with tempfile.TemporaryDirectory() as tmp_dir: old_dir = os.getcwd() os.chdir(tmp_dir) with open("hello", "w") as f: f.write("world") job_config = ray.job_config.JobConfig(runtime_env={"working_dir": "."}) ray.init(job_config=job_config) @ray.remote class Test: def test(self): with open("hello") as f: return f.read() t = Test.remote() assert ray.get(t.test.remote()) == "world" os.chdir(old_dir) def test_get_wheel_filename(): ray_version = "2.0.0.dev0" for sys_platform in ["darwin", "linux", "win32"]: for py_version in ["36", "37", "38"]: filename = get_wheel_filename(sys_platform, ray_version, py_version) prefix = "https://s3-us-west-2.amazonaws.com/ray-wheels/latest/" url = f"{prefix}{filename}" assert requests.head(url).status_code == 200 def test_get_master_wheel_url(): ray_version = "2.0.0.dev0" test_commit = "ba6cebe30fab6925e5b2d9e859ad064d53015246" for sys_platform in ["darwin", "linux", "win32"]: for py_version in ["36", "37", "38"]: url = get_master_wheel_url(test_commit, sys_platform, ray_version, py_version) assert requests.head(url).status_code == 200, url def test_get_release_wheel_url(): test_commits = { "1.4.0rc1": "e7c7f6371a69eb727fa469e4cd6f4fbefd143b4c", "1.3.0": "0b4b444fadcdc23226e11fef066b982175804232", "1.2.0": "1b1a2496ca51b745c07c79fb859946d3350d471b" } for sys_platform in ["darwin", "linux", "win32"]: for py_version in ["36", "37", "38"]: for version, commit in test_commits.items(): url = get_release_wheel_url(commit, sys_platform, version, py_version) assert requests.head(url).status_code == 200, url @pytest.mark.skipif( sys.platform == "win32", reason="runtime_env unsupported on Windows.") def test_decorator_task(ray_start_cluster_head): @ray.remote(runtime_env={"env_vars": {"foo": "bar"}}) def f(): return os.environ.get("foo") assert ray.get(f.remote()) == "bar" @pytest.mark.skipif( sys.platform == "win32", reason="runtime_env unsupported on Windows.") def test_decorator_actor(ray_start_cluster_head): @ray.remote(runtime_env={"env_vars": {"foo": "bar"}}) class A: def g(self): return os.environ.get("foo") a = A.remote() assert ray.get(a.g.remote()) == "bar" @pytest.mark.skipif( sys.platform == "win32", reason="runtime_env unsupported on Windows.") def test_decorator_complex(shutdown_only): ray.init( job_config=ray.job_config.JobConfig( runtime_env={"env_vars": { "foo": "job" }})) @ray.remote def env_from_job(): return os.environ.get("foo") assert ray.get(env_from_job.remote()) == "job" @ray.remote(runtime_env={"env_vars": {"foo": "task"}}) def f(): 
return os.environ.get("foo") assert ray.get(f.remote()) == "task" @ray.remote(runtime_env={"env_vars": {"foo": "actor"}}) class A: def g(self): return os.environ.get("foo") a = A.remote() assert ray.get(a.g.remote()) == "actor" # Test that runtime_env can be overridden by specifying .options(). assert ray.get( f.options(runtime_env={ "env_vars": { "foo": "new" } }).remote()) == "new" a = A.options(runtime_env={"env_vars": {"foo": "new2"}}).remote() assert ray.get(a.g.remote()) == "new2" def test_container_option_serialize(): runtime_env = { "container": { "image": "ray:latest", "run_options": ["--name=test"] } } job_config = ray.job_config.JobConfig(runtime_env=runtime_env) job_config_serialized = job_config.serialize() # job_config_serialized is JobConfig protobuf serialized string, # job_config.runtime_env.raw_json has container_option info # job_config.serialized_runtime_env also has container_option info assert job_config_serialized.count(b"image") == 2 def test_working_dir_override_failure(shutdown_only): ray.init() @ray.remote(runtime_env={"working_dir": "."}) def f(): pass with pytest.raises(NotImplementedError): f.remote() @ray.remote def g(): pass with pytest.raises(NotImplementedError): g.options(runtime_env={"working_dir": "."}).remote() @ray.remote(runtime_env={"working_dir": "."}) class A: pass with pytest.raises(NotImplementedError): A.remote() @ray.remote class B: pass with pytest.raises(NotImplementedError): B.options(runtime_env={"working_dir": "."}).remote() if __name__ == "__main__": import sys sys.exit(pytest.main(["-sv", __file__]))
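The test file above exercises runtime_env precedence: a job-level env_vars value, a decorator-level override, and a per-call override via .options(). A minimal sketch of that precedence, assuming a local Ray installation, is shown below; it condenses the behaviour asserted in test_decorator_complex rather than adding anything new.

# Illustrative sketch only -- requires Ray to be installed locally.
import os
import ray

if __name__ == "__main__":
    ray.init(job_config=ray.job_config.JobConfig(
        runtime_env={"env_vars": {"foo": "job"}}))

    @ray.remote
    def from_job():
        return os.environ.get("foo")          # inherits the job-level env var

    @ray.remote(runtime_env={"env_vars": {"foo": "task"}})
    def from_task():
        return os.environ.get("foo")          # decorator overrides the job value

    assert ray.get(from_job.remote()) == "job"
    assert ray.get(from_task.remote()) == "task"
    # .options() overrides the decorator for a single invocation.
    assert ray.get(from_task.options(
        runtime_env={"env_vars": {"foo": "call"}}).remote()) == "call"
    ray.shutdown()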
35.537037
79
0.640021
import os import pytest import sys import random import tempfile import requests from pathlib import Path import ray from ray.test_utils import (run_string_as_driver, run_string_as_driver_nonblocking) from ray._private.utils import (get_wheel_filename, get_master_wheel_url, get_release_wheel_url) import ray.experimental.internal_kv as kv from time import sleep driver_script = """ from time import sleep import sys import logging sys.path.insert(0, "{working_dir}") import ray import ray.util import os try: import test_module except: pass try: job_config = ray.job_config.JobConfig( runtime_env={runtime_env} ) if not job_config.runtime_env: job_config=None if os.environ.get("USE_RAY_CLIENT"): ray.client("{address}").env({runtime_env}).namespace("").connect() else: ray.init(address="{address}", job_config=job_config, logging_level=logging.DEBUG, namespace="" ) except ValueError: print("ValueError") sys.exit(0) except TypeError: print("TypeError") sys.exit(0) except: print("ERROR") sys.exit(0) if os.environ.get("EXIT_AFTER_INIT"): sys.exit(0) @ray.remote def run_test(): return test_module.one() @ray.remote def check_file(name): try: with open(name) as f: return f.read() except: return "FAILED" @ray.remote class TestActor(object): @ray.method(num_returns=1) def one(self): return test_module.one() {execute_statement} if os.environ.get("USE_RAY_CLIENT"): ray.util.disconnect() else: ray.shutdown() sleep(10) """ def create_file(p): if not p.parent.exists(): p.parent.mkdir() with p.open("w") as f: f.write("Test") @pytest.fixture(scope="function") def working_dir(): with tempfile.TemporaryDirectory() as tmp_dir: path = Path(tmp_dir) module_path = path / "test_module" module_path.mkdir(parents=True) init_file = module_path / "__init__.py" test_file = module_path / "test.py" with test_file.open(mode="w") as f: f.write(""" def one(): return 1 """) with init_file.open(mode="w") as f: f.write(""" from test_module.test import one """) old_dir = os.getcwd() os.chdir(tmp_dir) yield tmp_dir os.chdir(old_dir) def start_client_server(cluster, client_mode): from ray._private.runtime_env import PKG_DIR if not client_mode: return (cluster.address, {}, PKG_DIR) ray.worker._global_node._ray_params.ray_client_server_port = "10003" ray.worker._global_node.start_ray_client_server() return ("localhost:10003", {"USE_RAY_CLIENT": "1"}, PKG_DIR) @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") def test_travel(): import uuid with tempfile.TemporaryDirectory() as tmp_dir: dir_paths = set() file_paths = set() item_num = 0 excludes = [] root = Path(tmp_dir) / "test" def construct(path, excluded=False, depth=0): nonlocal item_num path.mkdir(parents=True) if not excluded: dir_paths.add(str(path)) if depth > 8: return if item_num > 500: return dir_num = random.randint(0, 10) file_num = random.randint(0, 10) for _ in range(dir_num): uid = str(uuid.uuid4()).split("-")[0] dir_path = path / uid exclud_sub = random.randint(0, 5) == 0 if not excluded and exclud_sub: excludes.append(str(dir_path.relative_to(root))) if not excluded: construct(dir_path, exclud_sub or excluded, depth + 1) item_num += 1 if item_num > 1000: return for _ in range(file_num): uid = str(uuid.uuid4()).split("-")[0] with (path / uid).open("w") as f: v = random.randint(0, 1000) f.write(str(v)) if not excluded: if random.randint(0, 5) == 0: excludes.append( str((path / uid).relative_to(root))) else: file_paths.add((str(path / uid), str(v))) item_num += 1 construct(root) exclude_spec = ray._private.runtime_env._get_excludes(root, 
excludes) visited_dir_paths = set() visited_file_paths = set() def handler(path): if path.is_dir(): visited_dir_paths.add(str(path)) else: with open(path) as f: visited_file_paths.add((str(path), f.read())) ray._private.runtime_env._dir_travel(root, [exclude_spec], handler) assert file_paths == visited_file_paths assert dir_paths == visited_dir_paths @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_empty_working_dir(ray_start_cluster_head, client_mode): cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, client_mode) env["EXIT_AFTER_INIT"] = "1" with tempfile.TemporaryDirectory() as working_dir: runtime_env = f"""{{ "working_dir": r"{working_dir}", "py_modules": [r"{working_dir}"] }}""" execute_statement = "sys.exit(0)" script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out != "ERROR" @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_invalid_working_dir(ray_start_cluster_head, working_dir, client_mode): cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, client_mode) env["EXIT_AFTER_INIT"] = "1" runtime_env = "{ 'working_dir': 10 }" execute_statement = "" script = driver_script.format(**locals()) out = run_string_as_driver(script, env).strip().split()[-1] assert out == "TypeError" runtime_env = "{ 'py_modules': [10] }" execute_statement = "" script = driver_script.format(**locals()) out = run_string_as_driver(script, env).strip().split()[-1] assert out == "TypeError" runtime_env = f"{{ 'working_dir': os.path.join(r'{working_dir}', 'na') }}" execute_statement = "" script = driver_script.format(**locals()) out = run_string_as_driver(script, env).strip().split()[-1] assert out == "ValueError" runtime_env = f"{{ 'py_modules': [os.path.join(r'{working_dir}', 'na')] }}" execute_statement = "" script = driver_script.format(**locals()) out = run_string_as_driver(script, env).strip().split()[-1] assert out == "ValueError" @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_single_node(ray_start_cluster_head, working_dir, client_mode): cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, client_mode) runtime_env = f"""{{ "working_dir": "{working_dir}" }}""" execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))" script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split()[-1] == "1000" assert len(list(Path(PKG_DIR).iterdir())) == 1 assert len(kv._internal_kv_list("gcs://")) == 0 @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_two_node(two_node_cluster, working_dir, client_mode): cluster, _ = two_node_cluster (address, env, PKG_DIR) = start_client_server(cluster, client_mode) runtime_env = f"""{{ "working_dir": "{working_dir}" }}""" execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))" script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split()[-1] == "1000" assert len(list(Path(PKG_DIR).iterdir())) == 1 assert len(kv._internal_kv_list("gcs://")) == 0 @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") 
@pytest.mark.parametrize("client_mode", [True, False]) def test_two_node_module(two_node_cluster, working_dir, client_mode): cluster, _ = two_node_cluster (address, env, PKG_DIR) = start_client_server(cluster, client_mode) runtime_env = """{ "py_modules": [test_module.__path__[0]] }""" execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))" script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split()[-1] == "1000" assert len(list(Path(PKG_DIR).iterdir())) == 1 @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_two_node_local_file(two_node_cluster, working_dir, client_mode): with open(os.path.join(working_dir, "test_file"), "w") as f: f.write("1") cluster, _ = two_node_cluster (address, env, PKG_DIR) = start_client_server(cluster, client_mode) runtime_env = f"""{{ "working_dir": "{working_dir}" }}""" execute_statement = """ vals = ray.get([check_file.remote('test_file')] * 1000) print(sum([int(v) for v in vals])) """ script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split()[-1] == "1000" assert len(list(Path(PKG_DIR).iterdir())) == 1 assert len(kv._internal_kv_list("gcs://")) == 0 @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_exclusion(ray_start_cluster_head, working_dir, client_mode): cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, client_mode) working_path = Path(working_dir) create_file(working_path / "tmp_dir" / "test_1") create_file(working_path / "tmp_dir" / "test_2") create_file(working_path / "tmp_dir" / "test_3") create_file(working_path / "tmp_dir" / "sub_dir" / "test_1") create_file(working_path / "tmp_dir" / "sub_dir" / "test_2") create_file(working_path / "test1") create_file(working_path / "test2") create_file(working_path / "test3") tmp_dir_test_3 = str((working_path / "tmp_dir" / "test_3").absolute()) runtime_env = f"""{{ "working_dir": r"{working_dir}", }}""" execute_statement = """ vals = ray.get([ check_file.remote('test1'), check_file.remote('test2'), check_file.remote('test3'), check_file.remote(os.path.join('tmp_dir', 'test_1')), check_file.remote(os.path.join('tmp_dir', 'test_2')), check_file.remote(os.path.join('tmp_dir', 'test_3')), check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_1')), check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_2')), ]) print(','.join(vals)) """ script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split("\n")[-1] == \ "Test,Test,Test,Test,Test,Test,Test,Test" runtime_env = f"""{{ "working_dir": r"{working_dir}", "excludes": [ # exclude by relative path r"test2", # exclude by dir r"{str(Path("tmp_dir") / "sub_dir")}", # exclude part of the dir r"{str(Path("tmp_dir") / "test_1")}", # exclude part of the dir r"{str(Path("tmp_dir") / "test_2")}", ] }}""" script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split("\n")[-1] == \ "Test,FAILED,Test,FAILED,FAILED,Test,FAILED,FAILED" @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_exclusion_2(ray_start_cluster_head, working_dir, client_mode): cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, client_mode) working_path = 
Path(working_dir) def create_file(p): if not p.parent.exists(): p.parent.mkdir(parents=True) with p.open("w") as f: f.write("Test") create_file(working_path / "tmp_dir" / "test_1") create_file(working_path / "tmp_dir" / "test_2") create_file(working_path / "tmp_dir" / "test_3") create_file(working_path / "tmp_dir" / "sub_dir" / "test_1") create_file(working_path / "tmp_dir" / "sub_dir" / "test_2") create_file(working_path / "test1") create_file(working_path / "test2") create_file(working_path / "test3") create_file(working_path / "cache" / "test_1") create_file(working_path / "tmp_dir" / "cache" / "test_1") create_file(working_path / "another_dir" / "cache" / "test_1") tmp_dir_test_3 = str((working_path / "tmp_dir" / "test_3").absolute()) runtime_env = f"""{{ "working_dir": r"{working_dir}", }}""" execute_statement = """ vals = ray.get([ check_file.remote('test1'), check_file.remote('test2'), check_file.remote('test3'), check_file.remote(os.path.join('tmp_dir', 'test_1')), check_file.remote(os.path.join('tmp_dir', 'test_2')), check_file.remote(os.path.join('tmp_dir', 'test_3')), check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_1')), check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_2')), check_file.remote(os.path.join("cache", "test_1")), check_file.remote(os.path.join("tmp_dir", "cache", "test_1")), check_file.remote(os.path.join("another_dir", "cache", "test_1")), ]) print(','.join(vals)) """ script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split("\n")[-1] == \ "Test,Test,Test,Test,Test,Test,Test,Test,Test,Test,Test" with open(f"{working_dir}/.gitignore", "w") as f: f.write(""" # Comment test_[12] /test1 !/tmp_dir/sub_dir/test_1 cache/ """) script = driver_script.format(**locals()) out = run_string_as_driver(script, env) t = out.strip().split("\n")[-1] assert out.strip().split("\n")[-1] == \ "FAILED,Test,Test,FAILED,FAILED,Test,Test,FAILED,FAILED,FAILED,FAILED" @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_runtime_env_getter(ray_start_cluster_head, working_dir, client_mode): cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, client_mode) runtime_env = f"""{{ "working_dir": "{working_dir}" }}""" execute_statement = """ print(ray.get_runtime_context().runtime_env["working_dir"]) """ script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split()[-1] == working_dir @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_two_node_uri(two_node_cluster, working_dir, client_mode): cluster, _ = two_node_cluster (address, env, PKG_DIR) = start_client_server(cluster, client_mode) import ray._private.runtime_env as runtime_env import tempfile with tempfile.NamedTemporaryFile(suffix="zip") as tmp_file: pkg_name = runtime_env.get_project_package_name(working_dir, [], []) pkg_uri = runtime_env.Protocol.PIN_GCS.value + "://" + pkg_name runtime_env.create_project_package(working_dir, [], [], tmp_file.name) runtime_env.push_package(pkg_uri, tmp_file.name) runtime_env = f"""{{ "uris": ["{pkg_uri}"] }}""" execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))" script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split()[-1] == "1000" assert len(list(Path(PKG_DIR).iterdir())) == 1 
print(list(kv._internal_kv_list(""))) assert len(kv._internal_kv_list("pingcs://")) == 1 @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_regular_actors(ray_start_cluster_head, working_dir, client_mode): cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, client_mode) runtime_env = f"""{{ "working_dir": "{working_dir}" }}""" execute_statement = """ test_actor = TestActor.options(name="test_actor").remote() print(sum(ray.get([test_actor.one.remote()] * 1000))) """ script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split()[-1] == "1000" assert len(list(Path(PKG_DIR).iterdir())) == 1 assert len(kv._internal_kv_list("gcs://")) == 0 @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_detached_actors(ray_start_cluster_head, working_dir, client_mode): cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, client_mode) runtime_env = f"""{{ "working_dir": "{working_dir}" }}""" execute_statement = """ test_actor = TestActor.options(name="test_actor", lifetime="detached").remote() print(sum(ray.get([test_actor.one.remote()] * 1000))) """ script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split()[-1] == "1000" assert len(kv._internal_kv_list("gcs://")) == 1 assert len(list(Path(PKG_DIR).iterdir())) == 2 pkg_dir = [f for f in Path(PKG_DIR).glob("*") if f.is_dir()][0] import sys sys.path.insert(0, str(pkg_dir)) test_actor = ray.get_actor("test_actor") assert sum(ray.get([test_actor.one.remote()] * 1000)) == 1000 ray.kill(test_actor) from time import sleep sleep(5) assert len(list(Path(PKG_DIR).iterdir())) == 1 assert len(kv._internal_kv_list("gcs://")) == 0 @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") def test_jobconfig_compatible_1(ray_start_cluster_head, working_dir): # start job_config=None # start job_config=something cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, True) runtime_env = None # To make the first one hanging there execute_statement = """ sleep(600) """ script = driver_script.format(**locals()) # Have one running with job config = None proc = run_string_as_driver_nonblocking(script, env) # waiting it to be up sleep(5) runtime_env = f"""{{ "working_dir": "{working_dir}" }}""" # Execute the second one which should work because Ray Client servers. 
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))" script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split()[-1] == "1000" proc.kill() proc.wait() @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") def test_jobconfig_compatible_2(ray_start_cluster_head, working_dir): # start job_config=something # start job_config=None cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, True) runtime_env = """{ "py_modules": [test_module.__path__[0]] }""" # To make the first one hanging there execute_statement = """ sleep(600) """ script = driver_script.format(**locals()) proc = run_string_as_driver_nonblocking(script, env) sleep(5) runtime_env = None # Execute the following in the second one which should # succeed execute_statement = "print('OK')" script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split()[-1] == "OK", out proc.kill() proc.wait() @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") def test_jobconfig_compatible_3(ray_start_cluster_head, working_dir): # start job_config=something # start job_config=something else cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, True) runtime_env = """{ "py_modules": [test_module.__path__[0]] }""" # To make the first one hanging ther execute_statement = """ sleep(600) """ script = driver_script.format(**locals()) proc = run_string_as_driver_nonblocking(script, env) sleep(5) runtime_env = f""" {{ "working_dir": test_module.__path__[0] }}""" # noqa: F541 # Execute the following cmd in the second one and ensure that # it is able to run. execute_statement = "print('OK')" script = driver_script.format(**locals()) out = run_string_as_driver(script, env) proc.kill() proc.wait() assert out.strip().split()[-1] == "OK" @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") def test_util_without_job_config(shutdown_only): from ray.cluster_utils import Cluster with tempfile.TemporaryDirectory() as tmp_dir: with (Path(tmp_dir) / "lib.py").open("w") as f: f.write(""" def one(): return 1 """) old_dir = os.getcwd() os.chdir(tmp_dir) cluster = Cluster() cluster.add_node(num_cpus=1) ray.init(address=cluster.address) (address, env, PKG_DIR) = start_client_server(cluster, True) script = f""" import ray import ray.util import os ray.util.connect("{address}", job_config=None) @ray.remote def run(): from lib import one return one() print(ray.get([run.remote()])[0]) """ out = run_string_as_driver(script, env) print(out) os.chdir(old_dir) @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") def test_init(shutdown_only): with tempfile.TemporaryDirectory() as tmp_dir: old_dir = os.getcwd() os.chdir(tmp_dir) with open("hello", "w") as f: f.write("world") job_config = ray.job_config.JobConfig(runtime_env={"working_dir": "."}) ray.init(job_config=job_config) @ray.remote class Test: def test(self): with open("hello") as f: return f.read() t = Test.remote() assert ray.get(t.test.remote()) == "world" os.chdir(old_dir) def test_get_wheel_filename(): ray_version = "2.0.0.dev0" for sys_platform in ["darwin", "linux", "win32"]: for py_version in ["36", "37", "38"]: filename = get_wheel_filename(sys_platform, ray_version, py_version) prefix = "https://s3-us-west-2.amazonaws.com/ray-wheels/latest/" url = f"{prefix}{filename}" assert requests.head(url).status_code == 200 def 
test_get_master_wheel_url(): ray_version = "2.0.0.dev0" test_commit = "ba6cebe30fab6925e5b2d9e859ad064d53015246" for sys_platform in ["darwin", "linux", "win32"]: for py_version in ["36", "37", "38"]: url = get_master_wheel_url(test_commit, sys_platform, ray_version, py_version) assert requests.head(url).status_code == 200, url def test_get_release_wheel_url(): test_commits = { "1.4.0rc1": "e7c7f6371a69eb727fa469e4cd6f4fbefd143b4c", "1.3.0": "0b4b444fadcdc23226e11fef066b982175804232", "1.2.0": "1b1a2496ca51b745c07c79fb859946d3350d471b" } for sys_platform in ["darwin", "linux", "win32"]: for py_version in ["36", "37", "38"]: for version, commit in test_commits.items(): url = get_release_wheel_url(commit, sys_platform, version, py_version) assert requests.head(url).status_code == 200, url @pytest.mark.skipif( sys.platform == "win32", reason="runtime_env unsupported on Windows.") def test_decorator_task(ray_start_cluster_head): @ray.remote(runtime_env={"env_vars": {"foo": "bar"}}) def f(): return os.environ.get("foo") assert ray.get(f.remote()) == "bar" @pytest.mark.skipif( sys.platform == "win32", reason="runtime_env unsupported on Windows.") def test_decorator_actor(ray_start_cluster_head): @ray.remote(runtime_env={"env_vars": {"foo": "bar"}}) class A: def g(self): return os.environ.get("foo") a = A.remote() assert ray.get(a.g.remote()) == "bar" @pytest.mark.skipif( sys.platform == "win32", reason="runtime_env unsupported on Windows.") def test_decorator_complex(shutdown_only): ray.init( job_config=ray.job_config.JobConfig( runtime_env={"env_vars": { "foo": "job" }})) @ray.remote def env_from_job(): return os.environ.get("foo") assert ray.get(env_from_job.remote()) == "job" @ray.remote(runtime_env={"env_vars": {"foo": "task"}}) def f(): return os.environ.get("foo") assert ray.get(f.remote()) == "task" @ray.remote(runtime_env={"env_vars": {"foo": "actor"}}) class A: def g(self): return os.environ.get("foo") a = A.remote() assert ray.get(a.g.remote()) == "actor" # Test that runtime_env can be overridden by specifying .options(). assert ray.get( f.options(runtime_env={ "env_vars": { "foo": "new" } }).remote()) == "new" a = A.options(runtime_env={"env_vars": {"foo": "new2"}}).remote() assert ray.get(a.g.remote()) == "new2" def test_container_option_serialize(): runtime_env = { "container": { "image": "ray:latest", "run_options": ["--name=test"] } } job_config = ray.job_config.JobConfig(runtime_env=runtime_env) job_config_serialized = job_config.serialize() # job_config_serialized is JobConfig protobuf serialized string, # job_config.runtime_env.raw_json has container_option info # job_config.serialized_runtime_env also has container_option info assert job_config_serialized.count(b"image") == 2 def test_working_dir_override_failure(shutdown_only): ray.init() @ray.remote(runtime_env={"working_dir": "."}) def f(): pass with pytest.raises(NotImplementedError): f.remote() @ray.remote def g(): pass with pytest.raises(NotImplementedError): g.options(runtime_env={"working_dir": "."}).remote() @ray.remote(runtime_env={"working_dir": "."}) class A: pass with pytest.raises(NotImplementedError): A.remote() @ray.remote class B: pass with pytest.raises(NotImplementedError): B.options(runtime_env={"working_dir": "."}).remote() if __name__ == "__main__": import sys sys.exit(pytest.main(["-sv", __file__]))
true
true
f702a468588e10d781ab9dbc62f627d8b569cd7c
262
py
Python
src/PostService/__init__.py
Group-13-Bachelor/Microservice
c7186953e6ef63d141ea148e74b6bbbe3242f71e
[ "MIT" ]
null
null
null
src/PostService/__init__.py
Group-13-Bachelor/Microservice
c7186953e6ef63d141ea148e74b6bbbe3242f71e
[ "MIT" ]
1
2022-02-14T09:31:04.000Z
2022-03-02T13:04:33.000Z
src/PostService/__init__.py
Group-13-Bachelor/Microservice
c7186953e6ef63d141ea148e74b6bbbe3242f71e
[ "MIT" ]
null
null
null
from flask import Flask from flask_sqlalchemy import SQLAlchemy app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///posts.db' app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False # app.config['FLASK_RUN_PORT'] = 5002 db = SQLAlchemy(app)
23.818182
60
0.774809
from flask import Flask from flask_sqlalchemy import SQLAlchemy app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///posts.db' app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False db = SQLAlchemy(app)
true
true
f702a49b0738bb825345e613e66c54392b82e80e
1,670
py
Python
test/test_get_account_plan.py
Danilka/APIv3-python-library
c96472f47d652d2e09e8b4a48a80e33fde06e7f1
[ "MIT" ]
null
null
null
test/test_get_account_plan.py
Danilka/APIv3-python-library
c96472f47d652d2e09e8b4a48a80e33fde06e7f1
[ "MIT" ]
null
null
null
test/test_get_account_plan.py
Danilka/APIv3-python-library
c96472f47d652d2e09e8b4a48a80e33fde06e7f1
[ "MIT" ]
null
null
null
# coding: utf-8 """ SendinBlue API SendinBlue provide a RESTFul API that can be used with any languages. With this API, you will be able to : - Manage your campaigns and get the statistics - Manage your contacts - Send transactional Emails and SMS - and much more... You can download our wrappers at https://github.com/orgs/sendinblue **Possible responses** | Code | Message | | :-------------: | ------------- | | 200 | OK. Successful Request | | 201 | OK. Successful Creation | | 202 | OK. Request accepted | | 204 | OK. Successful Update/Deletion | | 400 | Error. Bad Request | | 401 | Error. Authentication Needed | | 402 | Error. Not enough credit, plan upgrade needed | | 403 | Error. Permission denied | | 404 | Error. Object does not exist | | 405 | Error. Method not allowed | # noqa: E501 OpenAPI spec version: 3.0.0 Contact: contact@sendinblue.com Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import unittest import sib_api_v3_sdk from sib_api_v3_sdk.models.get_account_plan import GetAccountPlan # noqa: E501 from sib_api_v3_sdk.rest import ApiException class TestGetAccountPlan(unittest.TestCase): """GetAccountPlan unit test stubs""" def setUp(self): pass def tearDown(self): pass def testGetAccountPlan(self): """Test GetAccountPlan""" # FIXME: construct object with mandatory attributes with example values # model = sib_api_v3_sdk.models.get_account_plan.GetAccountPlan() # noqa: E501 pass if __name__ == '__main__': unittest.main()
40.731707
820
0.673653
from __future__ import absolute_import import unittest import sib_api_v3_sdk from sib_api_v3_sdk.models.get_account_plan import GetAccountPlan from sib_api_v3_sdk.rest import ApiException class TestGetAccountPlan(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def testGetAccountPlan(self): s if __name__ == '__main__': unittest.main()
true
true
f702a4a82b4b1953954629cd1af554b044385a86
5,977
py
Python
model/video_cnn.py
Fengdalu/LEARN-AN-EFFECTIVE-LIP-READING-MODEL-WITHOUT-PAINS
8d5eef415c19b4c5e161259b1222fbfec6a5edb0
[ "MIT" ]
92
2020-11-16T04:23:14.000Z
2022-02-28T19:29:18.000Z
model/video_cnn.py
echoyinke/learn-an-effective-lip-reading-model-without-pains
8d5eef415c19b4c5e161259b1222fbfec6a5edb0
[ "MIT" ]
21
2020-12-26T05:53:56.000Z
2022-01-26T06:47:18.000Z
model/video_cnn.py
echoyinke/learn-an-effective-lip-reading-model-without-pains
8d5eef415c19b4c5e161259b1222fbfec6a5edb0
[ "MIT" ]
22
2020-11-20T04:09:37.000Z
2021-12-25T13:27:27.000Z
# coding: utf-8 import math import numpy as np import torch import torch.nn as nn from torch.autograd import Variable import torch.nn.functional as F def conv3x3(in_planes, out_planes, stride=1): return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) def conv1x1(in_planes, out_planes, stride=1): return nn.Conv2d(in_planes, out_planes, kernel_size=1) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None, se=False): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride self.se = se if(self.se): self.gap = nn.AdaptiveAvgPool2d(1) self.conv3 = conv1x1(planes, planes//16) self.conv4 = conv1x1(planes//16, planes) def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) if(self.se): w = self.gap(out) w = self.conv3(w) w = self.relu(w) w = self.conv4(w).sigmoid() out = out * w out = out + residual out = self.relu(out) return out class ResNet(nn.Module): def __init__(self, block, layers, se=False): self.inplanes = 64 super(ResNet, self).__init__() self.se = se self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2) self.avgpool = nn.AdaptiveAvgPool2d(1) self.bn = nn.BatchNorm1d(512) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. 
/ n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.BatchNorm1d): m.weight.data.fill_(1) m.bias.data.zero_() def _make_layer(self, block, planes, blocks, stride=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample, se=self.se)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes, se=self.se)) return nn.Sequential(*layers) def forward(self, x): x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) x = self.avgpool(x) x = x.view(x.size(0), -1) x = self.bn(x) return x class VideoCNN(nn.Module): def __init__(self, se=False): super(VideoCNN, self).__init__() # frontend3D self.frontend3D = nn.Sequential( nn.Conv3d(1, 64, kernel_size=(5, 7, 7), stride=(1, 2, 2), padding=(2, 3, 3), bias=False), nn.BatchNorm3d(64), nn.ReLU(True), nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1)) ) # resnet self.resnet18 = ResNet(BasicBlock, [2, 2, 2, 2], se=se) self.dropout = nn.Dropout(p=0.5) # backend_gru # initialize self._initialize_weights() def visual_frontend_forward(self, x): x = x.transpose(1, 2) x = self.frontend3D(x) x = x.transpose(1, 2) x = x.contiguous() x = x.view(-1, 64, x.size(3), x.size(4)) x = self.resnet18(x) return x def forward(self, x): b, t = x.size()[:2] x = self.visual_frontend_forward(x) #x = self.dropout(x) feat = x.view(b, -1, 512) x = x.view(b, -1, 512) return x def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv3d): n = m.kernel_size[0] * m.kernel_size[1] * m.kernel_size[2] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) if m.bias is not None: m.bias.data.zero_() elif isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) if m.bias is not None: m.bias.data.zero_() elif isinstance(m, nn.Conv1d): n = m.kernel_size[0] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) if m.bias is not None: m.bias.data.zero_() elif isinstance(m, nn.BatchNorm3d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.BatchNorm1d): m.weight.data.fill_(1) m.bias.data.zero_()
31.624339
105
0.52953
import math import numpy as np import torch import torch.nn as nn from torch.autograd import Variable import torch.nn.functional as F def conv3x3(in_planes, out_planes, stride=1): return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) def conv1x1(in_planes, out_planes, stride=1): return nn.Conv2d(in_planes, out_planes, kernel_size=1) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None, se=False): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride self.se = se if(self.se): self.gap = nn.AdaptiveAvgPool2d(1) self.conv3 = conv1x1(planes, planes//16) self.conv4 = conv1x1(planes//16, planes) def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) if(self.se): w = self.gap(out) w = self.conv3(w) w = self.relu(w) w = self.conv4(w).sigmoid() out = out * w out = out + residual out = self.relu(out) return out class ResNet(nn.Module): def __init__(self, block, layers, se=False): self.inplanes = 64 super(ResNet, self).__init__() self.se = se self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2) self.avgpool = nn.AdaptiveAvgPool2d(1) self.bn = nn.BatchNorm1d(512) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. 
/ n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.BatchNorm1d): m.weight.data.fill_(1) m.bias.data.zero_() def _make_layer(self, block, planes, blocks, stride=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample, se=self.se)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes, se=self.se)) return nn.Sequential(*layers) def forward(self, x): x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) x = self.avgpool(x) x = x.view(x.size(0), -1) x = self.bn(x) return x class VideoCNN(nn.Module): def __init__(self, se=False): super(VideoCNN, self).__init__() self.frontend3D = nn.Sequential( nn.Conv3d(1, 64, kernel_size=(5, 7, 7), stride=(1, 2, 2), padding=(2, 3, 3), bias=False), nn.BatchNorm3d(64), nn.ReLU(True), nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1)) ) self.resnet18 = ResNet(BasicBlock, [2, 2, 2, 2], se=se) self.dropout = nn.Dropout(p=0.5) self._initialize_weights() def visual_frontend_forward(self, x): x = x.transpose(1, 2) x = self.frontend3D(x) x = x.transpose(1, 2) x = x.contiguous() x = x.view(-1, 64, x.size(3), x.size(4)) x = self.resnet18(x) return x def forward(self, x): b, t = x.size()[:2] x = self.visual_frontend_forward(x) feat = x.view(b, -1, 512) x = x.view(b, -1, 512) return x def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv3d): n = m.kernel_size[0] * m.kernel_size[1] * m.kernel_size[2] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) if m.bias is not None: m.bias.data.zero_() elif isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) if m.bias is not None: m.bias.data.zero_() elif isinstance(m, nn.Conv1d): n = m.kernel_size[0] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) if m.bias is not None: m.bias.data.zero_() elif isinstance(m, nn.BatchNorm3d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.BatchNorm1d): m.weight.data.fill_(1) m.bias.data.zero_()
true
true
f702a59f427362751552fc4050a4365a830015ce
740
py
Python
hack-at-brown-2015/tweepy/__init__.py
hackatbrown/2015.hackatbrown.org
6e6e10b010421228deb562909a1c8bb4272b759f
[ "MIT" ]
24
2015-11-12T06:33:24.000Z
2019-04-16T11:11:13.000Z
hack-at-brown-2015/tweepy/__init__.py
hackatbrown/2015.hackatbrown.org
6e6e10b010421228deb562909a1c8bb4272b759f
[ "MIT" ]
3
2015-11-12T22:16:22.000Z
2021-08-09T07:00:27.000Z
hack-at-brown-2015/tweepy/__init__.py
hackatbrown/2015.hackatbrown.org
6e6e10b010421228deb562909a1c8bb4272b759f
[ "MIT" ]
7
2015-11-12T20:09:56.000Z
2020-12-16T17:59:02.000Z
# Tweepy # Copyright 2009-2010 Joshua Roesslein # See LICENSE for details. """ Tweepy Twitter API library """ __version__ = '3.2.0' __author__ = 'Joshua Roesslein' __license__ = 'MIT' from tweepy.models import Status, User, DirectMessage, Friendship, SavedSearch, SearchResults, ModelFactory, Category from tweepy.error import TweepError from tweepy.api import API from tweepy.cache import Cache, MemoryCache, FileCache from tweepy.auth import OAuthHandler, AppAuthHandler from tweepy.streaming import Stream, StreamListener from tweepy.cursor import Cursor # Global, unauthenticated instance of API api = API() def debug(enable=True, level=1): from six.moves.http_client import HTTPConnection HTTPConnection.debuglevel = level
28.461538
117
0.793243
__version__ = '3.2.0' __author__ = 'Joshua Roesslein' __license__ = 'MIT' from tweepy.models import Status, User, DirectMessage, Friendship, SavedSearch, SearchResults, ModelFactory, Category from tweepy.error import TweepError from tweepy.api import API from tweepy.cache import Cache, MemoryCache, FileCache from tweepy.auth import OAuthHandler, AppAuthHandler from tweepy.streaming import Stream, StreamListener from tweepy.cursor import Cursor api = API() def debug(enable=True, level=1): from six.moves.http_client import HTTPConnection HTTPConnection.debuglevel = level
true
true
f702a73b23acce91aa9cdd7f67b38c7b12687972
162,774
py
Python
test/probe/test_sharder.py
masoud91/swift
19b2e3756521d050a95fb2808d4780d52e7be97f
[ "Apache-2.0" ]
3
2020-08-10T14:13:49.000Z
2021-08-08T10:21:31.000Z
test/probe/test_sharder.py
masoud91/swift
19b2e3756521d050a95fb2808d4780d52e7be97f
[ "Apache-2.0" ]
1
2021-02-02T09:35:05.000Z
2021-02-02T09:35:05.000Z
test/probe/test_sharder.py
masoud91/swift
19b2e3756521d050a95fb2808d4780d52e7be97f
[ "Apache-2.0" ]
null
null
null
# Copyright (c) 2017 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import shutil import subprocess import uuid from nose import SkipTest import six from six.moves.urllib.parse import quote from swift.common import direct_client, utils from swift.common.manager import Manager from swift.common.memcached import MemcacheRing from swift.common.utils import ShardRange, parse_db_filename, get_db_files, \ quorum_size, config_true_value, Timestamp, md5 from swift.container.backend import ContainerBroker, UNSHARDED, SHARDING, \ SHARDED from swift.container.sharder import CleavingContext, ContainerSharder from swift.container.replicator import ContainerReplicator from swiftclient import client, get_auth, ClientException from swift.proxy.controllers.base import get_cache_key from swift.proxy.controllers.obj import num_container_updates from test import annotate_failure from test.probe import PROXY_BASE_URL from test.probe.brain import BrainSplitter from test.probe.common import ReplProbeTest, get_server_number, \ wait_for_server_to_hangup import mock MIN_SHARD_CONTAINER_THRESHOLD = 4 MAX_SHARD_CONTAINER_THRESHOLD = 100 class ShardCollector(object): """ Returns map of node to tuples of (headers, shard ranges) returned from node """ def __init__(self): self.ranges = {} def __call__(self, cnode, cpart, account, container): self.ranges[cnode['id']] = direct_client.direct_get_container( cnode, cpart, account, container, headers={'X-Backend-Record-Type': 'shard'}) class BaseTestContainerSharding(ReplProbeTest): DELIM = '-' def _maybe_skip_test(self): try: cont_configs = [ utils.readconf(p, 'container-sharder') for p in self.configs['container-sharder'].values()] except ValueError: raise SkipTest('No [container-sharder] section found in ' 'container-server configs') skip_reasons = [] auto_shard = all(config_true_value(c.get('auto_shard', False)) for c in cont_configs) if not auto_shard: skip_reasons.append( 'auto_shard must be true in all container_sharder configs') self.max_shard_size = max( int(c.get('shard_container_threshold', '1000000')) for c in cont_configs) if not (MIN_SHARD_CONTAINER_THRESHOLD <= self.max_shard_size <= MAX_SHARD_CONTAINER_THRESHOLD): skip_reasons.append( 'shard_container_threshold %d must be between %d and %d' % (self.max_shard_size, MIN_SHARD_CONTAINER_THRESHOLD, MAX_SHARD_CONTAINER_THRESHOLD)) def skip_check(reason_list, option, required): values = {int(c.get(option, required)) for c in cont_configs} if values != {required}: reason_list.append('%s must be %s' % (option, required)) skip_check(skip_reasons, 'shard_scanner_batch_size', 10) skip_check(skip_reasons, 'shard_batch_size', 2) if skip_reasons: raise SkipTest(', '.join(skip_reasons)) def _load_rings_and_configs(self): super(BaseTestContainerSharding, self)._load_rings_and_configs() # perform checks for skipping test before starting services self._maybe_skip_test() def _make_object_names(self, number, start=0): return ['obj%s%04d' % (self.DELIM, x) for x in range(start, start + number)] def 
_setup_container_name(self): # Container where we're PUTting objects self.container_name = 'container%s%s' % (self.DELIM, uuid.uuid4()) def setUp(self): client.logger.setLevel(client.logging.WARNING) client.requests.logging.getLogger().setLevel( client.requests.logging.WARNING) super(BaseTestContainerSharding, self).setUp() _, self.admin_token = get_auth( PROXY_BASE_URL + '/auth/v1.0', 'admin:admin', 'admin') self._setup_container_name() self.init_brain(self.container_name) self.sharders = Manager(['container-sharder']) self.internal_client = self.make_internal_client() self.memcache = MemcacheRing(['127.0.0.1:11211']) def init_brain(self, container_name): self.container_to_shard = container_name self.brain = BrainSplitter( self.url, self.token, self.container_to_shard, None, 'container') self.brain.put_container(policy_index=int(self.policy)) def stop_container_servers(self, node_numbers=None): if node_numbers: ipports = [] server2ipport = {v: k for k, v in self.ipport2server.items()} for number in self.brain.node_numbers[node_numbers]: self.brain.servers.stop(number=number) server = 'container%d' % number ipports.append(server2ipport[server]) else: ipports = [k for k, v in self.ipport2server.items() if v.startswith('container')] self.brain.servers.stop() for ipport in ipports: wait_for_server_to_hangup(ipport) def put_objects(self, obj_names, contents=None): conn = client.Connection(preauthurl=self.url, preauthtoken=self.token) results = [] for obj in obj_names: rdict = {} conn.put_object(self.container_name, obj, contents=contents, response_dict=rdict) results.append((obj, rdict['headers'].get('x-object-version-id'))) return results def delete_objects(self, obj_names_and_versions): conn = client.Connection(preauthurl=self.url, preauthtoken=self.token) for obj in obj_names_and_versions: if isinstance(obj, tuple): obj, version = obj conn.delete_object(self.container_name, obj, query_string='version-id=%s' % version) else: conn.delete_object(self.container_name, obj) def get_container_shard_ranges(self, account=None, container=None, include_deleted=False): account = account if account else self.account container = container if container else self.container_to_shard path = self.internal_client.make_path(account, container) headers = {'X-Backend-Record-Type': 'shard'} if include_deleted: headers['X-Backend-Include-Deleted'] = 'true' resp = self.internal_client.make_request( 'GET', path + '?format=json', headers, [200]) return [ShardRange.from_dict(sr) for sr in json.loads(resp.body)] def direct_get_container_shard_ranges(self, account=None, container=None, expect_failure=False): collector = ShardCollector() self.direct_container_op( collector, account, container, expect_failure) return collector.ranges def get_storage_dir(self, part, node, account=None, container=None): account = account or self.brain.account container = container or self.container_name server_type, config_number = get_server_number( (node['ip'], node['port']), self.ipport2server) assert server_type == 'container' repl_server = '%s-replicator' % server_type conf = utils.readconf(self.configs[repl_server][config_number], section_name=repl_server) datadir = os.path.join(conf['devices'], node['device'], 'containers') container_hash = utils.hash_path(account, container) return (utils.storage_directory(datadir, part, container_hash), container_hash) def get_db_file(self, part, node, account=None, container=None): container_dir, container_hash = self.get_storage_dir( part, node, account=account, container=container) db_file = 
os.path.join(container_dir, container_hash + '.db') self.assertTrue(get_db_files(db_file)) # sanity check return db_file def get_broker(self, part, node, account=None, container=None): return ContainerBroker( self.get_db_file(part, node, account, container)) def get_shard_broker(self, shard_range, node_index=0): shard_part, shard_nodes = self.brain.ring.get_nodes( shard_range.account, shard_range.container) return self.get_broker( shard_part, shard_nodes[node_index], shard_range.account, shard_range.container) def categorize_container_dir_content(self, account=None, container=None): account = account or self.brain.account container = container or self.container_name part, nodes = self.brain.ring.get_nodes(account, container) storage_dirs = [ self.get_storage_dir(part, node, account=account, container=container)[0] for node in nodes] result = { 'shard_dbs': [], 'normal_dbs': [], 'pendings': [], 'locks': [], 'other': [], } for storage_dir in storage_dirs: for f in os.listdir(storage_dir): path = os.path.join(storage_dir, f) if path.endswith('.db'): hash_, epoch, ext = parse_db_filename(path) if epoch: result['shard_dbs'].append(path) else: result['normal_dbs'].append(path) elif path.endswith('.db.pending'): result['pendings'].append(path) elif path.endswith('/.lock'): result['locks'].append(path) else: result['other'].append(path) if result['other']: self.fail('Found unexpected files in storage directory:\n %s' % '\n '.join(result['other'])) return result def assert_dict_contains(self, expected_items, actual_dict): ignored = set(expected_items) ^ set(actual_dict) filtered_actual = {k: actual_dict[k] for k in actual_dict if k not in ignored} self.assertEqual(expected_items, filtered_actual) def assert_shard_ranges_contiguous(self, expected_number, shard_ranges, first_lower='', last_upper=''): if shard_ranges and isinstance(shard_ranges[0], ShardRange): actual_shard_ranges = sorted(shard_ranges) else: actual_shard_ranges = sorted(ShardRange.from_dict(d) for d in shard_ranges) self.assertLengthEqual(actual_shard_ranges, expected_number) if expected_number: with annotate_failure('Ranges %s.' 
% actual_shard_ranges): self.assertEqual(first_lower, actual_shard_ranges[0].lower_str) for x, y in zip(actual_shard_ranges, actual_shard_ranges[1:]): self.assertEqual(x.upper, y.lower) self.assertEqual(last_upper, actual_shard_ranges[-1].upper_str) def assert_shard_range_equal(self, expected, actual, excludes=None): excludes = excludes or [] expected_dict = dict(expected) actual_dict = dict(actual) for k in excludes: expected_dict.pop(k, None) actual_dict.pop(k, None) self.assertEqual(expected_dict, actual_dict) def assert_shard_range_lists_equal(self, expected, actual, excludes=None): self.assertEqual(len(expected), len(actual)) for expected, actual in zip(expected, actual): self.assert_shard_range_equal(expected, actual, excludes=excludes) def assert_shard_range_state(self, expected_state, shard_ranges): if shard_ranges and not isinstance(shard_ranges[0], ShardRange): shard_ranges = [ShardRange.from_dict(data) for data in shard_ranges] self.assertEqual([expected_state] * len(shard_ranges), [sr.state for sr in shard_ranges]) def assert_total_object_count(self, expected_object_count, shard_ranges): actual = sum(sr['object_count'] for sr in shard_ranges) self.assertEqual(expected_object_count, actual) def assert_container_listing(self, expected_listing, req_hdrs=None): req_hdrs = req_hdrs if req_hdrs else {} headers, actual_listing = client.get_container( self.url, self.token, self.container_name, headers=req_hdrs) self.assertIn('x-container-object-count', headers) expected_obj_count = len(expected_listing) self.assertEqual(expected_listing, [ x['name'].encode('utf-8') if six.PY2 else x['name'] for x in actual_listing]) self.assertEqual(str(expected_obj_count), headers['x-container-object-count']) return headers, actual_listing def assert_container_object_count(self, expected_obj_count): headers = client.head_container( self.url, self.token, self.container_name) self.assertIn('x-container-object-count', headers) self.assertEqual(str(expected_obj_count), headers['x-container-object-count']) def assert_container_post_ok(self, meta_value): key = 'X-Container-Meta-Assert-Post-Works' headers = {key: meta_value} client.post_container( self.url, self.token, self.container_name, headers=headers) resp_headers = client.head_container( self.url, self.token, self.container_name) self.assertEqual(meta_value, resp_headers.get(key.lower())) def assert_container_post_fails(self, meta_value): key = 'X-Container-Meta-Assert-Post-Works' headers = {key: meta_value} with self.assertRaises(ClientException) as cm: client.post_container( self.url, self.token, self.container_name, headers=headers) self.assertEqual(404, cm.exception.http_status) def assert_container_delete_fails(self): with self.assertRaises(ClientException) as cm: client.delete_container(self.url, self.token, self.container_name) self.assertEqual(409, cm.exception.http_status) def assert_container_not_found(self): with self.assertRaises(ClientException) as cm: client.get_container(self.url, self.token, self.container_name) self.assertEqual(404, cm.exception.http_status) # check for headers leaking out while deleted resp_headers = cm.exception.http_response_headers self.assertNotIn('X-Container-Object-Count', resp_headers) self.assertNotIn('X-Container-Bytes-Used', resp_headers) self.assertNotIn('X-Timestamp', resp_headers) self.assertNotIn('X-PUT-Timestamp', resp_headers) def assert_container_has_shard_sysmeta(self): node_headers = self.direct_head_container() for node_id, headers in node_headers.items(): with annotate_failure('%s in %s' % 
(node_id, node_headers.keys())): for k, v in headers.items(): if k.lower().startswith('x-container-sysmeta-shard'): break else: self.fail('No shard sysmeta found in %s' % headers) def assert_container_state(self, node, expected_state, num_shard_ranges): headers, shard_ranges = direct_client.direct_get_container( node, self.brain.part, self.account, self.container_to_shard, headers={'X-Backend-Record-Type': 'shard'}) self.assertEqual(num_shard_ranges, len(shard_ranges)) self.assertIn('X-Backend-Sharding-State', headers) self.assertEqual( expected_state, headers['X-Backend-Sharding-State']) return [ShardRange.from_dict(sr) for sr in shard_ranges] def assert_subprocess_success(self, cmd_args): try: subprocess.check_output(cmd_args, stderr=subprocess.STDOUT) except Exception as exc: # why not 'except CalledProcessError'? because in my py3.6 tests # the CalledProcessError wasn't caught by that! despite type(exc) # being a CalledProcessError, isinstance(exc, CalledProcessError) # is False and the type has a different hash - could be # related to https://github.com/eventlet/eventlet/issues/413 try: # assume this is a CalledProcessError self.fail('%s with output:\n%s' % (exc, exc.output)) except AttributeError: raise exc def get_part_and_node_numbers(self, shard_range): """Return the partition and node numbers for a shard range.""" part, nodes = self.brain.ring.get_nodes( shard_range.account, shard_range.container) return part, [n['id'] + 1 for n in nodes] def run_sharders(self, shard_ranges): """Run the sharder on partitions for given shard ranges.""" if not isinstance(shard_ranges, (list, tuple, set)): shard_ranges = (shard_ranges,) partitions = ','.join(str(self.get_part_and_node_numbers(sr)[0]) for sr in shard_ranges) self.sharders.once(additional_args='--partitions=%s' % partitions) def run_sharder_sequentially(self, shard_range=None): """Run sharder node by node on partition for given shard range.""" if shard_range: part, node_numbers = self.get_part_and_node_numbers(shard_range) else: part, node_numbers = self.brain.part, self.brain.node_numbers for node_number in node_numbers: self.sharders.once(number=node_number, additional_args='--partitions=%s' % part) def run_custom_sharder(self, conf_index, custom_conf, **kwargs): return self.run_custom_daemon(ContainerSharder, 'container-sharder', conf_index, custom_conf, **kwargs) class TestContainerShardingNonUTF8(BaseTestContainerSharding): def test_sharding_listing(self): # verify parameterised listing of a container during sharding all_obj_names = self._make_object_names(4 * self.max_shard_size) obj_names = all_obj_names[::2] obj_content = 'testing' self.put_objects(obj_names, contents=obj_content) # choose some names approx in middle of each expected shard range markers = [ obj_names[i] for i in range(self.max_shard_size // 4, 2 * self.max_shard_size, self.max_shard_size // 2)] def check_listing(objects, req_hdrs=None, **params): req_hdrs = req_hdrs if req_hdrs else {} qs = '&'.join('%s=%s' % (k, quote(str(v))) for k, v in params.items()) headers, listing = client.get_container( self.url, self.token, self.container_name, query_string=qs, headers=req_hdrs) listing = [x['name'].encode('utf-8') if six.PY2 else x['name'] for x in listing] if params.get('reverse'): marker = params.get('marker', ShardRange.MAX) end_marker = params.get('end_marker', ShardRange.MIN) expected = [o for o in objects if end_marker < o < marker] expected.reverse() else: marker = params.get('marker', ShardRange.MIN) end_marker = params.get('end_marker', ShardRange.MAX) 
expected = [o for o in objects if marker < o < end_marker] if 'limit' in params: expected = expected[:params['limit']] self.assertEqual(expected, listing) self.assertIn('x-timestamp', headers) self.assertIn('last-modified', headers) self.assertIn('x-trans-id', headers) self.assertEqual('bytes', headers.get('accept-ranges')) self.assertEqual('application/json; charset=utf-8', headers.get('content-type')) def check_listing_fails(exp_status, **params): qs = '&'.join(['%s=%s' % param for param in params.items()]) with self.assertRaises(ClientException) as cm: client.get_container( self.url, self.token, self.container_name, query_string=qs) self.assertEqual(exp_status, cm.exception.http_status) return cm.exception def do_listing_checks(objs, hdrs=None): hdrs = hdrs if hdrs else {} check_listing(objs, hdrs) check_listing(objs, hdrs, marker=markers[0], end_marker=markers[1]) check_listing(objs, hdrs, marker=markers[0], end_marker=markers[2]) check_listing(objs, hdrs, marker=markers[1], end_marker=markers[3]) check_listing(objs, hdrs, marker=markers[1], end_marker=markers[3], limit=self.max_shard_size // 4) check_listing(objs, hdrs, marker=markers[1], end_marker=markers[3], limit=self.max_shard_size // 4) check_listing(objs, hdrs, marker=markers[1], end_marker=markers[2], limit=self.max_shard_size // 2) check_listing(objs, hdrs, marker=markers[1], end_marker=markers[1]) check_listing(objs, hdrs, reverse=True) check_listing(objs, hdrs, reverse=True, end_marker=markers[1]) check_listing(objs, hdrs, reverse=True, marker=markers[3], end_marker=markers[1], limit=self.max_shard_size // 4) check_listing(objs, hdrs, reverse=True, marker=markers[3], end_marker=markers[1], limit=0) check_listing([], hdrs, marker=markers[0], end_marker=markers[0]) check_listing([], hdrs, marker=markers[0], end_marker=markers[1], reverse=True) check_listing(objs, hdrs, prefix='obj') check_listing([], hdrs, prefix='zzz') # delimiter headers, listing = client.get_container( self.url, self.token, self.container_name, query_string='delimiter=' + quote(self.DELIM), headers=hdrs) self.assertEqual([{'subdir': 'obj' + self.DELIM}], listing) headers, listing = client.get_container( self.url, self.token, self.container_name, query_string='delimiter=j' + quote(self.DELIM), headers=hdrs) self.assertEqual([{'subdir': 'obj' + self.DELIM}], listing) limit = self.cluster_info['swift']['container_listing_limit'] exc = check_listing_fails(412, limit=limit + 1) self.assertIn(b'Maximum limit', exc.http_response_content) exc = check_listing_fails(400, delimiter='%ff') self.assertIn(b'not valid UTF-8', exc.http_response_content) # sanity checks do_listing_checks(obj_names) # Shard the container client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) # First run the 'leader' in charge of scanning, which finds all shard # ranges and cleaves first two self.sharders.once(number=self.brain.node_numbers[0], additional_args='--partitions=%s' % self.brain.part) # Then run sharder on other nodes which will also cleave first two # shard ranges for n in self.brain.node_numbers[1:]: self.sharders.once( number=n, additional_args='--partitions=%s' % self.brain.part) # sanity check shard range states for node in self.brain.nodes: self.assert_container_state(node, 'sharding', 4) shard_ranges = self.get_container_shard_ranges() self.assertLengthEqual(shard_ranges, 4) self.assert_shard_range_state(ShardRange.CLEAVED, shard_ranges[:2]) self.assert_shard_range_state(ShardRange.CREATED, shard_ranges[2:]) 
self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() # confirm no sysmeta deleted self.assert_container_post_ok('sharding') do_listing_checks(obj_names) # put some new objects spread through entire namespace; object updates # should be directed to the shard container (both the cleaved and the # created shards) new_obj_names = all_obj_names[1::4] self.put_objects(new_obj_names, obj_content) # new objects that fell into the first two cleaved shard ranges are # reported in listing; new objects in the yet-to-be-cleaved shard # ranges are not yet included in listing because listings prefer the # root over the final two shards that are not yet-cleaved exp_obj_names = [o for o in obj_names + new_obj_names if o <= shard_ranges[1].upper] exp_obj_names += [o for o in obj_names if o > shard_ranges[1].upper] exp_obj_names.sort() do_listing_checks(exp_obj_names) # run all the sharders again and the last two shard ranges get cleaved self.sharders.once(additional_args='--partitions=%s' % self.brain.part) for node in self.brain.nodes: self.assert_container_state(node, 'sharded', 4) shard_ranges = self.get_container_shard_ranges() self.assert_shard_range_state(ShardRange.ACTIVE, shard_ranges) # listings are now gathered from all four shard ranges so should have # all the specified objects exp_obj_names = obj_names + new_obj_names exp_obj_names.sort() do_listing_checks(exp_obj_names) # shard ranges may now be cached by proxy so do listings checks again # forcing backend request do_listing_checks(exp_obj_names, hdrs={'X-Newest': 'true'}) # post more metadata to the container and check that it is read back # correctly from backend (using x-newest) and cache test_headers = {'x-container-meta-test': 'testing', 'x-container-read': 'read_acl', 'x-container-write': 'write_acl', 'x-container-sync-key': 'sync_key', # 'x-container-sync-to': 'sync_to', 'x-versions-location': 'versions', 'x-container-meta-access-control-allow-origin': 'aa', 'x-container-meta-access-control-expose-headers': 'bb', 'x-container-meta-access-control-max-age': '123'} client.post_container(self.url, self.admin_token, self.container_name, headers=test_headers) headers, listing = client.get_container( self.url, self.token, self.container_name, headers={'X-Newest': 'true'}) exp_headers = dict(test_headers) exp_headers.update({ 'x-container-object-count': str(len(exp_obj_names)), 'x-container-bytes-used': str(len(exp_obj_names) * len(obj_content)) }) for k, v in exp_headers.items(): self.assertIn(k, headers) self.assertEqual(v, headers[k], dict(headers)) cache_headers, listing = client.get_container( self.url, self.token, self.container_name) for k, v in exp_headers.items(): self.assertIn(k, cache_headers) self.assertEqual(v, cache_headers[k], dict(exp_headers)) # we don't expect any of these headers to be equal... 
for k in ('x-timestamp', 'last-modified', 'date', 'x-trans-id', 'x-openstack-request-id'): headers.pop(k, None) cache_headers.pop(k, None) self.assertEqual(headers, cache_headers) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') # delete original objects self.delete_objects(obj_names) do_listing_checks(new_obj_names) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') class TestContainerShardingFunkyNames(TestContainerShardingNonUTF8): DELIM = '\n' def _make_object_names(self, number): return ['obj\n%04d%%Ff' % x for x in range(number)] def _setup_container_name(self): self.container_name = 'container\n%%Ff\n%s' % uuid.uuid4() class TestContainerShardingUTF8(TestContainerShardingNonUTF8): def _make_object_names(self, number, start=0): # override default with names that include non-ascii chars name_length = self.cluster_info['swift']['max_object_name_length'] obj_names = [] for x in range(start, start + number): name = (u'obj-\u00e4\u00ea\u00ec\u00f2\u00fb\u1234-%04d' % x) name = name.encode('utf8').ljust(name_length, b'o') if not six.PY2: name = name.decode('utf8') obj_names.append(name) return obj_names def _setup_container_name(self): # override default with max length name that includes non-ascii chars super(TestContainerShardingUTF8, self)._setup_container_name() name_length = self.cluster_info['swift']['max_container_name_length'] cont_name = \ self.container_name + u'-\u00e4\u00ea\u00ec\u00f2\u00fb\u1234' self.container_name = cont_name.encode('utf8').ljust(name_length, b'x') if not six.PY2: self.container_name = self.container_name.decode('utf8') class TestContainerShardingObjectVersioning(BaseTestContainerSharding): def _maybe_skip_test(self): super(TestContainerShardingObjectVersioning, self)._maybe_skip_test() try: vw_config = utils.readconf(self.configs['proxy-server'], 'filter:versioned_writes') except ValueError: raise SkipTest('No [filter:versioned_writes] section found in ' 'proxy-server configs') allow_object_versioning = config_true_value( vw_config.get('allow_object_versioning', False)) if not allow_object_versioning: raise SkipTest('allow_object_versioning must be true ' 'in all versioned_writes configs') def init_brain(self, container_name): client.put_container(self.url, self.token, container_name, headers={ 'X-Storage-Policy': self.policy.name, 'X-Versions-Enabled': 'true', }) self.container_to_shard = '\x00versions\x00' + container_name self.brain = BrainSplitter( self.url, self.token, self.container_to_shard, None, 'container') def test_sharding_listing(self): # verify parameterised listing of a container during sharding all_obj_names = self._make_object_names(3) * self.max_shard_size all_obj_names.extend(self._make_object_names(self.max_shard_size, start=3)) obj_names = all_obj_names[::2] obj_names_and_versions = self.put_objects(obj_names) def sort_key(obj_and_ver): obj, ver = obj_and_ver return obj, ~Timestamp(ver) obj_names_and_versions.sort(key=sort_key) # choose some names approx in middle of each expected shard range markers = [ obj_names_and_versions[i] for i in range(self.max_shard_size // 4, 2 * self.max_shard_size, self.max_shard_size // 2)] def check_listing(objects, **params): params['versions'] = '' qs = '&'.join('%s=%s' % param for param in params.items()) headers, listing = client.get_container( self.url, self.token, self.container_name, query_string=qs) listing = [(x['name'].encode('utf-8') if six.PY2 else 
x['name'], x['version_id']) for x in listing] if params.get('reverse'): marker = ( params.get('marker', ShardRange.MAX), ~Timestamp(params['version_marker']) if 'version_marker' in params else ~Timestamp('0'), ) end_marker = ( params.get('end_marker', ShardRange.MIN), Timestamp('0'), ) expected = [o for o in objects if end_marker < sort_key(o) < marker] expected.reverse() else: marker = ( params.get('marker', ShardRange.MIN), ~Timestamp(params['version_marker']) if 'version_marker' in params else Timestamp('0'), ) end_marker = ( params.get('end_marker', ShardRange.MAX), ~Timestamp('0'), ) expected = [o for o in objects if marker < sort_key(o) < end_marker] if 'limit' in params: expected = expected[:params['limit']] self.assertEqual(expected, listing) def check_listing_fails(exp_status, **params): params['versions'] = '' qs = '&'.join('%s=%s' % param for param in params.items()) with self.assertRaises(ClientException) as cm: client.get_container( self.url, self.token, self.container_name, query_string=qs) self.assertEqual(exp_status, cm.exception.http_status) return cm.exception def do_listing_checks(objects): check_listing(objects) check_listing(objects, marker=markers[0][0], version_marker=markers[0][1]) check_listing(objects, marker=markers[0][0], version_marker=markers[0][1], limit=self.max_shard_size // 10) check_listing(objects, marker=markers[0][0], version_marker=markers[0][1], limit=self.max_shard_size // 4) check_listing(objects, marker=markers[0][0], version_marker=markers[0][1], limit=self.max_shard_size // 2) check_listing(objects, marker=markers[1][0], version_marker=markers[1][1]) check_listing(objects, marker=markers[1][0], version_marker=markers[1][1], limit=self.max_shard_size // 10) check_listing(objects, marker=markers[2][0], version_marker=markers[2][1], limit=self.max_shard_size // 4) check_listing(objects, marker=markers[2][0], version_marker=markers[2][1], limit=self.max_shard_size // 2) check_listing(objects, reverse=True) check_listing(objects, reverse=True, marker=markers[1][0], version_marker=markers[1][1]) check_listing(objects, prefix='obj') check_listing([], prefix='zzz') # delimiter headers, listing = client.get_container( self.url, self.token, self.container_name, query_string='delimiter=-') self.assertEqual([{'subdir': 'obj-'}], listing) headers, listing = client.get_container( self.url, self.token, self.container_name, query_string='delimiter=j-') self.assertEqual([{'subdir': 'obj-'}], listing) limit = self.cluster_info['swift']['container_listing_limit'] exc = check_listing_fails(412, limit=limit + 1) self.assertIn(b'Maximum limit', exc.http_response_content) exc = check_listing_fails(400, delimiter='%ff') self.assertIn(b'not valid UTF-8', exc.http_response_content) # sanity checks do_listing_checks(obj_names_and_versions) # Shard the container. 
Use an internal_client so we get an implicit # X-Backend-Allow-Reserved-Names header self.internal_client.set_container_metadata( self.account, self.container_to_shard, { 'X-Container-Sysmeta-Sharding': 'True', }) # First run the 'leader' in charge of scanning, which finds all shard # ranges and cleaves first two self.sharders.once(number=self.brain.node_numbers[0], additional_args='--partitions=%s' % self.brain.part) # Then run sharder on other nodes which will also cleave first two # shard ranges for n in self.brain.node_numbers[1:]: self.sharders.once( number=n, additional_args='--partitions=%s' % self.brain.part) # sanity check shard range states for node in self.brain.nodes: self.assert_container_state(node, 'sharding', 4) shard_ranges = self.get_container_shard_ranges() self.assertLengthEqual(shard_ranges, 4) self.assert_shard_range_state(ShardRange.CLEAVED, shard_ranges[:2]) self.assert_shard_range_state(ShardRange.CREATED, shard_ranges[2:]) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() # confirm no sysmeta deleted self.assert_container_post_ok('sharding') do_listing_checks(obj_names_and_versions) # put some new objects spread through entire namespace new_obj_names = all_obj_names[1::4] new_obj_names_and_versions = self.put_objects(new_obj_names) # new objects that fell into the first two cleaved shard ranges are # reported in listing, new objects in the yet-to-be-cleaved shard # ranges are not yet included in listing exp_obj_names_and_versions = [ o for o in obj_names_and_versions + new_obj_names_and_versions if '\x00' + o[0] <= shard_ranges[1].upper] exp_obj_names_and_versions += [ o for o in obj_names_and_versions if '\x00' + o[0] > shard_ranges[1].upper] exp_obj_names_and_versions.sort(key=sort_key) do_listing_checks(exp_obj_names_and_versions) # run all the sharders again and the last two shard ranges get cleaved self.sharders.once(additional_args='--partitions=%s' % self.brain.part) for node in self.brain.nodes: self.assert_container_state(node, 'sharded', 4) shard_ranges = self.get_container_shard_ranges() self.assert_shard_range_state(ShardRange.ACTIVE, shard_ranges) exp_obj_names_and_versions = \ obj_names_and_versions + new_obj_names_and_versions exp_obj_names_and_versions.sort(key=sort_key) do_listing_checks(exp_obj_names_and_versions) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') # delete original objects self.delete_objects(obj_names_and_versions) new_obj_names_and_versions.sort(key=sort_key) do_listing_checks(new_obj_names_and_versions) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') class TestContainerSharding(BaseTestContainerSharding): def _test_sharded_listing(self, run_replicators=False): obj_names = self._make_object_names(self.max_shard_size) self.put_objects(obj_names) # Verify that we start out with normal DBs, no shards found = self.categorize_container_dir_content() self.assertLengthEqual(found['normal_dbs'], 3) self.assertLengthEqual(found['shard_dbs'], 0) for db_file in found['normal_dbs']: broker = ContainerBroker(db_file) self.assertIs(True, broker.is_root_container()) self.assertEqual('unsharded', broker.get_db_state()) self.assertLengthEqual(broker.get_shard_ranges(), 0) headers, pre_sharding_listing = client.get_container( self.url, self.token, self.container_name) self.assertEqual(obj_names, [ x['name'].encode('utf-8') if six.PY2 else x['name'] for x in 
pre_sharding_listing]) # sanity # Shard it client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) pre_sharding_headers = client.head_container( self.url, self.admin_token, self.container_name) self.assertEqual('True', pre_sharding_headers.get('x-container-sharding')) # Only run the one in charge of scanning self.sharders.once(number=self.brain.node_numbers[0], additional_args='--partitions=%s' % self.brain.part) # Verify that we have one sharded db -- though the other normal DBs # received the shard ranges that got defined found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 1) broker = self.get_broker(self.brain.part, self.brain.nodes[0]) # sanity check - the shard db is on replica 0 self.assertEqual(found['shard_dbs'][0], broker.db_file) self.assertIs(True, broker.is_root_container()) self.assertEqual('sharded', broker.get_db_state()) orig_root_shard_ranges = [dict(sr) for sr in broker.get_shard_ranges()] self.assertLengthEqual(orig_root_shard_ranges, 2) self.assert_total_object_count(len(obj_names), orig_root_shard_ranges) self.assert_shard_ranges_contiguous(2, orig_root_shard_ranges) self.assertEqual([ShardRange.ACTIVE, ShardRange.ACTIVE], [sr['state'] for sr in orig_root_shard_ranges]) # Contexts should still be there, and should be complete contexts = set([ctx.done() for ctx, _ in CleavingContext.load_all(broker)]) self.assertEqual({True}, contexts) self.direct_delete_container(expect_failure=True) self.assertLengthEqual(found['normal_dbs'], 2) for db_file in found['normal_dbs']: broker = ContainerBroker(db_file) self.assertIs(True, broker.is_root_container()) self.assertEqual('unsharded', broker.get_db_state()) shard_ranges = [dict(sr) for sr in broker.get_shard_ranges()] self.assertEqual([ShardRange.CREATED, ShardRange.CREATED], [sr['state'] for sr in shard_ranges]) # the sharded db had shard range meta_timestamps and state updated # during cleaving, so we do not expect those to be equal on other # nodes self.assert_shard_range_lists_equal( orig_root_shard_ranges, shard_ranges, excludes=['meta_timestamp', 'state', 'state_timestamp']) contexts = list(CleavingContext.load_all(broker)) self.assertEqual([], contexts) # length check if run_replicators: Manager(['container-replicator']).once() # replication doesn't change the db file names found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 1) self.assertLengthEqual(found['normal_dbs'], 2) # Now that everyone has shard ranges, run *everyone* self.sharders.once(additional_args='--partitions=%s' % self.brain.part) # Verify that we only have shard dbs now found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 3) self.assertLengthEqual(found['normal_dbs'], 0) # Shards stayed the same for db_file in found['shard_dbs']: broker = ContainerBroker(db_file) self.assertIs(True, broker.is_root_container()) self.assertEqual('sharded', broker.get_db_state()) # Well, except for meta_timestamps, since the shards each reported self.assert_shard_range_lists_equal( orig_root_shard_ranges, broker.get_shard_ranges(), excludes=['meta_timestamp', 'state_timestamp']) for orig, updated in zip(orig_root_shard_ranges, broker.get_shard_ranges()): self.assertGreaterEqual(updated.state_timestamp, orig['state_timestamp']) self.assertGreaterEqual(updated.meta_timestamp, orig['meta_timestamp']) # Contexts should still be there, and should be complete contexts = set([ctx.done() for ctx, _ in 
CleavingContext.load_all(broker)]) self.assertEqual({True}, contexts) # Check that entire listing is available headers, actual_listing = self.assert_container_listing(obj_names) # ... and check some other container properties self.assertEqual(headers['last-modified'], pre_sharding_headers['last-modified']) # It even works in reverse! headers, listing = client.get_container(self.url, self.token, self.container_name, query_string='reverse=on') self.assertEqual(pre_sharding_listing[::-1], listing) # and repeat checks to use shard ranges now cached in proxy headers, actual_listing = self.assert_container_listing(obj_names) self.assertEqual(headers['last-modified'], pre_sharding_headers['last-modified']) headers, listing = client.get_container(self.url, self.token, self.container_name, query_string='reverse=on') self.assertEqual(pre_sharding_listing[::-1], listing) # Now put some new objects into first shard, taking its count to # 3 shard ranges' worth more_obj_names = [ 'beta%03d' % x for x in range(self.max_shard_size)] self.put_objects(more_obj_names) # The listing includes new objects (shard ranges haven't changed, just # their object content, so cached shard ranges are still correct)... headers, listing = self.assert_container_listing( more_obj_names + obj_names) self.assertEqual(pre_sharding_listing, listing[len(more_obj_names):]) # ...but root object count is out of date until the sharders run and # update the root self.assert_container_object_count(len(obj_names)) # run sharders on the shard to get root updated shard_1 = ShardRange.from_dict(orig_root_shard_ranges[0]) self.run_sharders(shard_1) self.assert_container_object_count(len(more_obj_names + obj_names)) # we've added objects enough that we need to shard the first shard # *again* into three new sub-shards, but nothing happens until the root # leader identifies shard candidate... root_shard_ranges = self.direct_get_container_shard_ranges() for node, (hdrs, root_shards) in root_shard_ranges.items(): self.assertLengthEqual(root_shards, 2) with annotate_failure('node %s. ' % node): self.assertEqual( [ShardRange.ACTIVE] * 2, [sr['state'] for sr in root_shards]) # orig shards 0, 1 should be contiguous self.assert_shard_ranges_contiguous(2, root_shards) # Now run the root leader to identify shard candidate...while one of # the shard container servers is down shard_1_part, shard_1_nodes = self.get_part_and_node_numbers(shard_1) self.brain.servers.stop(number=shard_1_nodes[2]) self.sharders.once(number=self.brain.node_numbers[0], additional_args='--partitions=%s' % self.brain.part) # ... so third replica of first shard state is not moved to sharding found_for_shard = self.categorize_container_dir_content( shard_1.account, shard_1.container) self.assertLengthEqual(found_for_shard['normal_dbs'], 3) self.assertEqual( [ShardRange.SHARDING, ShardRange.SHARDING, ShardRange.ACTIVE], [ContainerBroker(db_file).get_own_shard_range().state for db_file in found_for_shard['normal_dbs']]) # ...then run first cycle of first shard sharders in order, leader # first, to get to predictable state where all nodes have cleaved 2 out # of 3 ranges...starting with first two nodes for node_number in shard_1_nodes[:2]: self.sharders.once( number=node_number, additional_args='--partitions=%s' % shard_1_part) # ... 
first two replicas start sharding to sub-shards found_for_shard = self.categorize_container_dir_content( shard_1.account, shard_1.container) self.assertLengthEqual(found_for_shard['shard_dbs'], 2) for db_file in found_for_shard['shard_dbs'][:2]: broker = ContainerBroker(db_file) with annotate_failure('shard db file %s. ' % db_file): self.assertIs(False, broker.is_root_container()) self.assertEqual('sharding', broker.get_db_state()) self.assertEqual( ShardRange.SHARDING, broker.get_own_shard_range().state) shard_shards = broker.get_shard_ranges() self.assertEqual( [ShardRange.CLEAVED, ShardRange.CLEAVED, ShardRange.CREATED], [sr.state for sr in shard_shards]) self.assert_shard_ranges_contiguous( 3, shard_shards, first_lower=orig_root_shard_ranges[0]['lower'], last_upper=orig_root_shard_ranges[0]['upper']) contexts = list(CleavingContext.load_all(broker)) self.assertEqual(len(contexts), 1) context, _lm = contexts[0] self.assertIs(context.cleaving_done, False) self.assertIs(context.misplaced_done, True) self.assertEqual(context.ranges_done, 2) self.assertEqual(context.ranges_todo, 1) self.assertEqual(context.max_row, self.max_shard_size * 3 // 2) # but third replica still has no idea it should be sharding self.assertLengthEqual(found_for_shard['normal_dbs'], 3) self.assertEqual( ShardRange.ACTIVE, ContainerBroker( found_for_shard['normal_dbs'][2]).get_own_shard_range().state) # ...but once sharder runs on third replica it will learn its state; # note that any root replica on the stopped container server also won't # know about the shards being in sharding state, so leave that server # stopped for now so that shard fetches its state from an up-to-date # root replica self.sharders.once( number=shard_1_nodes[2], additional_args='--partitions=%s' % shard_1_part) # third replica is sharding but has no sub-shard ranges yet... found_for_shard = self.categorize_container_dir_content( shard_1.account, shard_1.container) self.assertLengthEqual(found_for_shard['shard_dbs'], 2) self.assertLengthEqual(found_for_shard['normal_dbs'], 3) broker = ContainerBroker(found_for_shard['normal_dbs'][2]) self.assertEqual('unsharded', broker.get_db_state()) self.assertEqual( ShardRange.SHARDING, broker.get_own_shard_range().state) self.assertFalse(broker.get_shard_ranges()) contexts = list(CleavingContext.load_all(broker)) self.assertEqual([], contexts) # length check # ...until sub-shard ranges are replicated from another shard replica; # there may also be a sub-shard replica missing so run replicators on # all nodes to fix that if necessary self.brain.servers.start(number=shard_1_nodes[2]) self.replicators.once() # Now that the replicators have all run, third replica sees cleaving # contexts for the first two contexts = list(CleavingContext.load_all(broker)) self.assertEqual(len(contexts), 2) # now run sharder again on third replica self.sharders.once( number=shard_1_nodes[2], additional_args='--partitions=%s' % shard_1_part) sharding_broker = ContainerBroker(found_for_shard['normal_dbs'][2]) self.assertEqual('sharding', sharding_broker.get_db_state()) broker_id = broker.get_info()['id'] # Old, unsharded DB doesn't have the context... 
contexts = list(CleavingContext.load_all(broker)) self.assertEqual(len(contexts), 2) self.assertNotIn(broker_id, [ctx[0].ref for ctx in contexts]) # ...but the sharding one does contexts = list(CleavingContext.load_all(sharding_broker)) self.assertEqual(len(contexts), 3) self.assertIn(broker_id, [ctx[0].ref for ctx in contexts]) # check original first shard range state and sub-shards - all replicas # should now be in consistent state found_for_shard = self.categorize_container_dir_content( shard_1.account, shard_1.container) self.assertLengthEqual(found_for_shard['shard_dbs'], 3) self.assertLengthEqual(found_for_shard['normal_dbs'], 3) for db_file in found_for_shard['shard_dbs']: broker = ContainerBroker(db_file) with annotate_failure('shard db file %s. ' % db_file): self.assertIs(False, broker.is_root_container()) self.assertEqual('sharding', broker.get_db_state()) self.assertEqual( ShardRange.SHARDING, broker.get_own_shard_range().state) shard_shards = broker.get_shard_ranges() self.assertEqual( [ShardRange.CLEAVED, ShardRange.CLEAVED, ShardRange.CREATED], [sr.state for sr in shard_shards]) self.assert_shard_ranges_contiguous( 3, shard_shards, first_lower=orig_root_shard_ranges[0]['lower'], last_upper=orig_root_shard_ranges[0]['upper']) # check third sub-shard is in created state sub_shard = shard_shards[2] found_for_sub_shard = self.categorize_container_dir_content( sub_shard.account, sub_shard.container) self.assertFalse(found_for_sub_shard['shard_dbs']) self.assertLengthEqual(found_for_sub_shard['normal_dbs'], 3) for db_file in found_for_sub_shard['normal_dbs']: broker = ContainerBroker(db_file) with annotate_failure('sub shard db file %s. ' % db_file): self.assertIs(False, broker.is_root_container()) self.assertEqual('unsharded', broker.get_db_state()) self.assertEqual( ShardRange.CREATED, broker.get_own_shard_range().state) self.assertFalse(broker.get_shard_ranges()) # check root shard ranges root_shard_ranges = self.direct_get_container_shard_ranges() for node, (hdrs, root_shards) in root_shard_ranges.items(): self.assertLengthEqual(root_shards, 5) with annotate_failure('node %s. ' % node): # shard ranges are sorted by upper, state, lower, so expect: # sub-shards, orig shard 0, orig shard 1 self.assertEqual( [ShardRange.CLEAVED, ShardRange.CLEAVED, ShardRange.CREATED, ShardRange.SHARDING, ShardRange.ACTIVE], [sr['state'] for sr in root_shards]) # sub-shards 0, 1, 2, orig shard 1 should be contiguous self.assert_shard_ranges_contiguous( 4, root_shards[:3] + root_shards[4:]) # orig shards 0, 1 should be contiguous self.assert_shard_ranges_contiguous(2, root_shards[3:]) self.assert_container_listing(more_obj_names + obj_names) self.assert_container_object_count(len(more_obj_names + obj_names)) # Before writing, kill the cache self.memcache.delete(get_cache_key( self.account, self.container_name, shard='updating')) # add another object that lands in the first of the new sub-shards self.put_objects(['alpha']) # check that alpha object is in the first new shard shard_listings = self.direct_get_container(shard_shards[0].account, shard_shards[0].container) for node, (hdrs, listing) in shard_listings.items(): with annotate_failure(node): self.assertIn('alpha', [o['name'] for o in listing]) self.assert_container_listing(['alpha'] + more_obj_names + obj_names) # Run sharders again so things settle. 
self.run_sharders(shard_1) # Also run replicators to settle cleaving contexts self.replicators.once() # check original first shard range shards for db_file in found_for_shard['shard_dbs']: broker = ContainerBroker(db_file) with annotate_failure('shard db file %s. ' % db_file): self.assertIs(False, broker.is_root_container()) self.assertEqual('sharded', broker.get_db_state()) self.assertEqual( [ShardRange.ACTIVE] * 3, [sr.state for sr in broker.get_shard_ranges()]) # Contexts should still be there, and should be complete contexts = set([ctx.done() for ctx, _ in CleavingContext.load_all(broker)]) self.assertEqual({True}, contexts) # check root shard ranges root_shard_ranges = self.direct_get_container_shard_ranges() for node, (hdrs, root_shards) in root_shard_ranges.items(): # old first shard range should have been deleted self.assertLengthEqual(root_shards, 4) with annotate_failure('node %s. ' % node): self.assertEqual( [ShardRange.ACTIVE] * 4, [sr['state'] for sr in root_shards]) self.assert_shard_ranges_contiguous(4, root_shards) headers, final_listing = self.assert_container_listing( ['alpha'] + more_obj_names + obj_names) # check root found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 3) self.assertLengthEqual(found['normal_dbs'], 0) new_shard_ranges = None for db_file in found['shard_dbs']: broker = ContainerBroker(db_file) self.assertIs(True, broker.is_root_container()) self.assertEqual('sharded', broker.get_db_state()) if new_shard_ranges is None: new_shard_ranges = broker.get_shard_ranges( include_deleted=True) self.assertLengthEqual(new_shard_ranges, 5) # Second half is still there, and unchanged self.assertIn( dict(orig_root_shard_ranges[1], meta_timestamp=None, state_timestamp=None), [dict(sr, meta_timestamp=None, state_timestamp=None) for sr in new_shard_ranges]) # But the first half split in three, then deleted by_name = {sr.name: sr for sr in new_shard_ranges} self.assertIn(orig_root_shard_ranges[0]['name'], by_name) old_shard_range = by_name.pop( orig_root_shard_ranges[0]['name']) self.assertTrue(old_shard_range.deleted) self.assert_shard_ranges_contiguous(4, list(by_name.values())) else: # Everyone's on the same page. Well, except for # meta_timestamps, since the shards each reported other_shard_ranges = broker.get_shard_ranges( include_deleted=True) self.assert_shard_range_lists_equal( new_shard_ranges, other_shard_ranges, excludes=['meta_timestamp', 'state_timestamp']) for orig, updated in zip(orig_root_shard_ranges, other_shard_ranges): self.assertGreaterEqual(updated.meta_timestamp, orig['meta_timestamp']) self.assert_container_delete_fails() for obj in final_listing: client.delete_object( self.url, self.token, self.container_name, obj['name']) # the objects won't be listed anymore self.assert_container_listing([]) # but root container stats will not yet be aware of the deletions self.assert_container_delete_fails() # One server was down while the shard sharded its first two sub-shards, # so there may be undeleted handoff db(s) for sub-shard(s) that were # not fully replicated; run replicators now to clean up so they no # longer report bogus stats to root. self.replicators.once() # Run sharder so that shard containers update the root. Do not run # sharder on root container because that triggers shrinks which can # cause root object count to temporarily be non-zero and prevent the # final delete. 
self.run_sharders(self.get_container_shard_ranges()) # then root is empty and can be deleted self.assert_container_listing([]) self.assert_container_object_count(0) client.delete_container(self.url, self.token, self.container_name) def test_sharded_listing_no_replicators(self): self._test_sharded_listing() def test_sharded_listing_with_replicators(self): self._test_sharded_listing(run_replicators=True) def test_async_pendings(self): obj_names = self._make_object_names(self.max_shard_size * 2) # There are some updates *everyone* gets self.put_objects(obj_names[::5]) # But roll some outages so each container only get ~2/5 more object # records i.e. total of 3/5 updates per container; and async pendings # pile up for i, n in enumerate(self.brain.node_numbers, start=1): self.brain.servers.stop(number=n) self.put_objects(obj_names[i::5]) self.brain.servers.start(number=n) # But there are also 1/5 updates *no one* gets self.brain.servers.stop() self.put_objects(obj_names[4::5]) self.brain.servers.start() # Shard it client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) headers = client.head_container(self.url, self.admin_token, self.container_name) self.assertEqual('True', headers.get('x-container-sharding')) # sanity check found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 0) self.assertLengthEqual(found['normal_dbs'], 3) for db_file in found['normal_dbs']: broker = ContainerBroker(db_file) self.assertIs(True, broker.is_root_container()) self.assertEqual(len(obj_names) * 3 // 5, broker.get_info()['object_count']) # Only run the 'leader' in charge of scanning. # Each container has ~2 * max * 3/5 objects # which are distributed from obj000 to obj<2 * max - 1>, # so expect 3 shard ranges to be found: the first two will be complete # shards with max/2 objects and lower/upper bounds spaced by approx: # (2 * max - 1)/(2 * max * 3/5) * (max/2) =~ 5/6 * max # # Note that during this shard cycle the leader replicates to other # nodes so they will end up with ~2 * max * 4/5 objects. 
self.sharders.once(number=self.brain.node_numbers[0], additional_args='--partitions=%s' % self.brain.part) # Verify that we have one shard db -- though the other normal DBs # received the shard ranges that got defined found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 1) node_index_zero_db = found['shard_dbs'][0] broker = ContainerBroker(node_index_zero_db) self.assertIs(True, broker.is_root_container()) self.assertEqual(SHARDING, broker.get_db_state()) expected_shard_ranges = broker.get_shard_ranges() self.assertLengthEqual(expected_shard_ranges, 3) self.assertEqual( [ShardRange.CLEAVED, ShardRange.CLEAVED, ShardRange.CREATED], [sr.state for sr in expected_shard_ranges]) # Still have all three big DBs -- we've only cleaved 2 of the 3 shard # ranges that got defined self.assertLengthEqual(found['normal_dbs'], 3) db_states = [] for db_file in found['normal_dbs']: broker = ContainerBroker(db_file) self.assertIs(True, broker.is_root_container()) db_states.append(broker.get_db_state()) # the sharded db had shard range meta_timestamps updated during # cleaving, so we do not expect those to be equal on other nodes self.assert_shard_range_lists_equal( expected_shard_ranges, broker.get_shard_ranges(), excludes=['meta_timestamp', 'state_timestamp', 'state']) self.assertEqual(len(obj_names) * 3 // 5, broker.get_info()['object_count']) self.assertEqual([SHARDING, UNSHARDED, UNSHARDED], sorted(db_states)) # Run the other sharders so we're all in (roughly) the same state for n in self.brain.node_numbers[1:]: self.sharders.once( number=n, additional_args='--partitions=%s' % self.brain.part) found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 3) self.assertLengthEqual(found['normal_dbs'], 3) for db_file in found['normal_dbs']: broker = ContainerBroker(db_file) self.assertEqual(SHARDING, broker.get_db_state()) # no new rows self.assertEqual(len(obj_names) * 3 // 5, broker.get_info()['object_count']) # Run updaters to clear the async pendings Manager(['object-updater']).once() # Our "big" dbs didn't take updates for db_file in found['normal_dbs']: broker = ContainerBroker(db_file) self.assertEqual(len(obj_names) * 3 // 5, broker.get_info()['object_count']) # confirm that the async pending updates got redirected to the shards for sr in expected_shard_ranges: shard_listings = self.direct_get_container(sr.account, sr.container) for node, (hdrs, listing) in shard_listings.items(): shard_listing_names = [ o['name'].encode('utf-8') if six.PY2 else o['name'] for o in listing] for obj in obj_names[4::5]: if obj in sr: self.assertIn(obj, shard_listing_names) else: self.assertNotIn(obj, shard_listing_names) # The entire listing is not yet available - we have two cleaved shard # ranges, complete with async updates, but for the remainder of the # namespace only what landed in the original container headers, listing = client.get_container(self.url, self.token, self.container_name) start_listing = [ o for o in obj_names if o <= expected_shard_ranges[1].upper] self.assertEqual( [x['name'].encode('utf-8') if six.PY2 else x['name'] for x in listing[:len(start_listing)]], start_listing) # we can't assert much about the remaining listing, other than that # there should be something self.assertTrue( [x['name'].encode('utf-8') if six.PY2 else x['name'] for x in listing[len(start_listing):]]) self.assertIn('x-container-object-count', headers) self.assertEqual(str(len(listing)), headers['x-container-object-count']) headers, listing = 
client.get_container(self.url, self.token, self.container_name, query_string='reverse=on') self.assertEqual([x['name'].encode('utf-8') if six.PY2 else x['name'] for x in listing[-len(start_listing):]], list(reversed(start_listing))) self.assertIn('x-container-object-count', headers) self.assertEqual(str(len(listing)), headers['x-container-object-count']) self.assertTrue( [x['name'].encode('utf-8') if six.PY2 else x['name'] for x in listing[:-len(start_listing)]]) # Run the sharders again to get everything to settle self.sharders.once() found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 3) self.assertLengthEqual(found['normal_dbs'], 0) # now all shards have been cleaved we should get the complete listing headers, listing = client.get_container(self.url, self.token, self.container_name) self.assertEqual([x['name'].encode('utf-8') if six.PY2 else x['name'] for x in listing], obj_names) def test_shrinking(self): int_client = self.make_internal_client() def check_node_data(node_data, exp_hdrs, exp_obj_count, exp_shards, exp_sharded_root_range=False): hdrs, range_data = node_data self.assert_dict_contains(exp_hdrs, hdrs) sharded_root_range = False other_range_data = [] for data in range_data: sr = ShardRange.from_dict(data) if (sr.account == self.account and sr.container == self.container_name and sr.state == ShardRange.SHARDED): # only expect one root range self.assertFalse(sharded_root_range, range_data) sharded_root_range = True self.assertEqual(ShardRange.MIN, sr.lower, sr) self.assertEqual(ShardRange.MAX, sr.upper, sr) else: # include active root range in further assertions other_range_data.append(data) self.assertEqual(exp_sharded_root_range, sharded_root_range) self.assert_shard_ranges_contiguous(exp_shards, other_range_data) self.assert_total_object_count(exp_obj_count, other_range_data) def check_shard_nodes_data(node_data, expected_state='unsharded', expected_shards=0, exp_obj_count=0, exp_sharded_root_range=False): # checks that shard range is consistent on all nodes root_path = '%s/%s' % (self.account, self.container_name) exp_shard_hdrs = { 'X-Container-Sysmeta-Shard-Quoted-Root': quote(root_path), 'X-Backend-Sharding-State': expected_state} object_counts = [] bytes_used = [] for node_id, node_data in node_data.items(): with annotate_failure('Node id %s.' % node_id): check_node_data( node_data, exp_shard_hdrs, exp_obj_count, expected_shards, exp_sharded_root_range) hdrs = node_data[0] object_counts.append(int(hdrs['X-Container-Object-Count'])) bytes_used.append(int(hdrs['X-Container-Bytes-Used'])) if len(set(object_counts)) != 1: self.fail('Inconsistent object counts: %s' % object_counts) if len(set(bytes_used)) != 1: self.fail('Inconsistent bytes used: %s' % bytes_used) return object_counts[0], bytes_used[0] repeat = [0] def do_shard_then_shrink(): repeat[0] += 1 obj_names = ['obj-%s-%03d' % (repeat[0], x) for x in range(self.max_shard_size)] self.put_objects(obj_names) # these two object names will fall at start of first shard range... 
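            # ('alpha-<n>' and 'beta-<n>' sort lexicographically before the
            # 'obj-<n>-NNN' names put above, so both land in the first shard
            # range's namespace)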
alpha = 'alpha-%s' % repeat[0] beta = 'beta-%s' % repeat[0] # Enable sharding client.post_container( self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) # sanity check self.assert_container_listing(obj_names) # Only run the one in charge of scanning self.sharders.once( number=self.brain.node_numbers[0], additional_args='--partitions=%s' % self.brain.part) # check root container root_nodes_data = self.direct_get_container_shard_ranges() self.assertEqual(3, len(root_nodes_data)) # nodes on which sharder has not run are still in unsharded state # but have had shard ranges replicated to them exp_obj_count = len(obj_names) exp_hdrs = {'X-Backend-Sharding-State': 'unsharded', 'X-Container-Object-Count': str(exp_obj_count)} node_id = self.brain.node_numbers[1] - 1 check_node_data( root_nodes_data[node_id], exp_hdrs, exp_obj_count, 2) node_id = self.brain.node_numbers[2] - 1 check_node_data( root_nodes_data[node_id], exp_hdrs, exp_obj_count, 2) # only one that ran sharder is in sharded state exp_hdrs['X-Backend-Sharding-State'] = 'sharded' node_id = self.brain.node_numbers[0] - 1 check_node_data( root_nodes_data[node_id], exp_hdrs, exp_obj_count, 2) orig_range_data = root_nodes_data[node_id][1] orig_shard_ranges = [ShardRange.from_dict(r) for r in orig_range_data] # check first shard shard_nodes_data = self.direct_get_container_shard_ranges( orig_shard_ranges[0].account, orig_shard_ranges[0].container) obj_count, bytes_used = check_shard_nodes_data(shard_nodes_data) total_shard_object_count = obj_count # check second shard shard_nodes_data = self.direct_get_container_shard_ranges( orig_shard_ranges[1].account, orig_shard_ranges[1].container) obj_count, bytes_used = check_shard_nodes_data(shard_nodes_data) total_shard_object_count += obj_count self.assertEqual(exp_obj_count, total_shard_object_count) # Now that everyone has shard ranges, run *everyone* self.sharders.once( additional_args='--partitions=%s' % self.brain.part) # all root container nodes should now be in sharded state root_nodes_data = self.direct_get_container_shard_ranges() self.assertEqual(3, len(root_nodes_data)) for node_id, node_data in root_nodes_data.items(): with annotate_failure('Node id %s.' 
% node_id): check_node_data(node_data, exp_hdrs, exp_obj_count, 2) # run updaters to update .sharded account; shard containers have # not updated account since having objects replicated to them self.updaters.once() shard_cont_count, shard_obj_count = int_client.get_account_info( orig_shard_ranges[0].account, [204]) self.assertEqual(2 * repeat[0], shard_cont_count) # the shards account should always have zero object count to avoid # double accounting self.assertEqual(0, shard_obj_count) # checking the listing also refreshes proxy container info cache so # that the proxy becomes aware that container is sharded and will # now look up the shard target for subsequent updates self.assert_container_listing(obj_names) # Before writing, kill the cache self.memcache.delete(get_cache_key( self.account, self.container_name, shard='updating')) # delete objects from first shard range first_shard_objects = [obj_name for obj_name in obj_names if obj_name <= orig_shard_ranges[0].upper] for obj in first_shard_objects: client.delete_object( self.url, self.token, self.container_name, obj) with self.assertRaises(ClientException): client.get_object( self.url, self.token, self.container_name, obj) second_shard_objects = [obj_name for obj_name in obj_names if obj_name > orig_shard_ranges[1].lower] self.assert_container_listing(second_shard_objects) # put a new object 'alpha' in first shard range self.put_objects([alpha]) second_shard_objects = [obj_name for obj_name in obj_names if obj_name > orig_shard_ranges[1].lower] self.assert_container_listing([alpha] + second_shard_objects) # while container servers are down, but proxy has container info in # cache from recent listing, put another object; this update will # lurk in async pending until the updaters run again; because all # the root container servers are down and therefore cannot respond # to a GET for a redirect target, the object update will default to # being targeted at the root container self.stop_container_servers() # Before writing, kill the cache self.memcache.delete(get_cache_key( self.account, self.container_name, shard='updating')) self.put_objects([beta]) self.brain.servers.start() async_pendings = self.gather_async_pendings( self.get_all_object_nodes()) num_container_replicas = len(self.brain.nodes) num_obj_replicas = self.policy.object_ring.replica_count expected_num_updates = num_container_updates( num_container_replicas, quorum_size(num_container_replicas), num_obj_replicas, self.policy.quorum) expected_num_pendings = min(expected_num_updates, num_obj_replicas) # sanity check with annotate_failure('policy %s. ' % self.policy): self.assertLengthEqual(async_pendings, expected_num_pendings) # root object count is not updated... self.assert_container_object_count(len(obj_names)) self.assert_container_listing([alpha] + second_shard_objects) root_nodes_data = self.direct_get_container_shard_ranges() self.assertEqual(3, len(root_nodes_data)) for node_id, node_data in root_nodes_data.items(): with annotate_failure('Node id %s.' 
% node_id): check_node_data(node_data, exp_hdrs, exp_obj_count, 2) range_data = node_data[1] self.assert_shard_range_lists_equal( orig_range_data, range_data, excludes=['meta_timestamp', 'state_timestamp']) # ...until the sharders run and update root; reclaim tombstones so # that the shard is shrinkable shard_0_part = self.get_part_and_node_numbers( orig_shard_ranges[0])[0] for conf_index in self.configs['container-sharder'].keys(): self.run_custom_sharder(conf_index, {'reclaim_age': 0}, override_partitions=[shard_0_part]) exp_obj_count = len(second_shard_objects) + 1 self.assert_container_object_count(exp_obj_count) self.assert_container_listing([alpha] + second_shard_objects) # root sharder finds donor, acceptor pair and pushes changes self.sharders.once( additional_args='--partitions=%s' % self.brain.part) self.assert_container_listing([alpha] + second_shard_objects) # run sharder on donor to shrink and replicate to acceptor self.run_sharders(orig_shard_ranges[0]) self.assert_container_listing([alpha] + second_shard_objects) # run sharder on acceptor to update root with stats self.run_sharders(orig_shard_ranges[1]) self.assert_container_listing([alpha] + second_shard_objects) self.assert_container_object_count(len(second_shard_objects) + 1) # check root container root_nodes_data = self.direct_get_container_shard_ranges() self.assertEqual(3, len(root_nodes_data)) exp_hdrs['X-Container-Object-Count'] = str(exp_obj_count) for node_id, node_data in root_nodes_data.items(): with annotate_failure('Node id %s.' % node_id): # NB now only *one* shard range in root check_node_data(node_data, exp_hdrs, exp_obj_count, 1) # the acceptor shard is intact.. shard_nodes_data = self.direct_get_container_shard_ranges( orig_shard_ranges[1].account, orig_shard_ranges[1].container) obj_count, bytes_used = check_shard_nodes_data(shard_nodes_data) # all objects should now be in this shard self.assertEqual(exp_obj_count, obj_count) # the donor shard is also still intact donor = orig_shard_ranges[0] shard_nodes_data = self.direct_get_container_shard_ranges( donor.account, donor.container) # the donor's shard range will have the acceptor's projected stats; # donor also has copy of root shard range that will be ignored; # note: expected_shards does not include the sharded root range obj_count, bytes_used = check_shard_nodes_data( shard_nodes_data, expected_state='sharded', expected_shards=1, exp_obj_count=len(second_shard_objects) + 1, exp_sharded_root_range=True) # but the donor is empty and so reports zero stats self.assertEqual(0, obj_count) self.assertEqual(0, bytes_used) # check the donor own shard range state part, nodes = self.brain.ring.get_nodes( donor.account, donor.container) for node in nodes: with annotate_failure(node): broker = self.get_broker( part, node, donor.account, donor.container) own_sr = broker.get_own_shard_range() self.assertEqual(ShardRange.SHRUNK, own_sr.state) self.assertTrue(own_sr.deleted) # delete all the second shard's object apart from 'alpha' for obj in second_shard_objects: client.delete_object( self.url, self.token, self.container_name, obj) self.assert_container_listing([alpha]) # run sharders: second range should not shrink away yet because it # has tombstones self.sharders.once() # second shard updates root stats self.assert_container_listing([alpha]) self.sharders.once() # root finds shrinkable shard self.assert_container_listing([alpha]) self.sharders.once() # shards shrink themselves self.assert_container_listing([alpha]) # the acceptor shard is intact... 
shard_nodes_data = self.direct_get_container_shard_ranges( orig_shard_ranges[1].account, orig_shard_ranges[1].container) obj_count, bytes_used = check_shard_nodes_data(shard_nodes_data) self.assertEqual(1, obj_count) # run sharders to reclaim tombstones so that the second shard is # shrinkable shard_1_part = self.get_part_and_node_numbers( orig_shard_ranges[1])[0] for conf_index in self.configs['container-sharder'].keys(): self.run_custom_sharder(conf_index, {'reclaim_age': 0}, override_partitions=[shard_1_part]) self.assert_container_listing([alpha]) # run sharders so second range shrinks away, requires up to 2 # cycles self.sharders.once() # root finds shrinkable shard self.assert_container_listing([alpha]) self.sharders.once() # shards shrink themselves self.assert_container_listing([alpha]) # the second shard range has sharded and is empty shard_nodes_data = self.direct_get_container_shard_ranges( orig_shard_ranges[1].account, orig_shard_ranges[1].container) check_shard_nodes_data( shard_nodes_data, expected_state='sharded', expected_shards=1, exp_obj_count=1) # check root container root_nodes_data = self.direct_get_container_shard_ranges() self.assertEqual(3, len(root_nodes_data)) exp_hdrs = {'X-Backend-Sharding-State': 'collapsed', # just the alpha object 'X-Container-Object-Count': '1'} for node_id, node_data in root_nodes_data.items(): with annotate_failure('Node id %s.' % node_id): # NB now no shard ranges in root check_node_data(node_data, exp_hdrs, 0, 0) # delete the alpha object client.delete_object( self.url, self.token, self.container_name, alpha) # should now be able to delete the *apparently* empty container client.delete_container(self.url, self.token, self.container_name) self.assert_container_not_found() self.direct_head_container(expect_failure=True) # and the container stays deleted even after sharders run and shard # send updates self.sharders.once() self.assert_container_not_found() self.direct_head_container(expect_failure=True) # now run updaters to deal with the async pending for the beta # object self.updaters.once() # and the container is revived! 
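            # (the beta update that was stuck in async pending is applied to
            # the root db by the updater; since it is newer than the container
            # delete it brings the root back out of the deleted state)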
self.assert_container_listing([beta]) # finally, clear out the container client.delete_object( self.url, self.token, self.container_name, beta) do_shard_then_shrink() # repeat from starting point of a collapsed and previously deleted # container do_shard_then_shrink() def test_delete_root_reclaim(self): all_obj_names = self._make_object_names(self.max_shard_size) self.put_objects(all_obj_names) # Shard the container client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) for n in self.brain.node_numbers: self.sharders.once( number=n, additional_args='--partitions=%s' % self.brain.part) # sanity checks for node in self.brain.nodes: self.assert_container_state(node, 'sharded', 2) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') self.assert_container_listing(all_obj_names) # delete all objects - updates redirected to shards self.delete_objects(all_obj_names) self.assert_container_listing([]) self.assert_container_post_ok('has objects') # root not yet updated with shard stats self.assert_container_object_count(len(all_obj_names)) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() # run sharder on shard containers to update root stats shard_ranges = self.get_container_shard_ranges() self.assertLengthEqual(shard_ranges, 2) self.run_sharders(shard_ranges) self.assert_container_listing([]) self.assert_container_post_ok('empty') self.assert_container_object_count(0) # and now we can delete it! client.delete_container(self.url, self.token, self.container_name) self.assert_container_post_fails('deleted') self.assert_container_not_found() # see if it will reclaim Manager(['container-updater']).once() for conf_file in self.configs['container-replicator'].values(): conf = utils.readconf(conf_file, 'container-replicator') conf['reclaim_age'] = 0 ContainerReplicator(conf).run_once() # we don't expect warnings from sharder root audits for conf_index in self.configs['container-sharder'].keys(): sharder = self.run_custom_sharder(conf_index, {}) self.assertEqual([], sharder.logger.get_lines_for_level('warning')) # until the root wants to start reclaiming but we haven't shrunk yet! found_warning = False for conf_index in self.configs['container-sharder'].keys(): sharder = self.run_custom_sharder(conf_index, {'reclaim_age': 0}) warnings = sharder.logger.get_lines_for_level('warning') if warnings: self.assertTrue(warnings[0].startswith( 'Reclaimable db stuck waiting for shrinking')) self.assertEqual(1, len(warnings)) found_warning = True self.assertTrue(found_warning) # TODO: shrink empty shards and assert everything reclaims def _setup_replication_scenario(self, num_shards, extra_objs=('alpha',)): # Get cluster to state where 2 replicas are sharding or sharded but 3rd # replica is unsharded and has an object that the first 2 are missing. 
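        # In outline: put enough objects for the requested number of shards
        # while all nodes are up, enable sharding and run replicators to set
        # sync points, stop the first two nodes and put the extra object(s)
        # so that only the third replica has them, then restart the first two
        # nodes, stop the third, and run the sharder on the first two so they
        # start cleaving while the third replica stays unsharded.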
# put objects while all servers are up obj_names = self._make_object_names( num_shards * self.max_shard_size // 2) self.put_objects(obj_names) client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) node_numbers = self.brain.node_numbers # run replicators first time to get sync points set self.replicators.once() # stop the leader node and one other server self.stop_container_servers(slice(0, 2)) # ...then put one more object in first shard range namespace self.put_objects(extra_objs) # start leader and first other server, stop third server for number in node_numbers[:2]: self.brain.servers.start(number=number) self.brain.servers.stop(number=node_numbers[2]) self.assert_container_listing(obj_names) # sanity check # shard the container - first two shard ranges are cleaved for number in node_numbers[:2]: self.sharders.once( number=number, additional_args='--partitions=%s' % self.brain.part) self.assert_container_listing(obj_names) # sanity check return obj_names def test_replication_to_sharding_container(self): # verify that replication from an unsharded replica to a sharding # replica does not replicate rows but does replicate shard ranges obj_names = self._setup_replication_scenario(3) for node in self.brain.nodes[:2]: self.assert_container_state(node, 'sharding', 3) # bring third server back up, run replicator node_numbers = self.brain.node_numbers self.brain.servers.start(number=node_numbers[2]) # sanity check... self.assert_container_state(self.brain.nodes[2], 'unsharded', 0) self.replicators.once(number=node_numbers[2]) # check db files unchanged found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 2) self.assertLengthEqual(found['normal_dbs'], 3) # the 'alpha' object is NOT replicated to the two sharded nodes for node in self.brain.nodes[:2]: broker = self.get_broker(self.brain.part, node) with annotate_failure( 'Node id %s in %s' % (node['id'], self.brain.nodes[:2])): self.assertFalse(broker.get_objects()) self.assert_container_state(node, 'sharding', 3) self.brain.servers.stop(number=node_numbers[2]) self.assert_container_listing(obj_names) # all nodes now have shard ranges self.brain.servers.start(number=node_numbers[2]) node_data = self.direct_get_container_shard_ranges() for node, (hdrs, shard_ranges) in node_data.items(): with annotate_failure(node): self.assert_shard_ranges_contiguous(3, shard_ranges) # complete cleaving third shard range on first two nodes self.brain.servers.stop(number=node_numbers[2]) for number in node_numbers[:2]: self.sharders.once( number=number, additional_args='--partitions=%s' % self.brain.part) # ...and now they are in sharded state self.assert_container_state(self.brain.nodes[0], 'sharded', 3) self.assert_container_state(self.brain.nodes[1], 'sharded', 3) # ...still no 'alpha' object in listing self.assert_container_listing(obj_names) # run the sharder on the third server, alpha object is included in # shards that it cleaves self.brain.servers.start(number=node_numbers[2]) self.assert_container_state(self.brain.nodes[2], 'unsharded', 3) self.sharders.once(number=node_numbers[2], additional_args='--partitions=%s' % self.brain.part) self.assert_container_state(self.brain.nodes[2], 'sharding', 3) self.sharders.once(number=node_numbers[2], additional_args='--partitions=%s' % self.brain.part) self.assert_container_state(self.brain.nodes[2], 'sharded', 3) self.assert_container_listing(['alpha'] + obj_names) def test_replication_to_sharded_container(self): # verify 
that replication from an unsharded replica to a sharded # replica does not replicate rows but does replicate shard ranges obj_names = self._setup_replication_scenario(2) for node in self.brain.nodes[:2]: self.assert_container_state(node, 'sharded', 2) # sanity check found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 2) self.assertLengthEqual(found['normal_dbs'], 1) for node in self.brain.nodes[:2]: broker = self.get_broker(self.brain.part, node) info = broker.get_info() with annotate_failure( 'Node id %s in %s' % (node['id'], self.brain.nodes[:2])): self.assertEqual(len(obj_names), info['object_count']) self.assertFalse(broker.get_objects()) # bring third server back up, run replicator node_numbers = self.brain.node_numbers self.brain.servers.start(number=node_numbers[2]) # sanity check... self.assert_container_state(self.brain.nodes[2], 'unsharded', 0) self.replicators.once(number=node_numbers[2]) # check db files unchanged found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 2) self.assertLengthEqual(found['normal_dbs'], 1) # the 'alpha' object is NOT replicated to the two sharded nodes for node in self.brain.nodes[:2]: broker = self.get_broker(self.brain.part, node) with annotate_failure( 'Node id %s in %s' % (node['id'], self.brain.nodes[:2])): self.assertFalse(broker.get_objects()) self.assert_container_state(node, 'sharded', 2) self.brain.servers.stop(number=node_numbers[2]) self.assert_container_listing(obj_names) # all nodes now have shard ranges self.brain.servers.start(number=node_numbers[2]) node_data = self.direct_get_container_shard_ranges() for node, (hdrs, shard_ranges) in node_data.items(): with annotate_failure(node): self.assert_shard_ranges_contiguous(2, shard_ranges) # run the sharder on the third server, alpha object is included in # shards that it cleaves self.assert_container_state(self.brain.nodes[2], 'unsharded', 2) self.sharders.once(number=node_numbers[2], additional_args='--partitions=%s' % self.brain.part) self.assert_container_state(self.brain.nodes[2], 'sharded', 2) self.assert_container_listing(['alpha'] + obj_names) def test_sharding_requires_sufficient_replication(self): # verify that cleaving only progresses if each cleaved shard range is # sufficiently replicated # put enough objects for 4 shard ranges obj_names = self._make_object_names(2 * self.max_shard_size) self.put_objects(obj_names) client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) node_numbers = self.brain.node_numbers leader_node = self.brain.nodes[0] leader_num = node_numbers[0] # run replicators first time to get sync points set self.replicators.once() # start sharding on the leader node self.sharders.once(number=leader_num, additional_args='--partitions=%s' % self.brain.part) shard_ranges = self.assert_container_state(leader_node, 'sharding', 4) self.assertEqual([ShardRange.CLEAVED] * 2 + [ShardRange.CREATED] * 2, [sr.state for sr in shard_ranges]) # Check the current progress. It shouldn't be complete. 
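        # (the sharder publishes per-db progress via recon; with only two of
        # the four ranges cleaved so far we expect cleaved=2, created=2 and a
        # 'sharding' db_state in the stats asserted below)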
recon = direct_client.direct_get_recon(leader_node, "sharding") expected_in_progress = {'all': [{'account': 'AUTH_test', 'active': 0, 'cleaved': 2, 'created': 2, 'found': 0, 'db_state': 'sharding', 'state': 'sharding', 'error': None, 'file_size': mock.ANY, 'meta_timestamp': mock.ANY, 'node_index': 0, 'object_count': len(obj_names), 'container': mock.ANY, 'path': mock.ANY, 'root': mock.ANY}]} actual = recon['sharding_stats']['sharding']['sharding_in_progress'] self.assertEqual(expected_in_progress, actual) # stop *all* container servers for third shard range sr_part, sr_node_nums = self.get_part_and_node_numbers(shard_ranges[2]) for node_num in sr_node_nums: self.brain.servers.stop(number=node_num) # attempt to continue sharding on the leader node self.sharders.once(number=leader_num, additional_args='--partitions=%s' % self.brain.part) # no cleaving progress was made for node_num in sr_node_nums: self.brain.servers.start(number=node_num) shard_ranges = self.assert_container_state(leader_node, 'sharding', 4) self.assertEqual([ShardRange.CLEAVED] * 2 + [ShardRange.CREATED] * 2, [sr.state for sr in shard_ranges]) # stop two of the servers for third shard range, not including any # server that happens to be the leader node stopped = [] for node_num in sr_node_nums: if node_num != leader_num: self.brain.servers.stop(number=node_num) stopped.append(node_num) if len(stopped) >= 2: break self.assertLengthEqual(stopped, 2) # sanity check # attempt to continue sharding on the leader node self.sharders.once(number=leader_num, additional_args='--partitions=%s' % self.brain.part) # no cleaving progress was made for node_num in stopped: self.brain.servers.start(number=node_num) shard_ranges = self.assert_container_state(leader_node, 'sharding', 4) self.assertEqual([ShardRange.CLEAVED] * 2 + [ShardRange.CREATED] * 2, [sr.state for sr in shard_ranges]) # stop just one of the servers for third shard range stopped = [] for node_num in sr_node_nums: if node_num != leader_num: self.brain.servers.stop(number=node_num) stopped.append(node_num) break self.assertLengthEqual(stopped, 1) # sanity check # attempt to continue sharding the container self.sharders.once(number=leader_num, additional_args='--partitions=%s' % self.brain.part) # this time cleaving completed self.brain.servers.start(number=stopped[0]) shard_ranges = self.assert_container_state(leader_node, 'sharded', 4) self.assertEqual([ShardRange.ACTIVE] * 4, [sr.state for sr in shard_ranges]) # Check the leader's progress again, this time is should be complete recon = direct_client.direct_get_recon(leader_node, "sharding") expected_in_progress = {'all': [{'account': 'AUTH_test', 'active': 4, 'cleaved': 0, 'created': 0, 'found': 0, 'db_state': 'sharded', 'state': 'sharded', 'error': None, 'file_size': mock.ANY, 'meta_timestamp': mock.ANY, 'node_index': 0, 'object_count': len(obj_names), 'container': mock.ANY, 'path': mock.ANY, 'root': mock.ANY}]} actual = recon['sharding_stats']['sharding']['sharding_in_progress'] self.assertEqual(expected_in_progress, actual) def test_sharded_delete(self): all_obj_names = self._make_object_names(self.max_shard_size) self.put_objects(all_obj_names) # Shard the container client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) for n in self.brain.node_numbers: self.sharders.once( number=n, additional_args='--partitions=%s' % self.brain.part) # sanity checks for node in self.brain.nodes: self.assert_container_state(node, 'sharded', 2) self.assert_container_delete_fails() 
self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') self.assert_container_listing(all_obj_names) # delete all objects - updates redirected to shards self.delete_objects(all_obj_names) self.assert_container_listing([]) self.assert_container_post_ok('has objects') # root not yet updated with shard stats self.assert_container_object_count(len(all_obj_names)) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() # run sharder on shard containers to update root stats shard_ranges = self.get_container_shard_ranges() self.assertLengthEqual(shard_ranges, 2) self.run_sharders(shard_ranges) self.assert_container_listing([]) self.assert_container_post_ok('empty') self.assert_container_object_count(0) # put a new object - update redirected to shard self.put_objects(['alpha']) self.assert_container_listing(['alpha']) self.assert_container_object_count(0) # before root learns about new object in shard, delete the container client.delete_container(self.url, self.token, self.container_name) self.assert_container_post_fails('deleted') self.assert_container_not_found() # run the sharders to update root with shard stats self.run_sharders(shard_ranges) self.assert_container_listing(['alpha']) self.assert_container_object_count(1) self.assert_container_delete_fails() self.assert_container_post_ok('revived') def test_object_update_redirection(self): all_obj_names = self._make_object_names(self.max_shard_size) self.put_objects(all_obj_names) # Shard the container client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) for n in self.brain.node_numbers: self.sharders.once( number=n, additional_args='--partitions=%s' % self.brain.part) # sanity checks for node in self.brain.nodes: self.assert_container_state(node, 'sharded', 2) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') self.assert_container_listing(all_obj_names) # delete all objects - updates redirected to shards self.delete_objects(all_obj_names) self.assert_container_listing([]) self.assert_container_post_ok('has objects') # run sharder on shard containers to update root stats; reclaim # the tombstones so that the shards appear to be shrinkable shard_ranges = self.get_container_shard_ranges() self.assertLengthEqual(shard_ranges, 2) shard_partitions = [self.get_part_and_node_numbers(sr)[0] for sr in shard_ranges] for conf_index in self.configs['container-sharder'].keys(): self.run_custom_sharder(conf_index, {'reclaim_age': 0}, override_partitions=shard_partitions) self.assert_container_object_count(0) # First, test a misplaced object moving from one shard to another. # with one shard server down, put a new 'alpha' object... 
shard_part, shard_nodes = self.get_part_and_node_numbers( shard_ranges[0]) self.brain.servers.stop(number=shard_nodes[2]) self.put_objects(['alpha']) self.assert_container_listing(['alpha']) self.assert_container_object_count(0) self.assertLengthEqual( self.gather_async_pendings(self.get_all_object_nodes()), 1) self.brain.servers.start(number=shard_nodes[2]) # run sharder on root to discover first shrink candidate self.sharders.once(additional_args='--partitions=%s' % self.brain.part) # then run sharder on the shard node without the alpha object self.sharders.once(additional_args='--partitions=%s' % shard_part, number=shard_nodes[2]) # root sees first shard has shrunk self.assertLengthEqual(self.get_container_shard_ranges(), 1) # cached shard ranges still show first shard range as active so listing # will include 'alpha' if the shard listing is fetched from node (0,1) # but not if fetched from node 2; to achieve predictability we use # x-newest to use shard ranges from the root so that only the second # shard range is used for listing, so alpha object not in listing self.assert_container_listing([], req_hdrs={'x-newest': 'true'}) self.assert_container_object_count(0) # run the updaters: the async pending update will be redirected from # shrunk shard to second shard self.updaters.once() self.assert_container_listing(['alpha']) self.assert_container_object_count(0) # root not yet updated # then run sharder on other shard nodes to complete shrinking for number in shard_nodes[:2]: self.sharders.once(additional_args='--partitions=%s' % shard_part, number=number) # and get root updated self.run_sharders(shard_ranges[1]) self.assert_container_listing(['alpha']) self.assert_container_object_count(1) self.assertLengthEqual(self.get_container_shard_ranges(), 1) # Now we have just one active shard, test a misplaced object moving # from that shard to the root. # with one shard server down, delete 'alpha' and put a 'beta' object... 
shard_part, shard_nodes = self.get_part_and_node_numbers( shard_ranges[1]) self.brain.servers.stop(number=shard_nodes[2]) # Before writing, kill the cache self.memcache.delete(get_cache_key( self.account, self.container_name, shard='updating')) self.delete_objects(['alpha']) self.put_objects(['beta']) self.assert_container_listing(['beta']) self.assert_container_object_count(1) self.assertLengthEqual( self.gather_async_pendings(self.get_all_object_nodes()), 2) self.brain.servers.start(number=shard_nodes[2]) # run sharder on root to discover second shrink candidate - root is not # yet aware of the beta object self.sharders.once(additional_args='--partitions=%s' % self.brain.part) # then run sharder on the shard node without the beta object, to shrink # it to root - note this moves stale copy of alpha to the root db self.sharders.once(additional_args='--partitions=%s' % shard_part, number=shard_nodes[2]) # now there are no active shards self.assertFalse(self.get_container_shard_ranges()) # with other two shard servers down, listing won't find beta object for number in shard_nodes[:2]: self.brain.servers.stop(number=number) self.assert_container_listing(['alpha']) self.assert_container_object_count(1) # run the updaters: the async pending update will be redirected from # shrunk shard to the root self.updaters.once() self.assert_container_listing(['beta']) self.assert_container_object_count(1) def test_misplaced_object_movement(self): def merge_object(shard_range, name, deleted=0): # it's hard to get a test to put a misplaced object into a shard, # so this hack is used force an object record directly into a shard # container db. Note: the actual object won't exist, we're just # using this to test object records in container dbs. shard_part, shard_nodes = self.brain.ring.get_nodes( shard_range.account, shard_range.container) shard_broker = self.get_broker( shard_part, shard_nodes[0], shard_range.account, shard_range.container) shard_broker.merge_items( [{'name': name, 'created_at': Timestamp.now().internal, 'size': 0, 'content_type': 'text/plain', 'etag': md5(usedforsecurity=False).hexdigest(), 'deleted': deleted, 'storage_policy_index': shard_broker.storage_policy_index}]) return shard_nodes[0] all_obj_names = self._make_object_names(self.max_shard_size) self.put_objects(all_obj_names) # Shard the container client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) for n in self.brain.node_numbers: self.sharders.once( number=n, additional_args='--partitions=%s' % self.brain.part) # sanity checks for node in self.brain.nodes: self.assert_container_state(node, 'sharded', 2) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') self.assert_container_listing(all_obj_names) # delete all objects in first shard range - updates redirected to shard shard_ranges = self.get_container_shard_ranges() self.assertLengthEqual(shard_ranges, 2) shard_0_objects = [name for name in all_obj_names if name in shard_ranges[0]] shard_1_objects = [name for name in all_obj_names if name in shard_ranges[1]] self.delete_objects(shard_0_objects) self.assert_container_listing(shard_1_objects) self.assert_container_post_ok('has objects') # run sharder on first shard container to update root stats; reclaim # the tombstones so that the shard appears to be shrinkable shard_0_part = self.get_part_and_node_numbers(shard_ranges[0])[0] for conf_index in self.configs['container-sharder'].keys(): 
self.run_custom_sharder(conf_index, {'reclaim_age': 0}, override_partitions=[shard_0_part]) self.assert_container_object_count(len(shard_1_objects)) # First, test a misplaced object moving from one shard to another. # run sharder on root to discover first shrink candidate self.sharders.once(additional_args='--partitions=%s' % self.brain.part) # then run sharder on first shard range to shrink it self.run_sharders(shard_ranges[0]) # force a misplaced object into the shrunken shard range to simulate # a client put that was in flight when it started to shrink misplaced_node = merge_object(shard_ranges[0], 'alpha', deleted=0) # root sees first shard has shrunk, only second shard range used for # listing so alpha object not in listing self.assertLengthEqual(self.get_container_shard_ranges(), 1) self.assert_container_listing(shard_1_objects) self.assert_container_object_count(len(shard_1_objects)) # until sharder runs on that node to move the misplaced object to the # second shard range shard_part, shard_nodes_numbers = self.get_part_and_node_numbers( shard_ranges[0]) self.sharders.once(additional_args='--partitions=%s' % shard_part, number=misplaced_node['id'] + 1) self.assert_container_listing(['alpha'] + shard_1_objects) # root not yet updated self.assert_container_object_count(len(shard_1_objects)) # run sharder to get root updated self.run_sharders(shard_ranges[1]) self.assert_container_listing(['alpha'] + shard_1_objects) self.assert_container_object_count(len(shard_1_objects) + 1) self.assertLengthEqual(self.get_container_shard_ranges(), 1) # Now we have just one active shard, test a misplaced object moving # from that shard to the root. # delete most objects from second shard range, reclaim the tombstones, # and run sharder on root to discover second shrink candidate self.delete_objects(shard_1_objects) shard_1_part = self.get_part_and_node_numbers(shard_ranges[1])[0] for conf_index in self.configs['container-sharder'].keys(): self.run_custom_sharder(conf_index, {'reclaim_age': 0}, override_partitions=[shard_1_part]) self.sharders.once(additional_args='--partitions=%s' % self.brain.part) # then run sharder on the shard node to shrink it to root - note this # moves alpha to the root db self.run_sharders(shard_ranges[1]) # now there are no active shards self.assertFalse(self.get_container_shard_ranges()) # force some misplaced object updates into second shrunk shard range merge_object(shard_ranges[1], 'alpha', deleted=1) misplaced_node = merge_object(shard_ranges[1], 'beta', deleted=0) # root is not yet aware of them self.assert_container_listing(['alpha']) self.assert_container_object_count(1) # until sharder runs on that node to move the misplaced object shard_part, shard_nodes_numbers = self.get_part_and_node_numbers( shard_ranges[1]) self.sharders.once(additional_args='--partitions=%s' % shard_part, number=misplaced_node['id'] + 1) self.assert_container_listing(['beta']) self.assert_container_object_count(1) self.assert_container_delete_fails() def test_misplaced_object_movement_from_deleted_shard(self): def merge_object(shard_range, name, deleted=0): # it's hard to get a test to put a misplaced object into a shard, # so this hack is used force an object record directly into a shard # container db. Note: the actual object won't exist, we're just # using this to test object records in container dbs. 
shard_part, shard_nodes = self.brain.ring.get_nodes( shard_range.account, shard_range.container) shard_broker = self.get_shard_broker(shard_range) # In this test we want to merge into a deleted container shard shard_broker.delete_db(Timestamp.now().internal) shard_broker.merge_items( [{'name': name, 'created_at': Timestamp.now().internal, 'size': 0, 'content_type': 'text/plain', 'etag': md5(usedforsecurity=False).hexdigest(), 'deleted': deleted, 'storage_policy_index': shard_broker.storage_policy_index}]) return shard_nodes[0] all_obj_names = self._make_object_names(self.max_shard_size) self.put_objects(all_obj_names) # Shard the container client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) for n in self.brain.node_numbers: self.sharders.once( number=n, additional_args='--partitions=%s' % self.brain.part) # sanity checks for node in self.brain.nodes: self.assert_container_state(node, 'sharded', 2) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') self.assert_container_listing(all_obj_names) # delete all objects in first shard range - updates redirected to shard shard_ranges = self.get_container_shard_ranges() self.assertLengthEqual(shard_ranges, 2) shard_0_objects = [name for name in all_obj_names if name in shard_ranges[0]] shard_1_objects = [name for name in all_obj_names if name in shard_ranges[1]] self.delete_objects(shard_0_objects) self.assert_container_listing(shard_1_objects) self.assert_container_post_ok('has objects') # run sharder on first shard container to update root stats shard_0_part = self.get_part_and_node_numbers(shard_ranges[0])[0] for conf_index in self.configs['container-sharder'].keys(): self.run_custom_sharder(conf_index, {'reclaim_age': 0}, override_partitions=[shard_0_part]) self.assert_container_object_count(len(shard_1_objects)) # First, test a misplaced object moving from one shard to another. 
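# (a 'misplaced' object here is an object row sitting in a container db
# whose namespace does not cover that object's name, e.g. because the
# shard has since shrunk or been deleted; the sharder's misplaced-objects
# step forwards such rows to whichever shard range currently owns the
# name. The merge_object() helper above fakes such a row directly in the
# shard db because a real in-flight client PUT is hard to arrange in a
# test.)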
# run sharder on root to discover first shrink candidate self.sharders.once(additional_args='--partitions=%s' % self.brain.part) # then run sharder on first shard range to shrink it self.run_sharders(shard_ranges[0]) # force a misplaced object into the shrunken shard range to simulate # a client put that was in flight when it started to shrink misplaced_node = merge_object(shard_ranges[0], 'alpha', deleted=0) # root sees first shard has shrunk, only second shard range used for # listing so alpha object not in listing self.assertLengthEqual(self.get_container_shard_ranges(), 1) self.assert_container_listing(shard_1_objects) self.assert_container_object_count(len(shard_1_objects)) # until sharder runs on that node to move the misplaced object to the # second shard range shard_part, shard_nodes_numbers = self.get_part_and_node_numbers( shard_ranges[0]) self.sharders.once(additional_args='--partitions=%s' % shard_part, number=misplaced_node['id'] + 1) self.assert_container_listing(['alpha'] + shard_1_objects) # root not yet updated self.assert_container_object_count(len(shard_1_objects)) # check the deleted shard did not push the wrong root path into the # other container for replica in 0, 1, 2: shard_x_broker = self.get_shard_broker(shard_ranges[1], replica) self.assertEqual("%s/%s" % (self.account, self.container_name), shard_x_broker.root_path) # run the sharder of the existing shard to update the root stats # to prove the misplaced object was moved to the other shard _and_ # the other shard still has the correct root because it updates root's # stats self.run_sharders(shard_ranges[1]) self.assert_container_object_count(len(shard_1_objects) + 1) def test_replication_to_sharded_container_from_unsharded_old_primary(self): primary_ids = [n['id'] for n in self.brain.nodes] handoff_node = next(n for n in self.brain.ring.devs if n['id'] not in primary_ids) # start with two sharded replicas and one unsharded with extra object obj_names = self._setup_replication_scenario(2) for node in self.brain.nodes[:2]: self.assert_container_state(node, 'sharded', 2) # Fake a ring change - copy unsharded db which has no shard ranges to a # handoff to create illusion of a new unpopulated primary node node_numbers = self.brain.node_numbers new_primary_node = self.brain.nodes[2] new_primary_node_number = node_numbers[2] new_primary_dir, container_hash = self.get_storage_dir( self.brain.part, new_primary_node) old_primary_dir, container_hash = self.get_storage_dir( self.brain.part, handoff_node) utils.mkdirs(os.path.dirname(old_primary_dir)) shutil.move(new_primary_dir, old_primary_dir) # make the cluster more or less "healthy" again self.brain.servers.start(number=new_primary_node_number) # get a db on every node... client.put_container(self.url, self.token, self.container_name) self.assertTrue(os.path.exists(os.path.join( new_primary_dir, container_hash + '.db'))) found = self.categorize_container_dir_content() self.assertLengthEqual(found['normal_dbs'], 1) # "new" primary self.assertLengthEqual(found['shard_dbs'], 2) # existing primaries # catastrophic failure! drive dies and is replaced on unchanged primary failed_node = self.brain.nodes[0] failed_dir, _container_hash = self.get_storage_dir( self.brain.part, failed_node) shutil.rmtree(failed_dir) # replicate the "old primary" to everybody except the "new primary" self.brain.servers.stop(number=new_primary_node_number) self.replicators.once(number=handoff_node['id'] + 1) # We're willing to rsync the retiring db to the failed primary. 
# This may or may not have shard ranges, depending on the order in # which we hit the primaries, but it definitely *doesn't* have an # epoch in its name yet. All objects are replicated. self.assertTrue(os.path.exists(os.path.join( failed_dir, container_hash + '.db'))) self.assertLengthEqual(os.listdir(failed_dir), 1) broker = self.get_broker(self.brain.part, failed_node) self.assertLengthEqual(broker.get_objects(), len(obj_names) + 1) # The other out-of-date primary is within usync range but objects are # not replicated to it because the handoff db learns about shard ranges broker = self.get_broker(self.brain.part, self.brain.nodes[1]) self.assertLengthEqual(broker.get_objects(), 0) # Handoff db still exists and now has shard ranges! self.assertTrue(os.path.exists(os.path.join( old_primary_dir, container_hash + '.db'))) broker = self.get_broker(self.brain.part, handoff_node) shard_ranges = broker.get_shard_ranges() self.assertLengthEqual(shard_ranges, 2) self.assert_container_state(handoff_node, 'unsharded', 2) # Replicate again, this time *including* "new primary" self.brain.servers.start(number=new_primary_node_number) self.replicators.once(number=handoff_node['id'] + 1) # Ordinarily, we would have rsync_then_merge'd to "new primary" # but instead we wait broker = self.get_broker(self.brain.part, new_primary_node) self.assertLengthEqual(broker.get_objects(), 0) shard_ranges = broker.get_shard_ranges() self.assertLengthEqual(shard_ranges, 2) # so the next time the sharder comes along, it can push rows out # and delete the big db self.sharders.once(number=handoff_node['id'] + 1, additional_args='--partitions=%s' % self.brain.part) self.assert_container_state(handoff_node, 'sharded', 2) self.assertFalse(os.path.exists(os.path.join( old_primary_dir, container_hash + '.db'))) # the sharded db hangs around until replication confirms durability # first attempt is not sufficiently successful self.brain.servers.stop(number=node_numbers[0]) self.replicators.once(number=handoff_node['id'] + 1) self.assertTrue(os.path.exists(old_primary_dir)) self.assert_container_state(handoff_node, 'sharded', 2) # second attempt is successful and handoff db is deleted self.brain.servers.start(number=node_numbers[0]) self.replicators.once(number=handoff_node['id'] + 1) self.assertFalse(os.path.exists(old_primary_dir)) # run all the sharders, get us into a consistent state self.sharders.once(additional_args='--partitions=%s' % self.brain.part) self.assert_container_listing(['alpha'] + obj_names) def test_replication_to_empty_new_primary_from_sharding_old_primary(self): primary_ids = [n['id'] for n in self.brain.nodes] handoff_node = next(n for n in self.brain.ring.devs if n['id'] not in primary_ids) num_shards = 3 obj_names = self._make_object_names( num_shards * self.max_shard_size // 2) self.put_objects(obj_names) client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) # run replicators first time to get sync points set self.replicators.once() # start sharding on only the leader node leader_node = self.brain.nodes[0] leader_node_number = self.brain.node_numbers[0] self.sharders.once(number=leader_node_number) self.assert_container_state(leader_node, 'sharding', 3) for node in self.brain.nodes[1:]: self.assert_container_state(node, 'unsharded', 3) # Fake a ring change - copy leader node db to a handoff to create # illusion of a new unpopulated primary leader node new_primary_dir, container_hash = self.get_storage_dir( self.brain.part, leader_node) 
old_primary_dir, container_hash = self.get_storage_dir( self.brain.part, handoff_node) utils.mkdirs(os.path.dirname(old_primary_dir)) shutil.move(new_primary_dir, old_primary_dir) self.assert_container_state(handoff_node, 'sharding', 3) # run replicator on handoff node to create a fresh db on new primary self.assertFalse(os.path.exists(new_primary_dir)) self.replicators.once(number=handoff_node['id'] + 1) self.assertTrue(os.path.exists(new_primary_dir)) self.assert_container_state(leader_node, 'sharded', 3) broker = self.get_broker(self.brain.part, leader_node) shard_ranges = broker.get_shard_ranges() self.assertLengthEqual(shard_ranges, 3) self.assertEqual( [ShardRange.CLEAVED, ShardRange.CLEAVED, ShardRange.CREATED], [sr.state for sr in shard_ranges]) # db still exists on handoff self.assertTrue(os.path.exists(old_primary_dir)) self.assert_container_state(handoff_node, 'sharding', 3) # continue sharding it... self.sharders.once(number=handoff_node['id'] + 1) self.assert_container_state(leader_node, 'sharded', 3) # now handoff is fully sharded the replicator will delete it self.replicators.once(number=handoff_node['id'] + 1) self.assertFalse(os.path.exists(old_primary_dir)) # all primaries now have active shard ranges but only one is in sharded # state self.assert_container_state(leader_node, 'sharded', 3) for node in self.brain.nodes[1:]: self.assert_container_state(node, 'unsharded', 3) node_data = self.direct_get_container_shard_ranges() for node_id, (hdrs, shard_ranges) in node_data.items(): with annotate_failure( 'node id %s from %s' % (node_id, node_data.keys)): self.assert_shard_range_state(ShardRange.ACTIVE, shard_ranges) # check handoff cleaved all objects before it was deleted - stop all # but leader node so that listing is fetched from shards for number in self.brain.node_numbers[1:3]: self.brain.servers.stop(number=number) self.assert_container_listing(obj_names) for number in self.brain.node_numbers[1:3]: self.brain.servers.start(number=number) self.sharders.once() self.assert_container_state(leader_node, 'sharded', 3) for node in self.brain.nodes[1:]: self.assert_container_state(node, 'sharding', 3) self.sharders.once() for node in self.brain.nodes: self.assert_container_state(node, 'sharded', 3) self.assert_container_listing(obj_names) def test_sharded_account_updates(self): # verify that .shards account updates have zero object count and bytes # to avoid double accounting all_obj_names = self._make_object_names(self.max_shard_size) self.put_objects(all_obj_names, contents='xyz') # Shard the container into 2 shards client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) for n in self.brain.node_numbers: self.sharders.once( number=n, additional_args='--partitions=%s' % self.brain.part) # sanity checks for node in self.brain.nodes: shard_ranges = self.assert_container_state(node, 'sharded', 2) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') self.assert_container_listing(all_obj_names) # run the updaters to get account stats updated self.updaters.once() # check user account stats metadata = self.internal_client.get_account_metadata(self.account) self.assertEqual(1, int(metadata.get('x-account-container-count'))) self.assertEqual(self.max_shard_size, int(metadata.get('x-account-object-count'))) self.assertEqual(3 * self.max_shard_size, int(metadata.get('x-account-bytes-used'))) # check hidden .shards account stats metadata = 
self.internal_client.get_account_metadata( shard_ranges[0].account) self.assertEqual(2, int(metadata.get('x-account-container-count'))) self.assertEqual(0, int(metadata.get('x-account-object-count'))) self.assertEqual(0, int(metadata.get('x-account-bytes-used'))) class TestContainerShardingMoreUTF8(TestContainerSharding): def _make_object_names(self, number): # override default with names that include non-ascii chars name_length = self.cluster_info['swift']['max_object_name_length'] obj_names = [] for x in range(number): name = (u'obj-\u00e4\u00ea\u00ec\u00f2\u00fb-%04d' % x) name = name.encode('utf8').ljust(name_length, b'o') if not six.PY2: name = name.decode('utf8') obj_names.append(name) return obj_names def _setup_container_name(self): # override default with max length name that includes non-ascii chars super(TestContainerShardingMoreUTF8, self)._setup_container_name() name_length = self.cluster_info['swift']['max_container_name_length'] cont_name = \ self.container_name + u'-\u00e4\u00ea\u00ec\u00f2\u00fb\u1234' self.container_name = cont_name.encode('utf8').ljust(name_length, b'x') if not six.PY2: self.container_name = self.container_name.decode('utf8') class TestManagedContainerSharding(BaseTestContainerSharding): '''Test sharding using swift-manage-shard-ranges''' def sharders_once(self, **kwargs): # inhibit auto_sharding regardless of the config setting additional_args = kwargs.get('additional_args', []) if not isinstance(additional_args, list): additional_args = [additional_args] additional_args.append('--no-auto-shard') kwargs['additional_args'] = additional_args self.sharders.once(**kwargs) def test_manage_shard_ranges(self): obj_names = self._make_object_names(7) self.put_objects(obj_names) client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) # run replicators first time to get sync points set self.replicators.once() # sanity check: we don't have nearly enough objects for this to shard # automatically self.sharders_once(number=self.brain.node_numbers[0], additional_args='--partitions=%s' % self.brain.part) self.assert_container_state(self.brain.nodes[0], 'unsharded', 0) self.assert_subprocess_success([ 'swift-manage-shard-ranges', self.get_db_file(self.brain.part, self.brain.nodes[0]), 'find_and_replace', '3', '--enable', '--minimum-shard-size', '2']) self.assert_container_state(self.brain.nodes[0], 'unsharded', 2) # "Run container-replicator to replicate them to other nodes." self.replicators.once() # "Run container-sharder on all nodes to shard the container." 
self.sharders_once(additional_args='--partitions=%s' % self.brain.part) # Everybody's settled self.assert_container_state(self.brain.nodes[0], 'sharded', 2) self.assert_container_state(self.brain.nodes[1], 'sharded', 2) self.assert_container_state(self.brain.nodes[2], 'sharded', 2) self.assert_container_listing(obj_names) def test_manage_shard_ranges_compact(self): # verify shard range compaction using swift-manage-shard-ranges obj_names = self._make_object_names(8) self.put_objects(obj_names) client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) # run replicators first time to get sync points set, and get container # sharded into 4 shards self.replicators.once() self.assert_subprocess_success([ 'swift-manage-shard-ranges', self.get_db_file(self.brain.part, self.brain.nodes[0]), 'find_and_replace', '2', '--enable']) self.assert_container_state(self.brain.nodes[0], 'unsharded', 4) self.replicators.once() # run sharders twice to cleave all 4 shard ranges self.sharders_once(additional_args='--partitions=%s' % self.brain.part) self.sharders_once(additional_args='--partitions=%s' % self.brain.part) self.assert_container_state(self.brain.nodes[0], 'sharded', 4) self.assert_container_state(self.brain.nodes[1], 'sharded', 4) self.assert_container_state(self.brain.nodes[2], 'sharded', 4) self.assert_container_listing(obj_names) # now compact some ranges; use --max-shrinking to allow 2 shrinking # shards self.assert_subprocess_success([ 'swift-manage-shard-ranges', self.get_db_file(self.brain.part, self.brain.nodes[0]), 'compact', '--max-expanding', '1', '--max-shrinking', '2', '--yes']) shard_ranges = self.assert_container_state( self.brain.nodes[0], 'sharded', 4) self.assertEqual([ShardRange.SHRINKING] * 2 + [ShardRange.ACTIVE] * 2, [sr.state for sr in shard_ranges]) self.replicators.once() self.sharders_once() # check there's now just 2 remaining shard ranges shard_ranges = self.assert_container_state( self.brain.nodes[0], 'sharded', 2) self.assertEqual([ShardRange.ACTIVE] * 2, [sr.state for sr in shard_ranges]) self.assert_container_listing(obj_names, req_hdrs={'X-Newest': 'True'}) # root container own shard range should still be SHARDED for i, node in enumerate(self.brain.nodes): with annotate_failure('node[%d]' % i): broker = self.get_broker(self.brain.part, self.brain.nodes[0]) self.assertEqual(ShardRange.SHARDED, broker.get_own_shard_range().state) # now compact the final two shard ranges to the root; use # --max-shrinking to allow 2 shrinking shards self.assert_subprocess_success([ 'swift-manage-shard-ranges', self.get_db_file(self.brain.part, self.brain.nodes[0]), 'compact', '--yes', '--max-shrinking', '2']) shard_ranges = self.assert_container_state( self.brain.nodes[0], 'sharded', 2) self.assertEqual([ShardRange.SHRINKING] * 2, [sr.state for sr in shard_ranges]) self.replicators.once() self.sharders_once() self.assert_container_state(self.brain.nodes[0], 'collapsed', 0) self.assert_container_listing(obj_names, req_hdrs={'X-Newest': 'True'}) # root container own shard range should now be ACTIVE for i, node in enumerate(self.brain.nodes): with annotate_failure('node[%d]' % i): broker = self.get_broker(self.brain.part, self.brain.nodes[0]) self.assertEqual(ShardRange.ACTIVE, broker.get_own_shard_range().state) def test_manage_shard_ranges_repair_root(self): # provoke overlaps in root container and repair obj_names = self._make_object_names(16) self.put_objects(obj_names) client.post_container(self.url, self.admin_token, 
self.container_name, headers={'X-Container-Sharding': 'on'}) # run replicators first time to get sync points set self.replicators.once() # find 4 shard ranges on nodes[0] - let's denote these ranges 0.0, 0.1, # 0.2 and 0.3 that are installed with epoch_0 self.assert_subprocess_success([ 'swift-manage-shard-ranges', self.get_db_file(self.brain.part, self.brain.nodes[0]), 'find_and_replace', '4', '--enable']) shard_ranges_0 = self.assert_container_state(self.brain.nodes[0], 'unsharded', 4) # *Also* go find 3 shard ranges on *another node*, like a dumb-dumb - # let's denote these ranges 1.0, 1.1 and 1.2 that are installed with # epoch_1 self.assert_subprocess_success([ 'swift-manage-shard-ranges', self.get_db_file(self.brain.part, self.brain.nodes[1]), 'find_and_replace', '7', '--enable']) shard_ranges_1 = self.assert_container_state(self.brain.nodes[1], 'unsharded', 3) # Run sharder in specific order so that the replica with the older # epoch_0 starts sharding first - this will prove problematic later! # On first pass the first replica passes audit, creates shards and then # syncs shard ranges with the other replicas, so it has a mix of 0.* # shard ranges in CLEAVED state and 1.* ranges in FOUND state. It # proceeds to cleave shard 0.0, but after 0.0 cleaving stalls because # next in iteration is shard range 1.0 in FOUND state from the other # replica that it cannot yet cleave. self.sharders_once(number=self.brain.node_numbers[0], additional_args='--partitions=%s' % self.brain.part) # On first pass the second replica passes audit (it has its own found # ranges and the first replica's created shard ranges but none in the # same state overlap), creates its shards and then syncs shard ranges # with the other replicas. All of the 7 shard ranges on this replica # are now in CREATED state so it proceeds to cleave the first two shard # ranges, 0.1 and 1.0. self.sharders_once(number=self.brain.node_numbers[1], additional_args='--partitions=%s' % self.brain.part) self.replicators.once() # Uh-oh self.assert_container_state(self.brain.nodes[0], 'sharding', 7) self.assert_container_state(self.brain.nodes[1], 'sharding', 7) # There's a race: the third replica may be sharding, may be unsharded # Try it again a few times self.sharders_once(additional_args='--partitions=%s' % self.brain.part) self.replicators.once() self.sharders_once(additional_args='--partitions=%s' % self.brain.part) # It's not really fixing itself... the sharder audit will detect # overlapping ranges which prevents cleaving proceeding; expect the # shard ranges to be mostly still in created state, with one or two # possibly cleaved during first pass before the sharding got stalled shard_ranges = self.assert_container_state(self.brain.nodes[0], 'sharding', 7) self.assertEqual([ShardRange.CLEAVED] * 2 + [ShardRange.CREATED] * 5, [sr.state for sr in shard_ranges]) shard_ranges = self.assert_container_state(self.brain.nodes[1], 'sharding', 7) self.assertEqual([ShardRange.CLEAVED] * 2 + [ShardRange.CREATED] * 5, [sr.state for sr in shard_ranges]) # But hey, at least listings still work! They're just going to get # horribly out of date as more objects are added self.assert_container_listing(obj_names) # 'swift-manage-shard-ranges repair' will choose the second set of 3 # shard ranges (1.*) over the first set of 4 (0.*) because that's the # path with most cleaving progress, and so shrink shard ranges 0.*. 
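# (note that the repair below edits only the single root db it is given;
# the replicator pass that follows is what propagates the repaired shard
# ranges to the other root replicas)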
db_file = self.get_db_file(self.brain.part, self.brain.nodes[0]) self.assert_subprocess_success( ['swift-manage-shard-ranges', db_file, 'repair', '--yes']) # make sure all root replicas now sync their shard ranges self.replicators.once() # Run sharder on the shrinking shards. This should not change the state # of any of the acceptors, particularly the ones that have yet to have # object cleaved from the roots, because we don't want the as yet # uncleaved acceptors becoming prematurely active and creating 'holes' # in listings. The shrinking shard ranges should however get deleted in # root container table. self.run_sharders(shard_ranges_0) shard_ranges = self.assert_container_state(self.brain.nodes[1], 'sharding', 3) self.assertEqual([ShardRange.CLEAVED] * 1 + [ShardRange.CREATED] * 2, [sr.state for sr in shard_ranges]) self.assert_container_listing(obj_names) # check the unwanted shards did shrink away... for shard_range in shard_ranges_0: with annotate_failure(shard_range): found_for_shard = self.categorize_container_dir_content( shard_range.account, shard_range.container) self.assertLengthEqual(found_for_shard['shard_dbs'], 3) actual = [] for shard_db in found_for_shard['shard_dbs']: broker = ContainerBroker(shard_db) own_sr = broker.get_own_shard_range() actual.append( (broker.get_db_state(), own_sr.state, own_sr.deleted)) self.assertEqual([(SHARDED, ShardRange.SHRUNK, True)] * 3, actual) # At this point one of the first two replicas may have done some useful # cleaving of 1.* shards, the other may have only cleaved 0.* shards, # and the third replica may have cleaved no shards. We therefore need # two more passes of the sharder to get to a predictable state where # all replicas have cleaved all three 0.* shards. self.sharders_once() self.sharders_once() # now we expect all replicas to have just the three 1.* shards, with # the 0.* shards all deleted brokers = {} exp_shard_ranges = sorted( [sr.copy(state=ShardRange.SHRUNK, deleted=True) for sr in shard_ranges_0] + [sr.copy(state=ShardRange.ACTIVE) for sr in shard_ranges_1], key=ShardRange.sort_key) for node in (0, 1, 2): with annotate_failure('node %s' % node): broker = self.get_broker(self.brain.part, self.brain.nodes[node]) brokers[node] = broker shard_ranges = broker.get_shard_ranges() self.assertEqual(shard_ranges_1, shard_ranges) shard_ranges = broker.get_shard_ranges(include_deleted=True) self.assertLengthEqual(shard_ranges, len(exp_shard_ranges)) self.maxDiff = None self.assertEqual(exp_shard_ranges, shard_ranges) self.assertEqual(ShardRange.SHARDED, broker._own_shard_range().state) # Sadly, the first replica to start sharding is still reporting its db # state to be 'unsharded' because, although it has sharded, its shard # db epoch (epoch_0) does not match its own shard range epoch # (epoch_1), and that is because the second replica (with epoch_1) # updated the own shard range and replicated it to all other replicas. # If we had run the sharder on the second replica before the first # replica, then by the time the first replica started sharding it would # have learnt the newer epoch_1 and we wouldn't see this inconsistency. 
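# (the epoch referred to above is the one embedded in the shard db
# filename, which is why parse_db_filename() in
# categorize_container_dir_content() can tell 'shard_dbs' apart from
# 'normal_dbs': a fresh sharding db carries an epoch in its name while the
# retiring db does not)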
self.assertEqual(UNSHARDED, brokers[0].get_db_state()) self.assertEqual(SHARDED, brokers[1].get_db_state()) self.assertEqual(SHARDED, brokers[2].get_db_state()) epoch_1 = brokers[1].db_epoch self.assertEqual(epoch_1, brokers[2].db_epoch) self.assertLess(brokers[0].db_epoch, epoch_1) # the root replica that thinks it is unsharded is problematic - it will # not return shard ranges for listings, but has no objects, so it's # luck of the draw whether we get a listing or not at this point :( # Run the sharders again: the first replica that is still 'unsharded' # because of the older epoch_0 in its db filename will now start to # shard again with a newer epoch_1 db, and will start to re-cleave the # 3 active shards, albeit with zero objects to cleave. self.sharders_once() for node in (0, 1, 2): with annotate_failure('node %s' % node): broker = self.get_broker(self.brain.part, self.brain.nodes[node]) brokers[node] = broker shard_ranges = broker.get_shard_ranges() self.assertEqual(shard_ranges_1, shard_ranges) shard_ranges = broker.get_shard_ranges(include_deleted=True) self.assertLengthEqual(shard_ranges, len(exp_shard_ranges)) self.assertEqual(exp_shard_ranges, shard_ranges) self.assertEqual(ShardRange.SHARDED, broker._own_shard_range().state) self.assertEqual(epoch_1, broker.db_epoch) self.assertIn(brokers[0].get_db_state(), (SHARDING, SHARDED)) self.assertEqual(SHARDED, brokers[1].get_db_state()) self.assertEqual(SHARDED, brokers[2].get_db_state()) # This cycle of the sharders also guarantees that all shards have had # their state updated to ACTIVE from the root; this was not necessarily # true at end of the previous sharder pass because a shard audit (when # the shard is updated from a root) may have happened before all roots # have had their shard ranges transitioned to ACTIVE. for shard_range in shard_ranges_1: with annotate_failure(shard_range): found_for_shard = self.categorize_container_dir_content( shard_range.account, shard_range.container) self.assertLengthEqual(found_for_shard['normal_dbs'], 3) actual = [] for shard_db in found_for_shard['normal_dbs']: broker = ContainerBroker(shard_db) own_sr = broker.get_own_shard_range() actual.append( (broker.get_db_state(), own_sr.state, own_sr.deleted)) self.assertEqual([(UNSHARDED, ShardRange.ACTIVE, False)] * 3, actual) # We may need one more pass of the sharder before all three shard # ranges are cleaved (2 per pass) and all the root replicas are # predictably in sharded state. Note: the accelerated cleaving of >2 # zero-object shard ranges per cycle is defeated if a shard happens # to exist on the same node as the root because the roots cleaving # process doesn't think that it created the shard db and will therefore # replicate it as per a normal cleave. 
self.sharders_once() for node in (0, 1, 2): with annotate_failure('node %s' % node): broker = self.get_broker(self.brain.part, self.brain.nodes[node]) brokers[node] = broker shard_ranges = broker.get_shard_ranges() self.assertEqual(shard_ranges_1, shard_ranges) shard_ranges = broker.get_shard_ranges(include_deleted=True) self.assertLengthEqual(shard_ranges, len(exp_shard_ranges)) self.assertEqual(exp_shard_ranges, shard_ranges) self.assertEqual(ShardRange.SHARDED, broker._own_shard_range().state) self.assertEqual(epoch_1, broker.db_epoch) self.assertEqual(SHARDED, broker.get_db_state()) # Finally, with all root replicas in a consistent state, the listing # will be be predictably correct self.assert_container_listing(obj_names) def test_manage_shard_ranges_repair_shard(self): # provoke overlaps in a shard container and repair them obj_names = self._make_object_names(24) initial_obj_names = obj_names[::2] # put 12 objects in container self.put_objects(initial_obj_names) client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) # run replicators first time to get sync points set self.replicators.once() # find 3 shard ranges on root nodes[0] and get the root sharded self.assert_subprocess_success([ 'swift-manage-shard-ranges', self.get_db_file(self.brain.part, self.brain.nodes[0]), 'find_and_replace', '4', '--enable']) self.replicators.once() # cleave first two shards self.sharders_once(additional_args='--partitions=%s' % self.brain.part) # cleave third shard self.sharders_once(additional_args='--partitions=%s' % self.brain.part) # ensure all shards learn their ACTIVE state from root self.sharders_once() for node in (0, 1, 2): with annotate_failure('node %d' % node): shard_ranges = self.assert_container_state( self.brain.nodes[node], 'sharded', 3) for sr in shard_ranges: self.assertEqual(ShardRange.ACTIVE, sr.state) self.assert_container_listing(initial_obj_names) # add objects to second shard range so it has 8 objects ; this range # has bounds (obj-0006,obj-0014] root_shard_ranges = self.get_container_shard_ranges() self.assertEqual(3, len(root_shard_ranges)) shard_1 = root_shard_ranges[1] self.assertEqual(obj_names[6], shard_1.lower) self.assertEqual(obj_names[14], shard_1.upper) more_obj_names = obj_names[7:15:2] self.put_objects(more_obj_names) expected_obj_names = sorted(initial_obj_names + more_obj_names) self.assert_container_listing(expected_obj_names) shard_1_part, shard_1_nodes = self.brain.ring.get_nodes( shard_1.account, shard_1.container) # find 3 sub-shards on one shard node; use --force-commits to ensure # the recently PUT objects are included when finding the shard range # pivot points self.assert_subprocess_success([ 'swift-manage-shard-ranges', '--force-commits', self.get_db_file(shard_1_part, shard_1_nodes[1], shard_1.account, shard_1.container), 'find_and_replace', '3', '--enable']) # ... 
and mistakenly find 4 shard ranges on a different shard node :( self.assert_subprocess_success([ 'swift-manage-shard-ranges', '--force-commits', self.get_db_file(shard_1_part, shard_1_nodes[2], shard_1.account, shard_1.container), 'find_and_replace', '2', '--enable']) # replicate the muddle of shard ranges between shard replicas, merged # result is: # '' - 6 shard ACTIVE # 6 - 8 sub-shard FOUND # 6 - 9 sub-shard FOUND # 8 - 10 sub-shard FOUND # 9 - 12 sub-shard FOUND # 10 - 12 sub-shard FOUND # 12 - 14 sub-shard FOUND # 12 - 14 sub-shard FOUND # 6 - 14 shard SHARDING # 14 - '' shard ACTIVE self.replicators.once() # try hard to shard the shard... self.sharders_once(additional_args='--partitions=%s' % shard_1_part) self.sharders_once(additional_args='--partitions=%s' % shard_1_part) self.sharders_once(additional_args='--partitions=%s' % shard_1_part) # sharding hasn't completed and there's overlaps in the shard and root: # the sub-shards will have been cleaved in the order listed above, but # sub-shards (10 -12) and one of (12 - 14) will be overlooked because # the cleave cursor will have moved past their namespace before they # were yielded by the shard range iterator, so we now have: # '' - 6 shard ACTIVE # 6 - 8 sub-shard ACTIVE # 6 - 9 sub-shard ACTIVE # 8 - 10 sub-shard ACTIVE # 10 - 12 sub-shard CREATED # 9 - 12 sub-shard ACTIVE # 12 - 14 sub-shard CREATED # 12 - 14 sub-shard ACTIVE # 14 - '' shard ACTIVE sub_shard_ranges = self.get_container_shard_ranges( shard_1.account, shard_1.container) self.assertEqual(7, len(sub_shard_ranges), sub_shard_ranges) root_shard_ranges = self.get_container_shard_ranges() self.assertEqual(9, len(root_shard_ranges), root_shard_ranges) self.assertEqual([ShardRange.ACTIVE] * 4 + [ShardRange.CREATED, ShardRange.ACTIVE] * 2 + [ShardRange.ACTIVE], [sr.state for sr in root_shard_ranges]) # fix the overlaps - a set of 3 ACTIVE sub-shards will be chosen and 4 # other sub-shards will be shrunk away; apply the fix at the root # container db_file = self.get_db_file(self.brain.part, self.brain.nodes[0]) self.assert_subprocess_success( ['swift-manage-shard-ranges', db_file, 'repair', '--yes']) self.replicators.once() self.sharders_once() self.sharders_once() # check root now has just 5 shard ranges root_shard_ranges = self.get_container_shard_ranges() self.assertEqual(5, len(root_shard_ranges), root_shard_ranges) self.assertEqual([ShardRange.ACTIVE] * 5, [sr.state for sr in root_shard_ranges]) # check there are 1 sharded shard and 4 shrunk sub-shard ranges in the # root (note, shard_1's shard ranges aren't updated once it has sharded # because the sub-shards report their state to the root; we cannot make # assertions about shrunk states in shard_1's shard range table) root_shard_ranges = self.get_container_shard_ranges( include_deleted=True) self.assertEqual(10, len(root_shard_ranges), root_shard_ranges) shrunk_shard_ranges = [sr for sr in root_shard_ranges if sr.state == ShardRange.SHRUNK] self.assertEqual(4, len(shrunk_shard_ranges), root_shard_ranges) self.assertEqual([True] * 4, [sr.deleted for sr in shrunk_shard_ranges]) sharded_shard_ranges = [sr for sr in root_shard_ranges if sr.state == ShardRange.SHARDED] self.assertEqual(1, len(sharded_shard_ranges), root_shard_ranges) self.assert_container_listing(expected_obj_names)
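# A minimal sketch (not used by any test above) of the manual sharding
# workflow that TestManagedContainerSharding drives through subprocess
# calls: find shard ranges on one root replica and enable sharding,
# replicate the ranges to the other replicas, then let the sharders
# cleave. The db_file argument is whatever get_db_file() returns for a
# root replica; the range count of 4 is illustrative.
def _manual_sharding_sketch(db_file):
    subprocess.check_call([
        'swift-manage-shard-ranges', db_file,
        'find_and_replace', '4', '--enable'])
    # replicate the found shard ranges to the other root replicas
    Manager(['container-replicator']).once()
    # two sharder passes because shard_batch_size is 2 in these probe
    # configs, so only two shard ranges are cleaved per pass
    Manager(['container-sharder']).once()
    Manager(['container-sharder']).once()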
49.930675
79
0.623613
import json import os import shutil import subprocess import uuid from nose import SkipTest import six from six.moves.urllib.parse import quote from swift.common import direct_client, utils from swift.common.manager import Manager from swift.common.memcached import MemcacheRing from swift.common.utils import ShardRange, parse_db_filename, get_db_files, \ quorum_size, config_true_value, Timestamp, md5 from swift.container.backend import ContainerBroker, UNSHARDED, SHARDING, \ SHARDED from swift.container.sharder import CleavingContext, ContainerSharder from swift.container.replicator import ContainerReplicator from swiftclient import client, get_auth, ClientException from swift.proxy.controllers.base import get_cache_key from swift.proxy.controllers.obj import num_container_updates from test import annotate_failure from test.probe import PROXY_BASE_URL from test.probe.brain import BrainSplitter from test.probe.common import ReplProbeTest, get_server_number, \ wait_for_server_to_hangup import mock MIN_SHARD_CONTAINER_THRESHOLD = 4 MAX_SHARD_CONTAINER_THRESHOLD = 100 class ShardCollector(object): def __init__(self): self.ranges = {} def __call__(self, cnode, cpart, account, container): self.ranges[cnode['id']] = direct_client.direct_get_container( cnode, cpart, account, container, headers={'X-Backend-Record-Type': 'shard'}) class BaseTestContainerSharding(ReplProbeTest): DELIM = '-' def _maybe_skip_test(self): try: cont_configs = [ utils.readconf(p, 'container-sharder') for p in self.configs['container-sharder'].values()] except ValueError: raise SkipTest('No [container-sharder] section found in ' 'container-server configs') skip_reasons = [] auto_shard = all(config_true_value(c.get('auto_shard', False)) for c in cont_configs) if not auto_shard: skip_reasons.append( 'auto_shard must be true in all container_sharder configs') self.max_shard_size = max( int(c.get('shard_container_threshold', '1000000')) for c in cont_configs) if not (MIN_SHARD_CONTAINER_THRESHOLD <= self.max_shard_size <= MAX_SHARD_CONTAINER_THRESHOLD): skip_reasons.append( 'shard_container_threshold %d must be between %d and %d' % (self.max_shard_size, MIN_SHARD_CONTAINER_THRESHOLD, MAX_SHARD_CONTAINER_THRESHOLD)) def skip_check(reason_list, option, required): values = {int(c.get(option, required)) for c in cont_configs} if values != {required}: reason_list.append('%s must be %s' % (option, required)) skip_check(skip_reasons, 'shard_scanner_batch_size', 10) skip_check(skip_reasons, 'shard_batch_size', 2) if skip_reasons: raise SkipTest(', '.join(skip_reasons)) def _load_rings_and_configs(self): super(BaseTestContainerSharding, self)._load_rings_and_configs() self._maybe_skip_test() def _make_object_names(self, number, start=0): return ['obj%s%04d' % (self.DELIM, x) for x in range(start, start + number)] def _setup_container_name(self): self.container_name = 'container%s%s' % (self.DELIM, uuid.uuid4()) def setUp(self): client.logger.setLevel(client.logging.WARNING) client.requests.logging.getLogger().setLevel( client.requests.logging.WARNING) super(BaseTestContainerSharding, self).setUp() _, self.admin_token = get_auth( PROXY_BASE_URL + '/auth/v1.0', 'admin:admin', 'admin') self._setup_container_name() self.init_brain(self.container_name) self.sharders = Manager(['container-sharder']) self.internal_client = self.make_internal_client() self.memcache = MemcacheRing(['127.0.0.1:11211']) def init_brain(self, container_name): self.container_to_shard = container_name self.brain = BrainSplitter( self.url, self.token, 
self.container_to_shard, None, 'container') self.brain.put_container(policy_index=int(self.policy)) def stop_container_servers(self, node_numbers=None): if node_numbers: ipports = [] server2ipport = {v: k for k, v in self.ipport2server.items()} for number in self.brain.node_numbers[node_numbers]: self.brain.servers.stop(number=number) server = 'container%d' % number ipports.append(server2ipport[server]) else: ipports = [k for k, v in self.ipport2server.items() if v.startswith('container')] self.brain.servers.stop() for ipport in ipports: wait_for_server_to_hangup(ipport) def put_objects(self, obj_names, contents=None): conn = client.Connection(preauthurl=self.url, preauthtoken=self.token) results = [] for obj in obj_names: rdict = {} conn.put_object(self.container_name, obj, contents=contents, response_dict=rdict) results.append((obj, rdict['headers'].get('x-object-version-id'))) return results def delete_objects(self, obj_names_and_versions): conn = client.Connection(preauthurl=self.url, preauthtoken=self.token) for obj in obj_names_and_versions: if isinstance(obj, tuple): obj, version = obj conn.delete_object(self.container_name, obj, query_string='version-id=%s' % version) else: conn.delete_object(self.container_name, obj) def get_container_shard_ranges(self, account=None, container=None, include_deleted=False): account = account if account else self.account container = container if container else self.container_to_shard path = self.internal_client.make_path(account, container) headers = {'X-Backend-Record-Type': 'shard'} if include_deleted: headers['X-Backend-Include-Deleted'] = 'true' resp = self.internal_client.make_request( 'GET', path + '?format=json', headers, [200]) return [ShardRange.from_dict(sr) for sr in json.loads(resp.body)] def direct_get_container_shard_ranges(self, account=None, container=None, expect_failure=False): collector = ShardCollector() self.direct_container_op( collector, account, container, expect_failure) return collector.ranges def get_storage_dir(self, part, node, account=None, container=None): account = account or self.brain.account container = container or self.container_name server_type, config_number = get_server_number( (node['ip'], node['port']), self.ipport2server) assert server_type == 'container' repl_server = '%s-replicator' % server_type conf = utils.readconf(self.configs[repl_server][config_number], section_name=repl_server) datadir = os.path.join(conf['devices'], node['device'], 'containers') container_hash = utils.hash_path(account, container) return (utils.storage_directory(datadir, part, container_hash), container_hash) def get_db_file(self, part, node, account=None, container=None): container_dir, container_hash = self.get_storage_dir( part, node, account=account, container=container) db_file = os.path.join(container_dir, container_hash + '.db') self.assertTrue(get_db_files(db_file)) # sanity check return db_file def get_broker(self, part, node, account=None, container=None): return ContainerBroker( self.get_db_file(part, node, account, container)) def get_shard_broker(self, shard_range, node_index=0): shard_part, shard_nodes = self.brain.ring.get_nodes( shard_range.account, shard_range.container) return self.get_broker( shard_part, shard_nodes[node_index], shard_range.account, shard_range.container) def categorize_container_dir_content(self, account=None, container=None): account = account or self.brain.account container = container or self.container_name part, nodes = self.brain.ring.get_nodes(account, container) storage_dirs = [ 
self.get_storage_dir(part, node, account=account, container=container)[0] for node in nodes] result = { 'shard_dbs': [], 'normal_dbs': [], 'pendings': [], 'locks': [], 'other': [], } for storage_dir in storage_dirs: for f in os.listdir(storage_dir): path = os.path.join(storage_dir, f) if path.endswith('.db'): hash_, epoch, ext = parse_db_filename(path) if epoch: result['shard_dbs'].append(path) else: result['normal_dbs'].append(path) elif path.endswith('.db.pending'): result['pendings'].append(path) elif path.endswith('/.lock'): result['locks'].append(path) else: result['other'].append(path) if result['other']: self.fail('Found unexpected files in storage directory:\n %s' % '\n '.join(result['other'])) return result def assert_dict_contains(self, expected_items, actual_dict): ignored = set(expected_items) ^ set(actual_dict) filtered_actual = {k: actual_dict[k] for k in actual_dict if k not in ignored} self.assertEqual(expected_items, filtered_actual) def assert_shard_ranges_contiguous(self, expected_number, shard_ranges, first_lower='', last_upper=''): if shard_ranges and isinstance(shard_ranges[0], ShardRange): actual_shard_ranges = sorted(shard_ranges) else: actual_shard_ranges = sorted(ShardRange.from_dict(d) for d in shard_ranges) self.assertLengthEqual(actual_shard_ranges, expected_number) if expected_number: with annotate_failure('Ranges %s.' % actual_shard_ranges): self.assertEqual(first_lower, actual_shard_ranges[0].lower_str) for x, y in zip(actual_shard_ranges, actual_shard_ranges[1:]): self.assertEqual(x.upper, y.lower) self.assertEqual(last_upper, actual_shard_ranges[-1].upper_str) def assert_shard_range_equal(self, expected, actual, excludes=None): excludes = excludes or [] expected_dict = dict(expected) actual_dict = dict(actual) for k in excludes: expected_dict.pop(k, None) actual_dict.pop(k, None) self.assertEqual(expected_dict, actual_dict) def assert_shard_range_lists_equal(self, expected, actual, excludes=None): self.assertEqual(len(expected), len(actual)) for expected, actual in zip(expected, actual): self.assert_shard_range_equal(expected, actual, excludes=excludes) def assert_shard_range_state(self, expected_state, shard_ranges): if shard_ranges and not isinstance(shard_ranges[0], ShardRange): shard_ranges = [ShardRange.from_dict(data) for data in shard_ranges] self.assertEqual([expected_state] * len(shard_ranges), [sr.state for sr in shard_ranges]) def assert_total_object_count(self, expected_object_count, shard_ranges): actual = sum(sr['object_count'] for sr in shard_ranges) self.assertEqual(expected_object_count, actual) def assert_container_listing(self, expected_listing, req_hdrs=None): req_hdrs = req_hdrs if req_hdrs else {} headers, actual_listing = client.get_container( self.url, self.token, self.container_name, headers=req_hdrs) self.assertIn('x-container-object-count', headers) expected_obj_count = len(expected_listing) self.assertEqual(expected_listing, [ x['name'].encode('utf-8') if six.PY2 else x['name'] for x in actual_listing]) self.assertEqual(str(expected_obj_count), headers['x-container-object-count']) return headers, actual_listing def assert_container_object_count(self, expected_obj_count): headers = client.head_container( self.url, self.token, self.container_name) self.assertIn('x-container-object-count', headers) self.assertEqual(str(expected_obj_count), headers['x-container-object-count']) def assert_container_post_ok(self, meta_value): key = 'X-Container-Meta-Assert-Post-Works' headers = {key: meta_value} client.post_container( self.url, 
self.token, self.container_name, headers=headers) resp_headers = client.head_container( self.url, self.token, self.container_name) self.assertEqual(meta_value, resp_headers.get(key.lower())) def assert_container_post_fails(self, meta_value): key = 'X-Container-Meta-Assert-Post-Works' headers = {key: meta_value} with self.assertRaises(ClientException) as cm: client.post_container( self.url, self.token, self.container_name, headers=headers) self.assertEqual(404, cm.exception.http_status) def assert_container_delete_fails(self): with self.assertRaises(ClientException) as cm: client.delete_container(self.url, self.token, self.container_name) self.assertEqual(409, cm.exception.http_status) def assert_container_not_found(self): with self.assertRaises(ClientException) as cm: client.get_container(self.url, self.token, self.container_name) self.assertEqual(404, cm.exception.http_status) # check for headers leaking out while deleted resp_headers = cm.exception.http_response_headers self.assertNotIn('X-Container-Object-Count', resp_headers) self.assertNotIn('X-Container-Bytes-Used', resp_headers) self.assertNotIn('X-Timestamp', resp_headers) self.assertNotIn('X-PUT-Timestamp', resp_headers) def assert_container_has_shard_sysmeta(self): node_headers = self.direct_head_container() for node_id, headers in node_headers.items(): with annotate_failure('%s in %s' % (node_id, node_headers.keys())): for k, v in headers.items(): if k.lower().startswith('x-container-sysmeta-shard'): break else: self.fail('No shard sysmeta found in %s' % headers) def assert_container_state(self, node, expected_state, num_shard_ranges): headers, shard_ranges = direct_client.direct_get_container( node, self.brain.part, self.account, self.container_to_shard, headers={'X-Backend-Record-Type': 'shard'}) self.assertEqual(num_shard_ranges, len(shard_ranges)) self.assertIn('X-Backend-Sharding-State', headers) self.assertEqual( expected_state, headers['X-Backend-Sharding-State']) return [ShardRange.from_dict(sr) for sr in shard_ranges] def assert_subprocess_success(self, cmd_args): try: subprocess.check_output(cmd_args, stderr=subprocess.STDOUT) except Exception as exc: # why not 'except CalledProcessError'? because in my py3.6 tests # the CalledProcessError wasn't caught by that! 
despite type(exc) try: self.fail('%s with output:\n%s' % (exc, exc.output)) except AttributeError: raise exc def get_part_and_node_numbers(self, shard_range): part, nodes = self.brain.ring.get_nodes( shard_range.account, shard_range.container) return part, [n['id'] + 1 for n in nodes] def run_sharders(self, shard_ranges): if not isinstance(shard_ranges, (list, tuple, set)): shard_ranges = (shard_ranges,) partitions = ','.join(str(self.get_part_and_node_numbers(sr)[0]) for sr in shard_ranges) self.sharders.once(additional_args='--partitions=%s' % partitions) def run_sharder_sequentially(self, shard_range=None): if shard_range: part, node_numbers = self.get_part_and_node_numbers(shard_range) else: part, node_numbers = self.brain.part, self.brain.node_numbers for node_number in node_numbers: self.sharders.once(number=node_number, additional_args='--partitions=%s' % part) def run_custom_sharder(self, conf_index, custom_conf, **kwargs): return self.run_custom_daemon(ContainerSharder, 'container-sharder', conf_index, custom_conf, **kwargs) class TestContainerShardingNonUTF8(BaseTestContainerSharding): def test_sharding_listing(self): all_obj_names = self._make_object_names(4 * self.max_shard_size) obj_names = all_obj_names[::2] obj_content = 'testing' self.put_objects(obj_names, contents=obj_content) markers = [ obj_names[i] for i in range(self.max_shard_size // 4, 2 * self.max_shard_size, self.max_shard_size // 2)] def check_listing(objects, req_hdrs=None, **params): req_hdrs = req_hdrs if req_hdrs else {} qs = '&'.join('%s=%s' % (k, quote(str(v))) for k, v in params.items()) headers, listing = client.get_container( self.url, self.token, self.container_name, query_string=qs, headers=req_hdrs) listing = [x['name'].encode('utf-8') if six.PY2 else x['name'] for x in listing] if params.get('reverse'): marker = params.get('marker', ShardRange.MAX) end_marker = params.get('end_marker', ShardRange.MIN) expected = [o for o in objects if end_marker < o < marker] expected.reverse() else: marker = params.get('marker', ShardRange.MIN) end_marker = params.get('end_marker', ShardRange.MAX) expected = [o for o in objects if marker < o < end_marker] if 'limit' in params: expected = expected[:params['limit']] self.assertEqual(expected, listing) self.assertIn('x-timestamp', headers) self.assertIn('last-modified', headers) self.assertIn('x-trans-id', headers) self.assertEqual('bytes', headers.get('accept-ranges')) self.assertEqual('application/json; charset=utf-8', headers.get('content-type')) def check_listing_fails(exp_status, **params): qs = '&'.join(['%s=%s' % param for param in params.items()]) with self.assertRaises(ClientException) as cm: client.get_container( self.url, self.token, self.container_name, query_string=qs) self.assertEqual(exp_status, cm.exception.http_status) return cm.exception def do_listing_checks(objs, hdrs=None): hdrs = hdrs if hdrs else {} check_listing(objs, hdrs) check_listing(objs, hdrs, marker=markers[0], end_marker=markers[1]) check_listing(objs, hdrs, marker=markers[0], end_marker=markers[2]) check_listing(objs, hdrs, marker=markers[1], end_marker=markers[3]) check_listing(objs, hdrs, marker=markers[1], end_marker=markers[3], limit=self.max_shard_size // 4) check_listing(objs, hdrs, marker=markers[1], end_marker=markers[3], limit=self.max_shard_size // 4) check_listing(objs, hdrs, marker=markers[1], end_marker=markers[2], limit=self.max_shard_size // 2) check_listing(objs, hdrs, marker=markers[1], end_marker=markers[1]) check_listing(objs, hdrs, reverse=True) check_listing(objs, 
hdrs, reverse=True, end_marker=markers[1]) check_listing(objs, hdrs, reverse=True, marker=markers[3], end_marker=markers[1], limit=self.max_shard_size // 4) check_listing(objs, hdrs, reverse=True, marker=markers[3], end_marker=markers[1], limit=0) check_listing([], hdrs, marker=markers[0], end_marker=markers[0]) check_listing([], hdrs, marker=markers[0], end_marker=markers[1], reverse=True) check_listing(objs, hdrs, prefix='obj') check_listing([], hdrs, prefix='zzz') headers, listing = client.get_container( self.url, self.token, self.container_name, query_string='delimiter=' + quote(self.DELIM), headers=hdrs) self.assertEqual([{'subdir': 'obj' + self.DELIM}], listing) headers, listing = client.get_container( self.url, self.token, self.container_name, query_string='delimiter=j' + quote(self.DELIM), headers=hdrs) self.assertEqual([{'subdir': 'obj' + self.DELIM}], listing) limit = self.cluster_info['swift']['container_listing_limit'] exc = check_listing_fails(412, limit=limit + 1) self.assertIn(b'Maximum limit', exc.http_response_content) exc = check_listing_fails(400, delimiter='%ff') self.assertIn(b'not valid UTF-8', exc.http_response_content) do_listing_checks(obj_names) client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) self.sharders.once(number=self.brain.node_numbers[0], additional_args='--partitions=%s' % self.brain.part) for n in self.brain.node_numbers[1:]: self.sharders.once( number=n, additional_args='--partitions=%s' % self.brain.part) for node in self.brain.nodes: self.assert_container_state(node, 'sharding', 4) shard_ranges = self.get_container_shard_ranges() self.assertLengthEqual(shard_ranges, 4) self.assert_shard_range_state(ShardRange.CLEAVED, shard_ranges[:2]) self.assert_shard_range_state(ShardRange.CREATED, shard_ranges[2:]) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharding') do_listing_checks(obj_names) new_obj_names = all_obj_names[1::4] self.put_objects(new_obj_names, obj_content) exp_obj_names = [o for o in obj_names + new_obj_names if o <= shard_ranges[1].upper] exp_obj_names += [o for o in obj_names if o > shard_ranges[1].upper] exp_obj_names.sort() do_listing_checks(exp_obj_names) self.sharders.once(additional_args='--partitions=%s' % self.brain.part) for node in self.brain.nodes: self.assert_container_state(node, 'sharded', 4) shard_ranges = self.get_container_shard_ranges() self.assert_shard_range_state(ShardRange.ACTIVE, shard_ranges) exp_obj_names = obj_names + new_obj_names exp_obj_names.sort() do_listing_checks(exp_obj_names) do_listing_checks(exp_obj_names, hdrs={'X-Newest': 'true'}) test_headers = {'x-container-meta-test': 'testing', 'x-container-read': 'read_acl', 'x-container-write': 'write_acl', 'x-container-sync-key': 'sync_key', 'x-versions-location': 'versions', 'x-container-meta-access-control-allow-origin': 'aa', 'x-container-meta-access-control-expose-headers': 'bb', 'x-container-meta-access-control-max-age': '123'} client.post_container(self.url, self.admin_token, self.container_name, headers=test_headers) headers, listing = client.get_container( self.url, self.token, self.container_name, headers={'X-Newest': 'true'}) exp_headers = dict(test_headers) exp_headers.update({ 'x-container-object-count': str(len(exp_obj_names)), 'x-container-bytes-used': str(len(exp_obj_names) * len(obj_content)) }) for k, v in exp_headers.items(): self.assertIn(k, headers) self.assertEqual(v, headers[k], dict(headers)) cache_headers, listing 
= client.get_container( self.url, self.token, self.container_name) for k, v in exp_headers.items(): self.assertIn(k, cache_headers) self.assertEqual(v, cache_headers[k], dict(exp_headers)) for k in ('x-timestamp', 'last-modified', 'date', 'x-trans-id', 'x-openstack-request-id'): headers.pop(k, None) cache_headers.pop(k, None) self.assertEqual(headers, cache_headers) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') # delete original objects self.delete_objects(obj_names) do_listing_checks(new_obj_names) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') class TestContainerShardingFunkyNames(TestContainerShardingNonUTF8): DELIM = '\n' def _make_object_names(self, number): return ['obj\n%04d%%Ff' % x for x in range(number)] def _setup_container_name(self): self.container_name = 'container\n%%Ff\n%s' % uuid.uuid4() class TestContainerShardingUTF8(TestContainerShardingNonUTF8): def _make_object_names(self, number, start=0): # override default with names that include non-ascii chars name_length = self.cluster_info['swift']['max_object_name_length'] obj_names = [] for x in range(start, start + number): name = (u'obj-\u00e4\u00ea\u00ec\u00f2\u00fb\u1234-%04d' % x) name = name.encode('utf8').ljust(name_length, b'o') if not six.PY2: name = name.decode('utf8') obj_names.append(name) return obj_names def _setup_container_name(self): # override default with max length name that includes non-ascii chars super(TestContainerShardingUTF8, self)._setup_container_name() name_length = self.cluster_info['swift']['max_container_name_length'] cont_name = \ self.container_name + u'-\u00e4\u00ea\u00ec\u00f2\u00fb\u1234' self.container_name = cont_name.encode('utf8').ljust(name_length, b'x') if not six.PY2: self.container_name = self.container_name.decode('utf8') class TestContainerShardingObjectVersioning(BaseTestContainerSharding): def _maybe_skip_test(self): super(TestContainerShardingObjectVersioning, self)._maybe_skip_test() try: vw_config = utils.readconf(self.configs['proxy-server'], 'filter:versioned_writes') except ValueError: raise SkipTest('No [filter:versioned_writes] section found in ' 'proxy-server configs') allow_object_versioning = config_true_value( vw_config.get('allow_object_versioning', False)) if not allow_object_versioning: raise SkipTest('allow_object_versioning must be true ' 'in all versioned_writes configs') def init_brain(self, container_name): client.put_container(self.url, self.token, container_name, headers={ 'X-Storage-Policy': self.policy.name, 'X-Versions-Enabled': 'true', }) self.container_to_shard = '\x00versions\x00' + container_name self.brain = BrainSplitter( self.url, self.token, self.container_to_shard, None, 'container') def test_sharding_listing(self): # verify parameterised listing of a container during sharding all_obj_names = self._make_object_names(3) * self.max_shard_size all_obj_names.extend(self._make_object_names(self.max_shard_size, start=3)) obj_names = all_obj_names[::2] obj_names_and_versions = self.put_objects(obj_names) def sort_key(obj_and_ver): obj, ver = obj_and_ver return obj, ~Timestamp(ver) obj_names_and_versions.sort(key=sort_key) # choose some names approx in middle of each expected shard range markers = [ obj_names_and_versions[i] for i in range(self.max_shard_size // 4, 2 * self.max_shard_size, self.max_shard_size // 2)] def check_listing(objects, **params): params['versions'] = '' qs = '&'.join('%s=%s' % param 
for param in params.items()) headers, listing = client.get_container( self.url, self.token, self.container_name, query_string=qs) listing = [(x['name'].encode('utf-8') if six.PY2 else x['name'], x['version_id']) for x in listing] if params.get('reverse'): marker = ( params.get('marker', ShardRange.MAX), ~Timestamp(params['version_marker']) if 'version_marker' in params else ~Timestamp('0'), ) end_marker = ( params.get('end_marker', ShardRange.MIN), Timestamp('0'), ) expected = [o for o in objects if end_marker < sort_key(o) < marker] expected.reverse() else: marker = ( params.get('marker', ShardRange.MIN), ~Timestamp(params['version_marker']) if 'version_marker' in params else Timestamp('0'), ) end_marker = ( params.get('end_marker', ShardRange.MAX), ~Timestamp('0'), ) expected = [o for o in objects if marker < sort_key(o) < end_marker] if 'limit' in params: expected = expected[:params['limit']] self.assertEqual(expected, listing) def check_listing_fails(exp_status, **params): params['versions'] = '' qs = '&'.join('%s=%s' % param for param in params.items()) with self.assertRaises(ClientException) as cm: client.get_container( self.url, self.token, self.container_name, query_string=qs) self.assertEqual(exp_status, cm.exception.http_status) return cm.exception def do_listing_checks(objects): check_listing(objects) check_listing(objects, marker=markers[0][0], version_marker=markers[0][1]) check_listing(objects, marker=markers[0][0], version_marker=markers[0][1], limit=self.max_shard_size // 10) check_listing(objects, marker=markers[0][0], version_marker=markers[0][1], limit=self.max_shard_size // 4) check_listing(objects, marker=markers[0][0], version_marker=markers[0][1], limit=self.max_shard_size // 2) check_listing(objects, marker=markers[1][0], version_marker=markers[1][1]) check_listing(objects, marker=markers[1][0], version_marker=markers[1][1], limit=self.max_shard_size // 10) check_listing(objects, marker=markers[2][0], version_marker=markers[2][1], limit=self.max_shard_size // 4) check_listing(objects, marker=markers[2][0], version_marker=markers[2][1], limit=self.max_shard_size // 2) check_listing(objects, reverse=True) check_listing(objects, reverse=True, marker=markers[1][0], version_marker=markers[1][1]) check_listing(objects, prefix='obj') check_listing([], prefix='zzz') # delimiter headers, listing = client.get_container( self.url, self.token, self.container_name, query_string='delimiter=-') self.assertEqual([{'subdir': 'obj-'}], listing) headers, listing = client.get_container( self.url, self.token, self.container_name, query_string='delimiter=j-') self.assertEqual([{'subdir': 'obj-'}], listing) limit = self.cluster_info['swift']['container_listing_limit'] exc = check_listing_fails(412, limit=limit + 1) self.assertIn(b'Maximum limit', exc.http_response_content) exc = check_listing_fails(400, delimiter='%ff') self.assertIn(b'not valid UTF-8', exc.http_response_content) # sanity checks do_listing_checks(obj_names_and_versions) # Shard the container. 
Use an internal_client so we get an implicit # X-Backend-Allow-Reserved-Names header self.internal_client.set_container_metadata( self.account, self.container_to_shard, { 'X-Container-Sysmeta-Sharding': 'True', }) # First run the 'leader' in charge of scanning, which finds all shard # ranges and cleaves first two self.sharders.once(number=self.brain.node_numbers[0], additional_args='--partitions=%s' % self.brain.part) # Then run sharder on other nodes which will also cleave first two # shard ranges for n in self.brain.node_numbers[1:]: self.sharders.once( number=n, additional_args='--partitions=%s' % self.brain.part) # sanity check shard range states for node in self.brain.nodes: self.assert_container_state(node, 'sharding', 4) shard_ranges = self.get_container_shard_ranges() self.assertLengthEqual(shard_ranges, 4) self.assert_shard_range_state(ShardRange.CLEAVED, shard_ranges[:2]) self.assert_shard_range_state(ShardRange.CREATED, shard_ranges[2:]) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() # confirm no sysmeta deleted self.assert_container_post_ok('sharding') do_listing_checks(obj_names_and_versions) # put some new objects spread through entire namespace new_obj_names = all_obj_names[1::4] new_obj_names_and_versions = self.put_objects(new_obj_names) # new objects that fell into the first two cleaved shard ranges are # reported in listing, new objects in the yet-to-be-cleaved shard # ranges are not yet included in listing exp_obj_names_and_versions = [ o for o in obj_names_and_versions + new_obj_names_and_versions if '\x00' + o[0] <= shard_ranges[1].upper] exp_obj_names_and_versions += [ o for o in obj_names_and_versions if '\x00' + o[0] > shard_ranges[1].upper] exp_obj_names_and_versions.sort(key=sort_key) do_listing_checks(exp_obj_names_and_versions) # run all the sharders again and the last two shard ranges get cleaved self.sharders.once(additional_args='--partitions=%s' % self.brain.part) for node in self.brain.nodes: self.assert_container_state(node, 'sharded', 4) shard_ranges = self.get_container_shard_ranges() self.assert_shard_range_state(ShardRange.ACTIVE, shard_ranges) exp_obj_names_and_versions = \ obj_names_and_versions + new_obj_names_and_versions exp_obj_names_and_versions.sort(key=sort_key) do_listing_checks(exp_obj_names_and_versions) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') # delete original objects self.delete_objects(obj_names_and_versions) new_obj_names_and_versions.sort(key=sort_key) do_listing_checks(new_obj_names_and_versions) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') class TestContainerSharding(BaseTestContainerSharding): def _test_sharded_listing(self, run_replicators=False): obj_names = self._make_object_names(self.max_shard_size) self.put_objects(obj_names) # Verify that we start out with normal DBs, no shards found = self.categorize_container_dir_content() self.assertLengthEqual(found['normal_dbs'], 3) self.assertLengthEqual(found['shard_dbs'], 0) for db_file in found['normal_dbs']: broker = ContainerBroker(db_file) self.assertIs(True, broker.is_root_container()) self.assertEqual('unsharded', broker.get_db_state()) self.assertLengthEqual(broker.get_shard_ranges(), 0) headers, pre_sharding_listing = client.get_container( self.url, self.token, self.container_name) self.assertEqual(obj_names, [ x['name'].encode('utf-8') if six.PY2 else x['name'] for x in 
pre_sharding_listing]) # sanity # Shard it client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) pre_sharding_headers = client.head_container( self.url, self.admin_token, self.container_name) self.assertEqual('True', pre_sharding_headers.get('x-container-sharding')) # Only run the one in charge of scanning self.sharders.once(number=self.brain.node_numbers[0], additional_args='--partitions=%s' % self.brain.part) # Verify that we have one sharded db -- though the other normal DBs # received the shard ranges that got defined found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 1) broker = self.get_broker(self.brain.part, self.brain.nodes[0]) # sanity check - the shard db is on replica 0 self.assertEqual(found['shard_dbs'][0], broker.db_file) self.assertIs(True, broker.is_root_container()) self.assertEqual('sharded', broker.get_db_state()) orig_root_shard_ranges = [dict(sr) for sr in broker.get_shard_ranges()] self.assertLengthEqual(orig_root_shard_ranges, 2) self.assert_total_object_count(len(obj_names), orig_root_shard_ranges) self.assert_shard_ranges_contiguous(2, orig_root_shard_ranges) self.assertEqual([ShardRange.ACTIVE, ShardRange.ACTIVE], [sr['state'] for sr in orig_root_shard_ranges]) # Contexts should still be there, and should be complete contexts = set([ctx.done() for ctx, _ in CleavingContext.load_all(broker)]) self.assertEqual({True}, contexts) self.direct_delete_container(expect_failure=True) self.assertLengthEqual(found['normal_dbs'], 2) for db_file in found['normal_dbs']: broker = ContainerBroker(db_file) self.assertIs(True, broker.is_root_container()) self.assertEqual('unsharded', broker.get_db_state()) shard_ranges = [dict(sr) for sr in broker.get_shard_ranges()] self.assertEqual([ShardRange.CREATED, ShardRange.CREATED], [sr['state'] for sr in shard_ranges]) # the sharded db had shard range meta_timestamps and state updated # during cleaving, so we do not expect those to be equal on other # nodes self.assert_shard_range_lists_equal( orig_root_shard_ranges, shard_ranges, excludes=['meta_timestamp', 'state', 'state_timestamp']) contexts = list(CleavingContext.load_all(broker)) self.assertEqual([], contexts) # length check if run_replicators: Manager(['container-replicator']).once() # replication doesn't change the db file names found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 1) self.assertLengthEqual(found['normal_dbs'], 2) self.sharders.once(additional_args='--partitions=%s' % self.brain.part) found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 3) self.assertLengthEqual(found['normal_dbs'], 0) for db_file in found['shard_dbs']: broker = ContainerBroker(db_file) self.assertIs(True, broker.is_root_container()) self.assertEqual('sharded', broker.get_db_state()) self.assert_shard_range_lists_equal( orig_root_shard_ranges, broker.get_shard_ranges(), excludes=['meta_timestamp', 'state_timestamp']) for orig, updated in zip(orig_root_shard_ranges, broker.get_shard_ranges()): self.assertGreaterEqual(updated.state_timestamp, orig['state_timestamp']) self.assertGreaterEqual(updated.meta_timestamp, orig['meta_timestamp']) contexts = set([ctx.done() for ctx, _ in CleavingContext.load_all(broker)]) self.assertEqual({True}, contexts) headers, actual_listing = self.assert_container_listing(obj_names) self.assertEqual(headers['last-modified'], pre_sharding_headers['last-modified']) headers, listing = 
client.get_container(self.url, self.token, self.container_name, query_string='reverse=on') self.assertEqual(pre_sharding_listing[::-1], listing) headers, actual_listing = self.assert_container_listing(obj_names) self.assertEqual(headers['last-modified'], pre_sharding_headers['last-modified']) headers, listing = client.get_container(self.url, self.token, self.container_name, query_string='reverse=on') self.assertEqual(pre_sharding_listing[::-1], listing) more_obj_names = [ 'beta%03d' % x for x in range(self.max_shard_size)] self.put_objects(more_obj_names) # The listing includes new objects (shard ranges haven't changed, just headers, listing = self.assert_container_listing( more_obj_names + obj_names) self.assertEqual(pre_sharding_listing, listing[len(more_obj_names):]) self.assert_container_object_count(len(obj_names)) shard_1 = ShardRange.from_dict(orig_root_shard_ranges[0]) self.run_sharders(shard_1) self.assert_container_object_count(len(more_obj_names + obj_names)) # *again* into three new sub-shards, but nothing happens until the root # leader identifies shard candidate... root_shard_ranges = self.direct_get_container_shard_ranges() for node, (hdrs, root_shards) in root_shard_ranges.items(): self.assertLengthEqual(root_shards, 2) with annotate_failure('node %s. ' % node): self.assertEqual( [ShardRange.ACTIVE] * 2, [sr['state'] for sr in root_shards]) # orig shards 0, 1 should be contiguous self.assert_shard_ranges_contiguous(2, root_shards) # Now run the root leader to identify shard candidate...while one of # the shard container servers is down shard_1_part, shard_1_nodes = self.get_part_and_node_numbers(shard_1) self.brain.servers.stop(number=shard_1_nodes[2]) self.sharders.once(number=self.brain.node_numbers[0], additional_args='--partitions=%s' % self.brain.part) # ... so third replica of first shard state is not moved to sharding found_for_shard = self.categorize_container_dir_content( shard_1.account, shard_1.container) self.assertLengthEqual(found_for_shard['normal_dbs'], 3) self.assertEqual( [ShardRange.SHARDING, ShardRange.SHARDING, ShardRange.ACTIVE], [ContainerBroker(db_file).get_own_shard_range().state for db_file in found_for_shard['normal_dbs']]) # ...then run first cycle of first shard sharders in order, leader # first, to get to predictable state where all nodes have cleaved 2 out # of 3 ranges...starting with first two nodes for node_number in shard_1_nodes[:2]: self.sharders.once( number=node_number, additional_args='--partitions=%s' % shard_1_part) # ... first two replicas start sharding to sub-shards found_for_shard = self.categorize_container_dir_content( shard_1.account, shard_1.container) self.assertLengthEqual(found_for_shard['shard_dbs'], 2) for db_file in found_for_shard['shard_dbs'][:2]: broker = ContainerBroker(db_file) with annotate_failure('shard db file %s. 
' % db_file): self.assertIs(False, broker.is_root_container()) self.assertEqual('sharding', broker.get_db_state()) self.assertEqual( ShardRange.SHARDING, broker.get_own_shard_range().state) shard_shards = broker.get_shard_ranges() self.assertEqual( [ShardRange.CLEAVED, ShardRange.CLEAVED, ShardRange.CREATED], [sr.state for sr in shard_shards]) self.assert_shard_ranges_contiguous( 3, shard_shards, first_lower=orig_root_shard_ranges[0]['lower'], last_upper=orig_root_shard_ranges[0]['upper']) contexts = list(CleavingContext.load_all(broker)) self.assertEqual(len(contexts), 1) context, _lm = contexts[0] self.assertIs(context.cleaving_done, False) self.assertIs(context.misplaced_done, True) self.assertEqual(context.ranges_done, 2) self.assertEqual(context.ranges_todo, 1) self.assertEqual(context.max_row, self.max_shard_size * 3 // 2) # but third replica still has no idea it should be sharding self.assertLengthEqual(found_for_shard['normal_dbs'], 3) self.assertEqual( ShardRange.ACTIVE, ContainerBroker( found_for_shard['normal_dbs'][2]).get_own_shard_range().state) # ...but once sharder runs on third replica it will learn its state; # note that any root replica on the stopped container server also won't self.sharders.once( number=shard_1_nodes[2], additional_args='--partitions=%s' % shard_1_part) found_for_shard = self.categorize_container_dir_content( shard_1.account, shard_1.container) self.assertLengthEqual(found_for_shard['shard_dbs'], 2) self.assertLengthEqual(found_for_shard['normal_dbs'], 3) broker = ContainerBroker(found_for_shard['normal_dbs'][2]) self.assertEqual('unsharded', broker.get_db_state()) self.assertEqual( ShardRange.SHARDING, broker.get_own_shard_range().state) self.assertFalse(broker.get_shard_ranges()) contexts = list(CleavingContext.load_all(broker)) self.assertEqual([], contexts) self.brain.servers.start(number=shard_1_nodes[2]) self.replicators.once() contexts = list(CleavingContext.load_all(broker)) self.assertEqual(len(contexts), 2) self.sharders.once( number=shard_1_nodes[2], additional_args='--partitions=%s' % shard_1_part) sharding_broker = ContainerBroker(found_for_shard['normal_dbs'][2]) self.assertEqual('sharding', sharding_broker.get_db_state()) broker_id = broker.get_info()['id'] contexts = list(CleavingContext.load_all(broker)) self.assertEqual(len(contexts), 2) self.assertNotIn(broker_id, [ctx[0].ref for ctx in contexts]) # ...but the sharding one does contexts = list(CleavingContext.load_all(sharding_broker)) self.assertEqual(len(contexts), 3) self.assertIn(broker_id, [ctx[0].ref for ctx in contexts]) # check original first shard range state and sub-shards - all replicas # should now be in consistent state found_for_shard = self.categorize_container_dir_content( shard_1.account, shard_1.container) self.assertLengthEqual(found_for_shard['shard_dbs'], 3) self.assertLengthEqual(found_for_shard['normal_dbs'], 3) for db_file in found_for_shard['shard_dbs']: broker = ContainerBroker(db_file) with annotate_failure('shard db file %s. 
' % db_file): self.assertIs(False, broker.is_root_container()) self.assertEqual('sharding', broker.get_db_state()) self.assertEqual( ShardRange.SHARDING, broker.get_own_shard_range().state) shard_shards = broker.get_shard_ranges() self.assertEqual( [ShardRange.CLEAVED, ShardRange.CLEAVED, ShardRange.CREATED], [sr.state for sr in shard_shards]) self.assert_shard_ranges_contiguous( 3, shard_shards, first_lower=orig_root_shard_ranges[0]['lower'], last_upper=orig_root_shard_ranges[0]['upper']) # check third sub-shard is in created state sub_shard = shard_shards[2] found_for_sub_shard = self.categorize_container_dir_content( sub_shard.account, sub_shard.container) self.assertFalse(found_for_sub_shard['shard_dbs']) self.assertLengthEqual(found_for_sub_shard['normal_dbs'], 3) for db_file in found_for_sub_shard['normal_dbs']: broker = ContainerBroker(db_file) with annotate_failure('sub shard db file %s. ' % db_file): self.assertIs(False, broker.is_root_container()) self.assertEqual('unsharded', broker.get_db_state()) self.assertEqual( ShardRange.CREATED, broker.get_own_shard_range().state) self.assertFalse(broker.get_shard_ranges()) # check root shard ranges root_shard_ranges = self.direct_get_container_shard_ranges() for node, (hdrs, root_shards) in root_shard_ranges.items(): self.assertLengthEqual(root_shards, 5) with annotate_failure('node %s. ' % node): # shard ranges are sorted by upper, state, lower, so expect: # sub-shards, orig shard 0, orig shard 1 self.assertEqual( [ShardRange.CLEAVED, ShardRange.CLEAVED, ShardRange.CREATED, ShardRange.SHARDING, ShardRange.ACTIVE], [sr['state'] for sr in root_shards]) # sub-shards 0, 1, 2, orig shard 1 should be contiguous self.assert_shard_ranges_contiguous( 4, root_shards[:3] + root_shards[4:]) # orig shards 0, 1 should be contiguous self.assert_shard_ranges_contiguous(2, root_shards[3:]) self.assert_container_listing(more_obj_names + obj_names) self.assert_container_object_count(len(more_obj_names + obj_names)) # Before writing, kill the cache self.memcache.delete(get_cache_key( self.account, self.container_name, shard='updating')) # add another object that lands in the first of the new sub-shards self.put_objects(['alpha']) # check that alpha object is in the first new shard shard_listings = self.direct_get_container(shard_shards[0].account, shard_shards[0].container) for node, (hdrs, listing) in shard_listings.items(): with annotate_failure(node): self.assertIn('alpha', [o['name'] for o in listing]) self.assert_container_listing(['alpha'] + more_obj_names + obj_names) # Run sharders again so things settle. self.run_sharders(shard_1) # Also run replicators to settle cleaving contexts self.replicators.once() # check original first shard range shards for db_file in found_for_shard['shard_dbs']: broker = ContainerBroker(db_file) with annotate_failure('shard db file %s. ' % db_file): self.assertIs(False, broker.is_root_container()) self.assertEqual('sharded', broker.get_db_state()) self.assertEqual( [ShardRange.ACTIVE] * 3, [sr.state for sr in broker.get_shard_ranges()]) # Contexts should still be there, and should be complete contexts = set([ctx.done() for ctx, _ in CleavingContext.load_all(broker)]) self.assertEqual({True}, contexts) # check root shard ranges root_shard_ranges = self.direct_get_container_shard_ranges() for node, (hdrs, root_shards) in root_shard_ranges.items(): # old first shard range should have been deleted self.assertLengthEqual(root_shards, 4) with annotate_failure('node %s. 
' % node): self.assertEqual( [ShardRange.ACTIVE] * 4, [sr['state'] for sr in root_shards]) self.assert_shard_ranges_contiguous(4, root_shards) headers, final_listing = self.assert_container_listing( ['alpha'] + more_obj_names + obj_names) # check root found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 3) self.assertLengthEqual(found['normal_dbs'], 0) new_shard_ranges = None for db_file in found['shard_dbs']: broker = ContainerBroker(db_file) self.assertIs(True, broker.is_root_container()) self.assertEqual('sharded', broker.get_db_state()) if new_shard_ranges is None: new_shard_ranges = broker.get_shard_ranges( include_deleted=True) self.assertLengthEqual(new_shard_ranges, 5) # Second half is still there, and unchanged self.assertIn( dict(orig_root_shard_ranges[1], meta_timestamp=None, state_timestamp=None), [dict(sr, meta_timestamp=None, state_timestamp=None) for sr in new_shard_ranges]) # But the first half split in three, then deleted by_name = {sr.name: sr for sr in new_shard_ranges} self.assertIn(orig_root_shard_ranges[0]['name'], by_name) old_shard_range = by_name.pop( orig_root_shard_ranges[0]['name']) self.assertTrue(old_shard_range.deleted) self.assert_shard_ranges_contiguous(4, list(by_name.values())) else: # Everyone's on the same page. Well, except for other_shard_ranges = broker.get_shard_ranges( include_deleted=True) self.assert_shard_range_lists_equal( new_shard_ranges, other_shard_ranges, excludes=['meta_timestamp', 'state_timestamp']) for orig, updated in zip(orig_root_shard_ranges, other_shard_ranges): self.assertGreaterEqual(updated.meta_timestamp, orig['meta_timestamp']) self.assert_container_delete_fails() for obj in final_listing: client.delete_object( self.url, self.token, self.container_name, obj['name']) self.assert_container_listing([]) # but root container stats will not yet be aware of the deletions self.assert_container_delete_fails() # One server was down while the shard sharded its first two sub-shards, # so there may be undeleted handoff db(s) for sub-shard(s) that were # not fully replicated; run replicators now to clean up so they no # longer report bogus stats to root. self.replicators.once() # Run sharder so that shard containers update the root. Do not run # sharder on root container because that triggers shrinks which can # cause root object count to temporarily be non-zero and prevent the # final delete. self.run_sharders(self.get_container_shard_ranges()) # then root is empty and can be deleted self.assert_container_listing([]) self.assert_container_object_count(0) client.delete_container(self.url, self.token, self.container_name) def test_sharded_listing_no_replicators(self): self._test_sharded_listing() def test_sharded_listing_with_replicators(self): self._test_sharded_listing(run_replicators=True) def test_async_pendings(self): obj_names = self._make_object_names(self.max_shard_size * 2) # There are some updates *everyone* gets self.put_objects(obj_names[::5]) # But roll some outages so each container only get ~2/5 more object # records i.e. 
total of 3/5 updates per container; and async pendings # pile up for i, n in enumerate(self.brain.node_numbers, start=1): self.brain.servers.stop(number=n) self.put_objects(obj_names[i::5]) self.brain.servers.start(number=n) # But there are also 1/5 updates *no one* gets self.brain.servers.stop() self.put_objects(obj_names[4::5]) self.brain.servers.start() # Shard it client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) headers = client.head_container(self.url, self.admin_token, self.container_name) self.assertEqual('True', headers.get('x-container-sharding')) # sanity check found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 0) self.assertLengthEqual(found['normal_dbs'], 3) for db_file in found['normal_dbs']: broker = ContainerBroker(db_file) self.assertIs(True, broker.is_root_container()) self.assertEqual(len(obj_names) * 3 // 5, broker.get_info()['object_count']) # Only run the 'leader' in charge of scanning. # Each container has ~2 * max * 3/5 objects # which are distributed from obj000 to obj<2 * max - 1>, # so expect 3 shard ranges to be found: the first two will be complete # shards with max/2 objects and lower/upper bounds spaced by approx: # (2 * max - 1)/(2 * max * 3/5) * (max/2) =~ 5/6 * max # # Note that during this shard cycle the leader replicates to other # nodes so they will end up with ~2 * max * 4/5 objects. self.sharders.once(number=self.brain.node_numbers[0], additional_args='--partitions=%s' % self.brain.part) # Verify that we have one shard db -- though the other normal DBs # received the shard ranges that got defined found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 1) node_index_zero_db = found['shard_dbs'][0] broker = ContainerBroker(node_index_zero_db) self.assertIs(True, broker.is_root_container()) self.assertEqual(SHARDING, broker.get_db_state()) expected_shard_ranges = broker.get_shard_ranges() self.assertLengthEqual(expected_shard_ranges, 3) self.assertEqual( [ShardRange.CLEAVED, ShardRange.CLEAVED, ShardRange.CREATED], [sr.state for sr in expected_shard_ranges]) # Still have all three big DBs -- we've only cleaved 2 of the 3 shard self.assertLengthEqual(found['normal_dbs'], 3) db_states = [] for db_file in found['normal_dbs']: broker = ContainerBroker(db_file) self.assertIs(True, broker.is_root_container()) db_states.append(broker.get_db_state()) self.assert_shard_range_lists_equal( expected_shard_ranges, broker.get_shard_ranges(), excludes=['meta_timestamp', 'state_timestamp', 'state']) self.assertEqual(len(obj_names) * 3 // 5, broker.get_info()['object_count']) self.assertEqual([SHARDING, UNSHARDED, UNSHARDED], sorted(db_states)) for n in self.brain.node_numbers[1:]: self.sharders.once( number=n, additional_args='--partitions=%s' % self.brain.part) found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 3) self.assertLengthEqual(found['normal_dbs'], 3) for db_file in found['normal_dbs']: broker = ContainerBroker(db_file) self.assertEqual(SHARDING, broker.get_db_state()) # no new rows self.assertEqual(len(obj_names) * 3 // 5, broker.get_info()['object_count']) # Run updaters to clear the async pendings Manager(['object-updater']).once() # Our "big" dbs didn't take updates for db_file in found['normal_dbs']: broker = ContainerBroker(db_file) self.assertEqual(len(obj_names) * 3 // 5, broker.get_info()['object_count']) for sr in expected_shard_ranges: shard_listings = 
self.direct_get_container(sr.account, sr.container) for node, (hdrs, listing) in shard_listings.items(): shard_listing_names = [ o['name'].encode('utf-8') if six.PY2 else o['name'] for o in listing] for obj in obj_names[4::5]: if obj in sr: self.assertIn(obj, shard_listing_names) else: self.assertNotIn(obj, shard_listing_names) headers, listing = client.get_container(self.url, self.token, self.container_name) start_listing = [ o for o in obj_names if o <= expected_shard_ranges[1].upper] self.assertEqual( [x['name'].encode('utf-8') if six.PY2 else x['name'] for x in listing[:len(start_listing)]], start_listing) # there should be something self.assertTrue( [x['name'].encode('utf-8') if six.PY2 else x['name'] for x in listing[len(start_listing):]]) self.assertIn('x-container-object-count', headers) self.assertEqual(str(len(listing)), headers['x-container-object-count']) headers, listing = client.get_container(self.url, self.token, self.container_name, query_string='reverse=on') self.assertEqual([x['name'].encode('utf-8') if six.PY2 else x['name'] for x in listing[-len(start_listing):]], list(reversed(start_listing))) self.assertIn('x-container-object-count', headers) self.assertEqual(str(len(listing)), headers['x-container-object-count']) self.assertTrue( [x['name'].encode('utf-8') if six.PY2 else x['name'] for x in listing[:-len(start_listing)]]) # Run the sharders again to get everything to settle self.sharders.once() found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 3) self.assertLengthEqual(found['normal_dbs'], 0) # now all shards have been cleaved we should get the complete listing headers, listing = client.get_container(self.url, self.token, self.container_name) self.assertEqual([x['name'].encode('utf-8') if six.PY2 else x['name'] for x in listing], obj_names) def test_shrinking(self): int_client = self.make_internal_client() def check_node_data(node_data, exp_hdrs, exp_obj_count, exp_shards, exp_sharded_root_range=False): hdrs, range_data = node_data self.assert_dict_contains(exp_hdrs, hdrs) sharded_root_range = False other_range_data = [] for data in range_data: sr = ShardRange.from_dict(data) if (sr.account == self.account and sr.container == self.container_name and sr.state == ShardRange.SHARDED): # only expect one root range self.assertFalse(sharded_root_range, range_data) sharded_root_range = True self.assertEqual(ShardRange.MIN, sr.lower, sr) self.assertEqual(ShardRange.MAX, sr.upper, sr) else: # include active root range in further assertions other_range_data.append(data) self.assertEqual(exp_sharded_root_range, sharded_root_range) self.assert_shard_ranges_contiguous(exp_shards, other_range_data) self.assert_total_object_count(exp_obj_count, other_range_data) def check_shard_nodes_data(node_data, expected_state='unsharded', expected_shards=0, exp_obj_count=0, exp_sharded_root_range=False): # checks that shard range is consistent on all nodes root_path = '%s/%s' % (self.account, self.container_name) exp_shard_hdrs = { 'X-Container-Sysmeta-Shard-Quoted-Root': quote(root_path), 'X-Backend-Sharding-State': expected_state} object_counts = [] bytes_used = [] for node_id, node_data in node_data.items(): with annotate_failure('Node id %s.' 
% node_id): check_node_data( node_data, exp_shard_hdrs, exp_obj_count, expected_shards, exp_sharded_root_range) hdrs = node_data[0] object_counts.append(int(hdrs['X-Container-Object-Count'])) bytes_used.append(int(hdrs['X-Container-Bytes-Used'])) if len(set(object_counts)) != 1: self.fail('Inconsistent object counts: %s' % object_counts) if len(set(bytes_used)) != 1: self.fail('Inconsistent bytes used: %s' % bytes_used) return object_counts[0], bytes_used[0] repeat = [0] def do_shard_then_shrink(): repeat[0] += 1 obj_names = ['obj-%s-%03d' % (repeat[0], x) for x in range(self.max_shard_size)] self.put_objects(obj_names) # these two object names will fall at start of first shard range... alpha = 'alpha-%s' % repeat[0] beta = 'beta-%s' % repeat[0] # Enable sharding client.post_container( self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) # sanity check self.assert_container_listing(obj_names) # Only run the one in charge of scanning self.sharders.once( number=self.brain.node_numbers[0], additional_args='--partitions=%s' % self.brain.part) # check root container root_nodes_data = self.direct_get_container_shard_ranges() self.assertEqual(3, len(root_nodes_data)) # nodes on which sharder has not run are still in unsharded state # but have had shard ranges replicated to them exp_obj_count = len(obj_names) exp_hdrs = {'X-Backend-Sharding-State': 'unsharded', 'X-Container-Object-Count': str(exp_obj_count)} node_id = self.brain.node_numbers[1] - 1 check_node_data( root_nodes_data[node_id], exp_hdrs, exp_obj_count, 2) node_id = self.brain.node_numbers[2] - 1 check_node_data( root_nodes_data[node_id], exp_hdrs, exp_obj_count, 2) # only one that ran sharder is in sharded state exp_hdrs['X-Backend-Sharding-State'] = 'sharded' node_id = self.brain.node_numbers[0] - 1 check_node_data( root_nodes_data[node_id], exp_hdrs, exp_obj_count, 2) orig_range_data = root_nodes_data[node_id][1] orig_shard_ranges = [ShardRange.from_dict(r) for r in orig_range_data] # check first shard shard_nodes_data = self.direct_get_container_shard_ranges( orig_shard_ranges[0].account, orig_shard_ranges[0].container) obj_count, bytes_used = check_shard_nodes_data(shard_nodes_data) total_shard_object_count = obj_count # check second shard shard_nodes_data = self.direct_get_container_shard_ranges( orig_shard_ranges[1].account, orig_shard_ranges[1].container) obj_count, bytes_used = check_shard_nodes_data(shard_nodes_data) total_shard_object_count += obj_count self.assertEqual(exp_obj_count, total_shard_object_count) # Now that everyone has shard ranges, run *everyone* self.sharders.once( additional_args='--partitions=%s' % self.brain.part) # all root container nodes should now be in sharded state root_nodes_data = self.direct_get_container_shard_ranges() self.assertEqual(3, len(root_nodes_data)) for node_id, node_data in root_nodes_data.items(): with annotate_failure('Node id %s.' 
% node_id): check_node_data(node_data, exp_hdrs, exp_obj_count, 2) # run updaters to update .sharded account; shard containers have # not updated account since having objects replicated to them self.updaters.once() shard_cont_count, shard_obj_count = int_client.get_account_info( orig_shard_ranges[0].account, [204]) self.assertEqual(2 * repeat[0], shard_cont_count) # the shards account should always have zero object count to avoid # double accounting self.assertEqual(0, shard_obj_count) # checking the listing also refreshes proxy container info cache so # that the proxy becomes aware that container is sharded and will # now look up the shard target for subsequent updates self.assert_container_listing(obj_names) # Before writing, kill the cache self.memcache.delete(get_cache_key( self.account, self.container_name, shard='updating')) # delete objects from first shard range first_shard_objects = [obj_name for obj_name in obj_names if obj_name <= orig_shard_ranges[0].upper] for obj in first_shard_objects: client.delete_object( self.url, self.token, self.container_name, obj) with self.assertRaises(ClientException): client.get_object( self.url, self.token, self.container_name, obj) second_shard_objects = [obj_name for obj_name in obj_names if obj_name > orig_shard_ranges[1].lower] self.assert_container_listing(second_shard_objects) # put a new object 'alpha' in first shard range self.put_objects([alpha]) second_shard_objects = [obj_name for obj_name in obj_names if obj_name > orig_shard_ranges[1].lower] self.assert_container_listing([alpha] + second_shard_objects) # while container servers are down, but proxy has container info in # cache from recent listing, put another object; this update will # lurk in async pending until the updaters run again; because all # the root container servers are down and therefore cannot respond # to a GET for a redirect target, the object update will default to # being targeted at the root container self.stop_container_servers() # Before writing, kill the cache self.memcache.delete(get_cache_key( self.account, self.container_name, shard='updating')) self.put_objects([beta]) self.brain.servers.start() async_pendings = self.gather_async_pendings( self.get_all_object_nodes()) num_container_replicas = len(self.brain.nodes) num_obj_replicas = self.policy.object_ring.replica_count expected_num_updates = num_container_updates( num_container_replicas, quorum_size(num_container_replicas), num_obj_replicas, self.policy.quorum) expected_num_pendings = min(expected_num_updates, num_obj_replicas) # sanity check with annotate_failure('policy %s. ' % self.policy): self.assertLengthEqual(async_pendings, expected_num_pendings) # root object count is not updated... self.assert_container_object_count(len(obj_names)) self.assert_container_listing([alpha] + second_shard_objects) root_nodes_data = self.direct_get_container_shard_ranges() self.assertEqual(3, len(root_nodes_data)) for node_id, node_data in root_nodes_data.items(): with annotate_failure('Node id %s.' 
% node_id): check_node_data(node_data, exp_hdrs, exp_obj_count, 2) range_data = node_data[1] self.assert_shard_range_lists_equal( orig_range_data, range_data, excludes=['meta_timestamp', 'state_timestamp']) # ...until the sharders run and update root; reclaim tombstones so # that the shard is shrinkable shard_0_part = self.get_part_and_node_numbers( orig_shard_ranges[0])[0] for conf_index in self.configs['container-sharder'].keys(): self.run_custom_sharder(conf_index, {'reclaim_age': 0}, override_partitions=[shard_0_part]) exp_obj_count = len(second_shard_objects) + 1 self.assert_container_object_count(exp_obj_count) self.assert_container_listing([alpha] + second_shard_objects) # root sharder finds donor, acceptor pair and pushes changes self.sharders.once( additional_args='--partitions=%s' % self.brain.part) self.assert_container_listing([alpha] + second_shard_objects) # run sharder on donor to shrink and replicate to acceptor self.run_sharders(orig_shard_ranges[0]) self.assert_container_listing([alpha] + second_shard_objects) # run sharder on acceptor to update root with stats self.run_sharders(orig_shard_ranges[1]) self.assert_container_listing([alpha] + second_shard_objects) self.assert_container_object_count(len(second_shard_objects) + 1) # check root container root_nodes_data = self.direct_get_container_shard_ranges() self.assertEqual(3, len(root_nodes_data)) exp_hdrs['X-Container-Object-Count'] = str(exp_obj_count) for node_id, node_data in root_nodes_data.items(): with annotate_failure('Node id %s.' % node_id): # NB now only *one* shard range in root check_node_data(node_data, exp_hdrs, exp_obj_count, 1) # the acceptor shard is intact.. shard_nodes_data = self.direct_get_container_shard_ranges( orig_shard_ranges[1].account, orig_shard_ranges[1].container) obj_count, bytes_used = check_shard_nodes_data(shard_nodes_data) # all objects should now be in this shard self.assertEqual(exp_obj_count, obj_count) # the donor shard is also still intact donor = orig_shard_ranges[0] shard_nodes_data = self.direct_get_container_shard_ranges( donor.account, donor.container) # the donor's shard range will have the acceptor's projected stats; # donor also has copy of root shard range that will be ignored; # note: expected_shards does not include the sharded root range obj_count, bytes_used = check_shard_nodes_data( shard_nodes_data, expected_state='sharded', expected_shards=1, exp_obj_count=len(second_shard_objects) + 1, exp_sharded_root_range=True) # but the donor is empty and so reports zero stats self.assertEqual(0, obj_count) self.assertEqual(0, bytes_used) # check the donor own shard range state part, nodes = self.brain.ring.get_nodes( donor.account, donor.container) for node in nodes: with annotate_failure(node): broker = self.get_broker( part, node, donor.account, donor.container) own_sr = broker.get_own_shard_range() self.assertEqual(ShardRange.SHRUNK, own_sr.state) self.assertTrue(own_sr.deleted) # delete all the second shard's object apart from 'alpha' for obj in second_shard_objects: client.delete_object( self.url, self.token, self.container_name, obj) self.assert_container_listing([alpha]) self.sharders.once() self.assert_container_listing([alpha]) self.sharders.once() self.assert_container_listing([alpha]) self.sharders.once() self.assert_container_listing([alpha]) shard_nodes_data = self.direct_get_container_shard_ranges( orig_shard_ranges[1].account, orig_shard_ranges[1].container) obj_count, bytes_used = check_shard_nodes_data(shard_nodes_data) self.assertEqual(1, obj_count) 
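            # force tombstone reclamation on the remaining acceptor shard so
            # it becomes shrinkable; the subsequent sharder cycles should then
            # shrink it back into the root (the 'collapsed' state checked below)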
shard_1_part = self.get_part_and_node_numbers( orig_shard_ranges[1])[0] for conf_index in self.configs['container-sharder'].keys(): self.run_custom_sharder(conf_index, {'reclaim_age': 0}, override_partitions=[shard_1_part]) self.assert_container_listing([alpha]) self.sharders.once() self.assert_container_listing([alpha]) self.sharders.once() self.assert_container_listing([alpha]) shard_nodes_data = self.direct_get_container_shard_ranges( orig_shard_ranges[1].account, orig_shard_ranges[1].container) check_shard_nodes_data( shard_nodes_data, expected_state='sharded', expected_shards=1, exp_obj_count=1) root_nodes_data = self.direct_get_container_shard_ranges() self.assertEqual(3, len(root_nodes_data)) exp_hdrs = {'X-Backend-Sharding-State': 'collapsed', 'X-Container-Object-Count': '1'} for node_id, node_data in root_nodes_data.items(): with annotate_failure('Node id %s.' % node_id): check_node_data(node_data, exp_hdrs, 0, 0) client.delete_object( self.url, self.token, self.container_name, alpha) client.delete_container(self.url, self.token, self.container_name) self.assert_container_not_found() self.direct_head_container(expect_failure=True) self.sharders.once() self.assert_container_not_found() self.direct_head_container(expect_failure=True) self.updaters.once() self.assert_container_listing([beta]) client.delete_object( self.url, self.token, self.container_name, beta) do_shard_then_shrink() do_shard_then_shrink() def test_delete_root_reclaim(self): all_obj_names = self._make_object_names(self.max_shard_size) self.put_objects(all_obj_names) client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) for n in self.brain.node_numbers: self.sharders.once( number=n, additional_args='--partitions=%s' % self.brain.part) for node in self.brain.nodes: self.assert_container_state(node, 'sharded', 2) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') self.assert_container_listing(all_obj_names) self.delete_objects(all_obj_names) self.assert_container_listing([]) self.assert_container_post_ok('has objects') self.assert_container_object_count(len(all_obj_names)) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() shard_ranges = self.get_container_shard_ranges() self.assertLengthEqual(shard_ranges, 2) self.run_sharders(shard_ranges) self.assert_container_listing([]) self.assert_container_post_ok('empty') self.assert_container_object_count(0) client.delete_container(self.url, self.token, self.container_name) self.assert_container_post_fails('deleted') self.assert_container_not_found() Manager(['container-updater']).once() for conf_file in self.configs['container-replicator'].values(): conf = utils.readconf(conf_file, 'container-replicator') conf['reclaim_age'] = 0 ContainerReplicator(conf).run_once() for conf_index in self.configs['container-sharder'].keys(): sharder = self.run_custom_sharder(conf_index, {}) self.assertEqual([], sharder.logger.get_lines_for_level('warning')) # until the root wants to start reclaiming but we haven't shrunk yet! 
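        # with reclaim_age forced to zero, at least one sharder run should warn
        # that the reclaimable root db is stuck waiting for shrinking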
found_warning = False for conf_index in self.configs['container-sharder'].keys(): sharder = self.run_custom_sharder(conf_index, {'reclaim_age': 0}) warnings = sharder.logger.get_lines_for_level('warning') if warnings: self.assertTrue(warnings[0].startswith( 'Reclaimable db stuck waiting for shrinking')) self.assertEqual(1, len(warnings)) found_warning = True self.assertTrue(found_warning) def _setup_replication_scenario(self, num_shards, extra_objs=('alpha',)): obj_names = self._make_object_names( num_shards * self.max_shard_size // 2) self.put_objects(obj_names) client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) node_numbers = self.brain.node_numbers self.replicators.once() self.stop_container_servers(slice(0, 2)) self.put_objects(extra_objs) for number in node_numbers[:2]: self.brain.servers.start(number=number) self.brain.servers.stop(number=node_numbers[2]) self.assert_container_listing(obj_names) for number in node_numbers[:2]: self.sharders.once( number=number, additional_args='--partitions=%s' % self.brain.part) self.assert_container_listing(obj_names) return obj_names def test_replication_to_sharding_container(self): obj_names = self._setup_replication_scenario(3) for node in self.brain.nodes[:2]: self.assert_container_state(node, 'sharding', 3) node_numbers = self.brain.node_numbers self.brain.servers.start(number=node_numbers[2]) self.assert_container_state(self.brain.nodes[2], 'unsharded', 0) self.replicators.once(number=node_numbers[2]) found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 2) self.assertLengthEqual(found['normal_dbs'], 3) for node in self.brain.nodes[:2]: broker = self.get_broker(self.brain.part, node) with annotate_failure( 'Node id %s in %s' % (node['id'], self.brain.nodes[:2])): self.assertFalse(broker.get_objects()) self.assert_container_state(node, 'sharding', 3) self.brain.servers.stop(number=node_numbers[2]) self.assert_container_listing(obj_names) self.brain.servers.start(number=node_numbers[2]) node_data = self.direct_get_container_shard_ranges() for node, (hdrs, shard_ranges) in node_data.items(): with annotate_failure(node): self.assert_shard_ranges_contiguous(3, shard_ranges) self.brain.servers.stop(number=node_numbers[2]) for number in node_numbers[:2]: self.sharders.once( number=number, additional_args='--partitions=%s' % self.brain.part) self.assert_container_state(self.brain.nodes[0], 'sharded', 3) self.assert_container_state(self.brain.nodes[1], 'sharded', 3) self.assert_container_listing(obj_names) self.brain.servers.start(number=node_numbers[2]) self.assert_container_state(self.brain.nodes[2], 'unsharded', 3) self.sharders.once(number=node_numbers[2], additional_args='--partitions=%s' % self.brain.part) self.assert_container_state(self.brain.nodes[2], 'sharding', 3) self.sharders.once(number=node_numbers[2], additional_args='--partitions=%s' % self.brain.part) self.assert_container_state(self.brain.nodes[2], 'sharded', 3) self.assert_container_listing(['alpha'] + obj_names) def test_replication_to_sharded_container(self): obj_names = self._setup_replication_scenario(2) for node in self.brain.nodes[:2]: self.assert_container_state(node, 'sharded', 2) found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 2) self.assertLengthEqual(found['normal_dbs'], 1) for node in self.brain.nodes[:2]: broker = self.get_broker(self.brain.part, node) info = broker.get_info() with annotate_failure( 'Node id %s in %s' % 
(node['id'], self.brain.nodes[:2])): self.assertEqual(len(obj_names), info['object_count']) self.assertFalse(broker.get_objects()) node_numbers = self.brain.node_numbers self.brain.servers.start(number=node_numbers[2]) self.assert_container_state(self.brain.nodes[2], 'unsharded', 0) self.replicators.once(number=node_numbers[2]) found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 2) self.assertLengthEqual(found['normal_dbs'], 1) for node in self.brain.nodes[:2]: broker = self.get_broker(self.brain.part, node) with annotate_failure( 'Node id %s in %s' % (node['id'], self.brain.nodes[:2])): self.assertFalse(broker.get_objects()) self.assert_container_state(node, 'sharded', 2) self.brain.servers.stop(number=node_numbers[2]) self.assert_container_listing(obj_names) self.brain.servers.start(number=node_numbers[2]) node_data = self.direct_get_container_shard_ranges() for node, (hdrs, shard_ranges) in node_data.items(): with annotate_failure(node): self.assert_shard_ranges_contiguous(2, shard_ranges) self.assert_container_state(self.brain.nodes[2], 'unsharded', 2) self.sharders.once(number=node_numbers[2], additional_args='--partitions=%s' % self.brain.part) self.assert_container_state(self.brain.nodes[2], 'sharded', 2) self.assert_container_listing(['alpha'] + obj_names) def test_sharding_requires_sufficient_replication(self): obj_names = self._make_object_names(2 * self.max_shard_size) self.put_objects(obj_names) client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) node_numbers = self.brain.node_numbers leader_node = self.brain.nodes[0] leader_num = node_numbers[0] self.replicators.once() self.sharders.once(number=leader_num, additional_args='--partitions=%s' % self.brain.part) shard_ranges = self.assert_container_state(leader_node, 'sharding', 4) self.assertEqual([ShardRange.CLEAVED] * 2 + [ShardRange.CREATED] * 2, [sr.state for sr in shard_ranges]) recon = direct_client.direct_get_recon(leader_node, "sharding") expected_in_progress = {'all': [{'account': 'AUTH_test', 'active': 0, 'cleaved': 2, 'created': 2, 'found': 0, 'db_state': 'sharding', 'state': 'sharding', 'error': None, 'file_size': mock.ANY, 'meta_timestamp': mock.ANY, 'node_index': 0, 'object_count': len(obj_names), 'container': mock.ANY, 'path': mock.ANY, 'root': mock.ANY}]} actual = recon['sharding_stats']['sharding']['sharding_in_progress'] self.assertEqual(expected_in_progress, actual) # stop *all* container servers for third shard range sr_part, sr_node_nums = self.get_part_and_node_numbers(shard_ranges[2]) for node_num in sr_node_nums: self.brain.servers.stop(number=node_num) # attempt to continue sharding on the leader node self.sharders.once(number=leader_num, additional_args='--partitions=%s' % self.brain.part) # no cleaving progress was made for node_num in sr_node_nums: self.brain.servers.start(number=node_num) shard_ranges = self.assert_container_state(leader_node, 'sharding', 4) self.assertEqual([ShardRange.CLEAVED] * 2 + [ShardRange.CREATED] * 2, [sr.state for sr in shard_ranges]) # stop two of the servers for third shard range, not including any # server that happens to be the leader node stopped = [] for node_num in sr_node_nums: if node_num != leader_num: self.brain.servers.stop(number=node_num) stopped.append(node_num) if len(stopped) >= 2: break self.assertLengthEqual(stopped, 2) # sanity check # attempt to continue sharding on the leader node self.sharders.once(number=leader_num, additional_args='--partitions=%s' % 
self.brain.part) # no cleaving progress was made for node_num in stopped: self.brain.servers.start(number=node_num) shard_ranges = self.assert_container_state(leader_node, 'sharding', 4) self.assertEqual([ShardRange.CLEAVED] * 2 + [ShardRange.CREATED] * 2, [sr.state for sr in shard_ranges]) # stop just one of the servers for third shard range stopped = [] for node_num in sr_node_nums: if node_num != leader_num: self.brain.servers.stop(number=node_num) stopped.append(node_num) break self.assertLengthEqual(stopped, 1) # sanity check # attempt to continue sharding the container self.sharders.once(number=leader_num, additional_args='--partitions=%s' % self.brain.part) # this time cleaving completed self.brain.servers.start(number=stopped[0]) shard_ranges = self.assert_container_state(leader_node, 'sharded', 4) self.assertEqual([ShardRange.ACTIVE] * 4, [sr.state for sr in shard_ranges]) # Check the leader's progress again, this time is should be complete recon = direct_client.direct_get_recon(leader_node, "sharding") expected_in_progress = {'all': [{'account': 'AUTH_test', 'active': 4, 'cleaved': 0, 'created': 0, 'found': 0, 'db_state': 'sharded', 'state': 'sharded', 'error': None, 'file_size': mock.ANY, 'meta_timestamp': mock.ANY, 'node_index': 0, 'object_count': len(obj_names), 'container': mock.ANY, 'path': mock.ANY, 'root': mock.ANY}]} actual = recon['sharding_stats']['sharding']['sharding_in_progress'] self.assertEqual(expected_in_progress, actual) def test_sharded_delete(self): all_obj_names = self._make_object_names(self.max_shard_size) self.put_objects(all_obj_names) client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) for n in self.brain.node_numbers: self.sharders.once( number=n, additional_args='--partitions=%s' % self.brain.part) for node in self.brain.nodes: self.assert_container_state(node, 'sharded', 2) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') self.assert_container_listing(all_obj_names) self.delete_objects(all_obj_names) self.assert_container_listing([]) self.assert_container_post_ok('has objects') self.assert_container_object_count(len(all_obj_names)) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() shard_ranges = self.get_container_shard_ranges() self.assertLengthEqual(shard_ranges, 2) self.run_sharders(shard_ranges) self.assert_container_listing([]) self.assert_container_post_ok('empty') self.assert_container_object_count(0) self.put_objects(['alpha']) self.assert_container_listing(['alpha']) self.assert_container_object_count(0) client.delete_container(self.url, self.token, self.container_name) self.assert_container_post_fails('deleted') self.assert_container_not_found() self.run_sharders(shard_ranges) self.assert_container_listing(['alpha']) self.assert_container_object_count(1) self.assert_container_delete_fails() self.assert_container_post_ok('revived') def test_object_update_redirection(self): all_obj_names = self._make_object_names(self.max_shard_size) self.put_objects(all_obj_names) client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) for n in self.brain.node_numbers: self.sharders.once( number=n, additional_args='--partitions=%s' % self.brain.part) for node in self.brain.nodes: self.assert_container_state(node, 'sharded', 2) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') 
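        # sanity check the full listing, then delete everything so the two
        # shards empty out and can later be shrunk away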
self.assert_container_listing(all_obj_names) self.delete_objects(all_obj_names) self.assert_container_listing([]) self.assert_container_post_ok('has objects') shard_ranges = self.get_container_shard_ranges() self.assertLengthEqual(shard_ranges, 2) shard_partitions = [self.get_part_and_node_numbers(sr)[0] for sr in shard_ranges] for conf_index in self.configs['container-sharder'].keys(): self.run_custom_sharder(conf_index, {'reclaim_age': 0}, override_partitions=shard_partitions) self.assert_container_object_count(0) shard_part, shard_nodes = self.get_part_and_node_numbers( shard_ranges[0]) self.brain.servers.stop(number=shard_nodes[2]) self.put_objects(['alpha']) self.assert_container_listing(['alpha']) self.assert_container_object_count(0) self.assertLengthEqual( self.gather_async_pendings(self.get_all_object_nodes()), 1) self.brain.servers.start(number=shard_nodes[2]) self.sharders.once(additional_args='--partitions=%s' % self.brain.part) self.sharders.once(additional_args='--partitions=%s' % shard_part, number=shard_nodes[2]) self.assertLengthEqual(self.get_container_shard_ranges(), 1) self.assert_container_listing([], req_hdrs={'x-newest': 'true'}) self.assert_container_object_count(0) self.updaters.once() self.assert_container_listing(['alpha']) self.assert_container_object_count(0) for number in shard_nodes[:2]: self.sharders.once(additional_args='--partitions=%s' % shard_part, number=number) self.run_sharders(shard_ranges[1]) self.assert_container_listing(['alpha']) self.assert_container_object_count(1) self.assertLengthEqual(self.get_container_shard_ranges(), 1) shard_part, shard_nodes = self.get_part_and_node_numbers( shard_ranges[1]) self.brain.servers.stop(number=shard_nodes[2]) self.memcache.delete(get_cache_key( self.account, self.container_name, shard='updating')) self.delete_objects(['alpha']) self.put_objects(['beta']) self.assert_container_listing(['beta']) self.assert_container_object_count(1) self.assertLengthEqual( self.gather_async_pendings(self.get_all_object_nodes()), 2) self.brain.servers.start(number=shard_nodes[2]) self.sharders.once(additional_args='--partitions=%s' % self.brain.part) self.sharders.once(additional_args='--partitions=%s' % shard_part, number=shard_nodes[2]) self.assertFalse(self.get_container_shard_ranges()) for number in shard_nodes[:2]: self.brain.servers.stop(number=number) self.assert_container_listing(['alpha']) self.assert_container_object_count(1) # run the updaters: the async pending update will be redirected from # shrunk shard to the root self.updaters.once() self.assert_container_listing(['beta']) self.assert_container_object_count(1) def test_misplaced_object_movement(self): def merge_object(shard_range, name, deleted=0): # it's hard to get a test to put a misplaced object into a shard, shard_part, shard_nodes = self.brain.ring.get_nodes( shard_range.account, shard_range.container) shard_broker = self.get_broker( shard_part, shard_nodes[0], shard_range.account, shard_range.container) shard_broker.merge_items( [{'name': name, 'created_at': Timestamp.now().internal, 'size': 0, 'content_type': 'text/plain', 'etag': md5(usedforsecurity=False).hexdigest(), 'deleted': deleted, 'storage_policy_index': shard_broker.storage_policy_index}]) return shard_nodes[0] all_obj_names = self._make_object_names(self.max_shard_size) self.put_objects(all_obj_names) client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) for n in self.brain.node_numbers: self.sharders.once( number=n, 
additional_args='--partitions=%s' % self.brain.part) for node in self.brain.nodes: self.assert_container_state(node, 'sharded', 2) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') self.assert_container_listing(all_obj_names) shard_ranges = self.get_container_shard_ranges() self.assertLengthEqual(shard_ranges, 2) shard_0_objects = [name for name in all_obj_names if name in shard_ranges[0]] shard_1_objects = [name for name in all_obj_names if name in shard_ranges[1]] self.delete_objects(shard_0_objects) self.assert_container_listing(shard_1_objects) self.assert_container_post_ok('has objects') shard_0_part = self.get_part_and_node_numbers(shard_ranges[0])[0] for conf_index in self.configs['container-sharder'].keys(): self.run_custom_sharder(conf_index, {'reclaim_age': 0}, override_partitions=[shard_0_part]) self.assert_container_object_count(len(shard_1_objects)) self.sharders.once(additional_args='--partitions=%s' % self.brain.part) self.run_sharders(shard_ranges[0]) misplaced_node = merge_object(shard_ranges[0], 'alpha', deleted=0) self.assertLengthEqual(self.get_container_shard_ranges(), 1) self.assert_container_listing(shard_1_objects) self.assert_container_object_count(len(shard_1_objects)) shard_part, shard_nodes_numbers = self.get_part_and_node_numbers( shard_ranges[0]) self.sharders.once(additional_args='--partitions=%s' % shard_part, number=misplaced_node['id'] + 1) self.assert_container_listing(['alpha'] + shard_1_objects) self.assert_container_object_count(len(shard_1_objects)) self.run_sharders(shard_ranges[1]) self.assert_container_listing(['alpha'] + shard_1_objects) self.assert_container_object_count(len(shard_1_objects) + 1) self.assertLengthEqual(self.get_container_shard_ranges(), 1) self.delete_objects(shard_1_objects) shard_1_part = self.get_part_and_node_numbers(shard_ranges[1])[0] for conf_index in self.configs['container-sharder'].keys(): self.run_custom_sharder(conf_index, {'reclaim_age': 0}, override_partitions=[shard_1_part]) self.sharders.once(additional_args='--partitions=%s' % self.brain.part) self.run_sharders(shard_ranges[1]) self.assertFalse(self.get_container_shard_ranges()) merge_object(shard_ranges[1], 'alpha', deleted=1) misplaced_node = merge_object(shard_ranges[1], 'beta', deleted=0) self.assert_container_listing(['alpha']) self.assert_container_object_count(1) shard_part, shard_nodes_numbers = self.get_part_and_node_numbers( shard_ranges[1]) self.sharders.once(additional_args='--partitions=%s' % shard_part, number=misplaced_node['id'] + 1) self.assert_container_listing(['beta']) self.assert_container_object_count(1) self.assert_container_delete_fails() def test_misplaced_object_movement_from_deleted_shard(self): def merge_object(shard_range, name, deleted=0): # so this hack is used force an object record directly into a shard # container db. Note: the actual object won't exist, we're just # using this to test object records in container dbs. 
shard_part, shard_nodes = self.brain.ring.get_nodes( shard_range.account, shard_range.container) shard_broker = self.get_shard_broker(shard_range) # In this test we want to merge into a deleted container shard shard_broker.delete_db(Timestamp.now().internal) shard_broker.merge_items( [{'name': name, 'created_at': Timestamp.now().internal, 'size': 0, 'content_type': 'text/plain', 'etag': md5(usedforsecurity=False).hexdigest(), 'deleted': deleted, 'storage_policy_index': shard_broker.storage_policy_index}]) return shard_nodes[0] all_obj_names = self._make_object_names(self.max_shard_size) self.put_objects(all_obj_names) # Shard the container client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) for n in self.brain.node_numbers: self.sharders.once( number=n, additional_args='--partitions=%s' % self.brain.part) # sanity checks for node in self.brain.nodes: self.assert_container_state(node, 'sharded', 2) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') self.assert_container_listing(all_obj_names) # delete all objects in first shard range - updates redirected to shard shard_ranges = self.get_container_shard_ranges() self.assertLengthEqual(shard_ranges, 2) shard_0_objects = [name for name in all_obj_names if name in shard_ranges[0]] shard_1_objects = [name for name in all_obj_names if name in shard_ranges[1]] self.delete_objects(shard_0_objects) self.assert_container_listing(shard_1_objects) self.assert_container_post_ok('has objects') # run sharder on first shard container to update root stats shard_0_part = self.get_part_and_node_numbers(shard_ranges[0])[0] for conf_index in self.configs['container-sharder'].keys(): self.run_custom_sharder(conf_index, {'reclaim_age': 0}, override_partitions=[shard_0_part]) self.assert_container_object_count(len(shard_1_objects)) # First, test a misplaced object moving from one shard to another. 
# run sharder on root to discover first shrink candidate self.sharders.once(additional_args='--partitions=%s' % self.brain.part) # then run sharder on first shard range to shrink it self.run_sharders(shard_ranges[0]) # force a misplaced object into the shrunken shard range to simulate # a client put that was in flight when it started to shrink misplaced_node = merge_object(shard_ranges[0], 'alpha', deleted=0) # root sees first shard has shrunk, only second shard range used for # listing so alpha object not in listing self.assertLengthEqual(self.get_container_shard_ranges(), 1) self.assert_container_listing(shard_1_objects) self.assert_container_object_count(len(shard_1_objects)) # until sharder runs on that node to move the misplaced object to the # second shard range shard_part, shard_nodes_numbers = self.get_part_and_node_numbers( shard_ranges[0]) self.sharders.once(additional_args='--partitions=%s' % shard_part, number=misplaced_node['id'] + 1) self.assert_container_listing(['alpha'] + shard_1_objects) # root not yet updated self.assert_container_object_count(len(shard_1_objects)) # check the deleted shard did not push the wrong root path into the # other container for replica in 0, 1, 2: shard_x_broker = self.get_shard_broker(shard_ranges[1], replica) self.assertEqual("%s/%s" % (self.account, self.container_name), shard_x_broker.root_path) # run the sharder of the existing shard to update the root stats # to prove the misplaced object was moved to the other shard _and_ # the other shard still has the correct root because it updates root's self.run_sharders(shard_ranges[1]) self.assert_container_object_count(len(shard_1_objects) + 1) def test_replication_to_sharded_container_from_unsharded_old_primary(self): primary_ids = [n['id'] for n in self.brain.nodes] handoff_node = next(n for n in self.brain.ring.devs if n['id'] not in primary_ids) obj_names = self._setup_replication_scenario(2) for node in self.brain.nodes[:2]: self.assert_container_state(node, 'sharded', 2) node_numbers = self.brain.node_numbers new_primary_node = self.brain.nodes[2] new_primary_node_number = node_numbers[2] new_primary_dir, container_hash = self.get_storage_dir( self.brain.part, new_primary_node) old_primary_dir, container_hash = self.get_storage_dir( self.brain.part, handoff_node) utils.mkdirs(os.path.dirname(old_primary_dir)) shutil.move(new_primary_dir, old_primary_dir) self.brain.servers.start(number=new_primary_node_number) client.put_container(self.url, self.token, self.container_name) self.assertTrue(os.path.exists(os.path.join( new_primary_dir, container_hash + '.db'))) found = self.categorize_container_dir_content() self.assertLengthEqual(found['normal_dbs'], 1) self.assertLengthEqual(found['shard_dbs'], 2) failed_node = self.brain.nodes[0] failed_dir, _container_hash = self.get_storage_dir( self.brain.part, failed_node) shutil.rmtree(failed_dir) self.brain.servers.stop(number=new_primary_node_number) self.replicators.once(number=handoff_node['id'] + 1) # This may or may not have shard ranges, depending on the order in # which we hit the primaries, but it definitely *doesn't* have an self.assertTrue(os.path.exists(os.path.join( failed_dir, container_hash + '.db'))) self.assertLengthEqual(os.listdir(failed_dir), 1) broker = self.get_broker(self.brain.part, failed_node) self.assertLengthEqual(broker.get_objects(), len(obj_names) + 1) broker = self.get_broker(self.brain.part, self.brain.nodes[1]) self.assertLengthEqual(broker.get_objects(), 0) self.assertTrue(os.path.exists(os.path.join( old_primary_dir, 
container_hash + '.db'))) broker = self.get_broker(self.brain.part, handoff_node) shard_ranges = broker.get_shard_ranges() self.assertLengthEqual(shard_ranges, 2) self.assert_container_state(handoff_node, 'unsharded', 2) self.brain.servers.start(number=new_primary_node_number) self.replicators.once(number=handoff_node['id'] + 1) # but instead we wait broker = self.get_broker(self.brain.part, new_primary_node) self.assertLengthEqual(broker.get_objects(), 0) shard_ranges = broker.get_shard_ranges() self.assertLengthEqual(shard_ranges, 2) # so the next time the sharder comes along, it can push rows out # and delete the big db self.sharders.once(number=handoff_node['id'] + 1, additional_args='--partitions=%s' % self.brain.part) self.assert_container_state(handoff_node, 'sharded', 2) self.assertFalse(os.path.exists(os.path.join( old_primary_dir, container_hash + '.db'))) # the sharded db hangs around until replication confirms durability # first attempt is not sufficiently successful self.brain.servers.stop(number=node_numbers[0]) self.replicators.once(number=handoff_node['id'] + 1) self.assertTrue(os.path.exists(old_primary_dir)) self.assert_container_state(handoff_node, 'sharded', 2) # second attempt is successful and handoff db is deleted self.brain.servers.start(number=node_numbers[0]) self.replicators.once(number=handoff_node['id'] + 1) self.assertFalse(os.path.exists(old_primary_dir)) # run all the sharders, get us into a consistent state self.sharders.once(additional_args='--partitions=%s' % self.brain.part) self.assert_container_listing(['alpha'] + obj_names) def test_replication_to_empty_new_primary_from_sharding_old_primary(self): primary_ids = [n['id'] for n in self.brain.nodes] handoff_node = next(n for n in self.brain.ring.devs if n['id'] not in primary_ids) num_shards = 3 obj_names = self._make_object_names( num_shards * self.max_shard_size // 2) self.put_objects(obj_names) client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) # run replicators first time to get sync points set self.replicators.once() # start sharding on only the leader node leader_node = self.brain.nodes[0] leader_node_number = self.brain.node_numbers[0] self.sharders.once(number=leader_node_number) self.assert_container_state(leader_node, 'sharding', 3) for node in self.brain.nodes[1:]: self.assert_container_state(node, 'unsharded', 3) # Fake a ring change - copy leader node db to a handoff to create # illusion of a new unpopulated primary leader node new_primary_dir, container_hash = self.get_storage_dir( self.brain.part, leader_node) old_primary_dir, container_hash = self.get_storage_dir( self.brain.part, handoff_node) utils.mkdirs(os.path.dirname(old_primary_dir)) shutil.move(new_primary_dir, old_primary_dir) self.assert_container_state(handoff_node, 'sharding', 3) # run replicator on handoff node to create a fresh db on new primary self.assertFalse(os.path.exists(new_primary_dir)) self.replicators.once(number=handoff_node['id'] + 1) self.assertTrue(os.path.exists(new_primary_dir)) self.assert_container_state(leader_node, 'sharded', 3) broker = self.get_broker(self.brain.part, leader_node) shard_ranges = broker.get_shard_ranges() self.assertLengthEqual(shard_ranges, 3) self.assertEqual( [ShardRange.CLEAVED, ShardRange.CLEAVED, ShardRange.CREATED], [sr.state for sr in shard_ranges]) # db still exists on handoff self.assertTrue(os.path.exists(old_primary_dir)) self.assert_container_state(handoff_node, 'sharding', 3) # continue sharding it... 
self.sharders.once(number=handoff_node['id'] + 1) self.assert_container_state(leader_node, 'sharded', 3) # now handoff is fully sharded the replicator will delete it self.replicators.once(number=handoff_node['id'] + 1) self.assertFalse(os.path.exists(old_primary_dir)) # all primaries now have active shard ranges but only one is in sharded # state self.assert_container_state(leader_node, 'sharded', 3) for node in self.brain.nodes[1:]: self.assert_container_state(node, 'unsharded', 3) node_data = self.direct_get_container_shard_ranges() for node_id, (hdrs, shard_ranges) in node_data.items(): with annotate_failure( 'node id %s from %s' % (node_id, node_data.keys)): self.assert_shard_range_state(ShardRange.ACTIVE, shard_ranges) # check handoff cleaved all objects before it was deleted - stop all # but leader node so that listing is fetched from shards for number in self.brain.node_numbers[1:3]: self.brain.servers.stop(number=number) self.assert_container_listing(obj_names) for number in self.brain.node_numbers[1:3]: self.brain.servers.start(number=number) self.sharders.once() self.assert_container_state(leader_node, 'sharded', 3) for node in self.brain.nodes[1:]: self.assert_container_state(node, 'sharding', 3) self.sharders.once() for node in self.brain.nodes: self.assert_container_state(node, 'sharded', 3) self.assert_container_listing(obj_names) def test_sharded_account_updates(self): # verify that .shards account updates have zero object count and bytes # to avoid double accounting all_obj_names = self._make_object_names(self.max_shard_size) self.put_objects(all_obj_names, contents='xyz') # Shard the container into 2 shards client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) for n in self.brain.node_numbers: self.sharders.once( number=n, additional_args='--partitions=%s' % self.brain.part) # sanity checks for node in self.brain.nodes: shard_ranges = self.assert_container_state(node, 'sharded', 2) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') self.assert_container_listing(all_obj_names) # run the updaters to get account stats updated self.updaters.once() # check user account stats metadata = self.internal_client.get_account_metadata(self.account) self.assertEqual(1, int(metadata.get('x-account-container-count'))) self.assertEqual(self.max_shard_size, int(metadata.get('x-account-object-count'))) self.assertEqual(3 * self.max_shard_size, int(metadata.get('x-account-bytes-used'))) # check hidden .shards account stats metadata = self.internal_client.get_account_metadata( shard_ranges[0].account) self.assertEqual(2, int(metadata.get('x-account-container-count'))) self.assertEqual(0, int(metadata.get('x-account-object-count'))) self.assertEqual(0, int(metadata.get('x-account-bytes-used'))) class TestContainerShardingMoreUTF8(TestContainerSharding): def _make_object_names(self, number): # override default with names that include non-ascii chars name_length = self.cluster_info['swift']['max_object_name_length'] obj_names = [] for x in range(number): name = (u'obj-\u00e4\u00ea\u00ec\u00f2\u00fb-%04d' % x) name = name.encode('utf8').ljust(name_length, b'o') if not six.PY2: name = name.decode('utf8') obj_names.append(name) return obj_names def _setup_container_name(self): # override default with max length name that includes non-ascii chars super(TestContainerShardingMoreUTF8, self)._setup_container_name() name_length = 
self.cluster_info['swift']['max_container_name_length'] cont_name = \ self.container_name + u'-\u00e4\u00ea\u00ec\u00f2\u00fb\u1234' self.container_name = cont_name.encode('utf8').ljust(name_length, b'x') if not six.PY2: self.container_name = self.container_name.decode('utf8') class TestManagedContainerSharding(BaseTestContainerSharding): def sharders_once(self, **kwargs): # inhibit auto_sharding regardless of the config setting additional_args = kwargs.get('additional_args', []) if not isinstance(additional_args, list): additional_args = [additional_args] additional_args.append('--no-auto-shard') kwargs['additional_args'] = additional_args self.sharders.once(**kwargs) def test_manage_shard_ranges(self): obj_names = self._make_object_names(7) self.put_objects(obj_names) client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) # run replicators first time to get sync points set self.replicators.once() # sanity check: we don't have nearly enough objects for this to shard self.sharders_once(number=self.brain.node_numbers[0], additional_args='--partitions=%s' % self.brain.part) self.assert_container_state(self.brain.nodes[0], 'unsharded', 0) self.assert_subprocess_success([ 'swift-manage-shard-ranges', self.get_db_file(self.brain.part, self.brain.nodes[0]), 'find_and_replace', '3', '--enable', '--minimum-shard-size', '2']) self.assert_container_state(self.brain.nodes[0], 'unsharded', 2) self.replicators.once() self.sharders_once(additional_args='--partitions=%s' % self.brain.part) self.assert_container_state(self.brain.nodes[0], 'sharded', 2) self.assert_container_state(self.brain.nodes[1], 'sharded', 2) self.assert_container_state(self.brain.nodes[2], 'sharded', 2) self.assert_container_listing(obj_names) def test_manage_shard_ranges_compact(self): # verify shard range compaction using swift-manage-shard-ranges obj_names = self._make_object_names(8) self.put_objects(obj_names) client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) # run replicators first time to get sync points set, and get container # sharded into 4 shards self.replicators.once() self.assert_subprocess_success([ 'swift-manage-shard-ranges', self.get_db_file(self.brain.part, self.brain.nodes[0]), 'find_and_replace', '2', '--enable']) self.assert_container_state(self.brain.nodes[0], 'unsharded', 4) self.replicators.once() # run sharders twice to cleave all 4 shard ranges self.sharders_once(additional_args='--partitions=%s' % self.brain.part) self.sharders_once(additional_args='--partitions=%s' % self.brain.part) self.assert_container_state(self.brain.nodes[0], 'sharded', 4) self.assert_container_state(self.brain.nodes[1], 'sharded', 4) self.assert_container_state(self.brain.nodes[2], 'sharded', 4) self.assert_container_listing(obj_names) # now compact some ranges; use --max-shrinking to allow 2 shrinking # shards self.assert_subprocess_success([ 'swift-manage-shard-ranges', self.get_db_file(self.brain.part, self.brain.nodes[0]), 'compact', '--max-expanding', '1', '--max-shrinking', '2', '--yes']) shard_ranges = self.assert_container_state( self.brain.nodes[0], 'sharded', 4) self.assertEqual([ShardRange.SHRINKING] * 2 + [ShardRange.ACTIVE] * 2, [sr.state for sr in shard_ranges]) self.replicators.once() self.sharders_once() # check there's now just 2 remaining shard ranges shard_ranges = self.assert_container_state( self.brain.nodes[0], 'sharded', 2) self.assertEqual([ShardRange.ACTIVE] * 2, [sr.state for sr in 
shard_ranges]) self.assert_container_listing(obj_names, req_hdrs={'X-Newest': 'True'}) for i, node in enumerate(self.brain.nodes): with annotate_failure('node[%d]' % i): broker = self.get_broker(self.brain.part, self.brain.nodes[0]) self.assertEqual(ShardRange.SHARDED, broker.get_own_shard_range().state) self.assert_subprocess_success([ 'swift-manage-shard-ranges', self.get_db_file(self.brain.part, self.brain.nodes[0]), 'compact', '--yes', '--max-shrinking', '2']) shard_ranges = self.assert_container_state( self.brain.nodes[0], 'sharded', 2) self.assertEqual([ShardRange.SHRINKING] * 2, [sr.state for sr in shard_ranges]) self.replicators.once() self.sharders_once() self.assert_container_state(self.brain.nodes[0], 'collapsed', 0) self.assert_container_listing(obj_names, req_hdrs={'X-Newest': 'True'}) for i, node in enumerate(self.brain.nodes): with annotate_failure('node[%d]' % i): broker = self.get_broker(self.brain.part, self.brain.nodes[0]) self.assertEqual(ShardRange.ACTIVE, broker.get_own_shard_range().state) def test_manage_shard_ranges_repair_root(self): obj_names = self._make_object_names(16) self.put_objects(obj_names) client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) self.replicators.once() # 0.2 and 0.3 that are installed with epoch_0 self.assert_subprocess_success([ 'swift-manage-shard-ranges', self.get_db_file(self.brain.part, self.brain.nodes[0]), 'find_and_replace', '4', '--enable']) shard_ranges_0 = self.assert_container_state(self.brain.nodes[0], 'unsharded', 4) # *Also* go find 3 shard ranges on *another node*, like a dumb-dumb - # let's denote these ranges 1.0, 1.1 and 1.2 that are installed with self.assert_subprocess_success([ 'swift-manage-shard-ranges', self.get_db_file(self.brain.part, self.brain.nodes[1]), 'find_and_replace', '7', '--enable']) shard_ranges_1 = self.assert_container_state(self.brain.nodes[1], 'unsharded', 3) self.sharders_once(number=self.brain.node_numbers[0], additional_args='--partitions=%s' % self.brain.part) # same state overlap), creates its shards and then syncs shard ranges # with the other replicas. All of the 7 shard ranges on this replica # are now in CREATED state so it proceeds to cleave the first two shard # ranges, 0.1 and 1.0. self.sharders_once(number=self.brain.node_numbers[1], additional_args='--partitions=%s' % self.brain.part) self.replicators.once() # Uh-oh self.assert_container_state(self.brain.nodes[0], 'sharding', 7) self.assert_container_state(self.brain.nodes[1], 'sharding', 7) # There's a race: the third replica may be sharding, may be unsharded self.sharders_once(additional_args='--partitions=%s' % self.brain.part) self.replicators.once() self.sharders_once(additional_args='--partitions=%s' % self.brain.part) # overlapping ranges which prevents cleaving proceeding; expect the # shard ranges to be mostly still in created state, with one or two # possibly cleaved during first pass before the sharding got stalled shard_ranges = self.assert_container_state(self.brain.nodes[0], 'sharding', 7) self.assertEqual([ShardRange.CLEAVED] * 2 + [ShardRange.CREATED] * 5, [sr.state for sr in shard_ranges]) shard_ranges = self.assert_container_state(self.brain.nodes[1], 'sharding', 7) self.assertEqual([ShardRange.CLEAVED] * 2 + [ShardRange.CREATED] * 5, [sr.state for sr in shard_ranges]) # But hey, at least listings still work! They're just going to get self.assert_container_listing(obj_names) # path with most cleaving progress, and so shrink shard ranges 0.*. 
db_file = self.get_db_file(self.brain.part, self.brain.nodes[0]) self.assert_subprocess_success( ['swift-manage-shard-ranges', db_file, 'repair', '--yes']) # make sure all root replicas now sync their shard ranges self.replicators.once() # Run sharder on the shrinking shards. This should not change the state # of any of the acceptors, particularly the ones that have yet to have # object cleaved from the roots, because we don't want the as yet self.run_sharders(shard_ranges_0) shard_ranges = self.assert_container_state(self.brain.nodes[1], 'sharding', 3) self.assertEqual([ShardRange.CLEAVED] * 1 + [ShardRange.CREATED] * 2, [sr.state for sr in shard_ranges]) self.assert_container_listing(obj_names) for shard_range in shard_ranges_0: with annotate_failure(shard_range): found_for_shard = self.categorize_container_dir_content( shard_range.account, shard_range.container) self.assertLengthEqual(found_for_shard['shard_dbs'], 3) actual = [] for shard_db in found_for_shard['shard_dbs']: broker = ContainerBroker(shard_db) own_sr = broker.get_own_shard_range() actual.append( (broker.get_db_state(), own_sr.state, own_sr.deleted)) self.assertEqual([(SHARDED, ShardRange.SHRUNK, True)] * 3, actual) self.sharders_once() self.sharders_once() brokers = {} exp_shard_ranges = sorted( [sr.copy(state=ShardRange.SHRUNK, deleted=True) for sr in shard_ranges_0] + [sr.copy(state=ShardRange.ACTIVE) for sr in shard_ranges_1], key=ShardRange.sort_key) for node in (0, 1, 2): with annotate_failure('node %s' % node): broker = self.get_broker(self.brain.part, self.brain.nodes[node]) brokers[node] = broker shard_ranges = broker.get_shard_ranges() self.assertEqual(shard_ranges_1, shard_ranges) shard_ranges = broker.get_shard_ranges(include_deleted=True) self.assertLengthEqual(shard_ranges, len(exp_shard_ranges)) self.maxDiff = None self.assertEqual(exp_shard_ranges, shard_ranges) self.assertEqual(ShardRange.SHARDED, broker._own_shard_range().state) self.assertEqual(UNSHARDED, brokers[0].get_db_state()) self.assertEqual(SHARDED, brokers[1].get_db_state()) self.assertEqual(SHARDED, brokers[2].get_db_state()) epoch_1 = brokers[1].db_epoch self.assertEqual(epoch_1, brokers[2].db_epoch) self.assertLess(brokers[0].db_epoch, epoch_1) # the root replica that thinks it is unsharded is problematic - it will # not return shard ranges for listings, but has no objects, so it's self.sharders_once() for node in (0, 1, 2): with annotate_failure('node %s' % node): broker = self.get_broker(self.brain.part, self.brain.nodes[node]) brokers[node] = broker shard_ranges = broker.get_shard_ranges() self.assertEqual(shard_ranges_1, shard_ranges) shard_ranges = broker.get_shard_ranges(include_deleted=True) self.assertLengthEqual(shard_ranges, len(exp_shard_ranges)) self.assertEqual(exp_shard_ranges, shard_ranges) self.assertEqual(ShardRange.SHARDED, broker._own_shard_range().state) self.assertEqual(epoch_1, broker.db_epoch) self.assertIn(brokers[0].get_db_state(), (SHARDING, SHARDED)) self.assertEqual(SHARDED, brokers[1].get_db_state()) self.assertEqual(SHARDED, brokers[2].get_db_state()) for shard_range in shard_ranges_1: with annotate_failure(shard_range): found_for_shard = self.categorize_container_dir_content( shard_range.account, shard_range.container) self.assertLengthEqual(found_for_shard['normal_dbs'], 3) actual = [] for shard_db in found_for_shard['normal_dbs']: broker = ContainerBroker(shard_db) own_sr = broker.get_own_shard_range() actual.append( (broker.get_db_state(), own_sr.state, own_sr.deleted)) self.assertEqual([(UNSHARDED, 
ShardRange.ACTIVE, False)] * 3, actual) # replicate it as per a normal cleave. self.sharders_once() for node in (0, 1, 2): with annotate_failure('node %s' % node): broker = self.get_broker(self.brain.part, self.brain.nodes[node]) brokers[node] = broker shard_ranges = broker.get_shard_ranges() self.assertEqual(shard_ranges_1, shard_ranges) shard_ranges = broker.get_shard_ranges(include_deleted=True) self.assertLengthEqual(shard_ranges, len(exp_shard_ranges)) self.assertEqual(exp_shard_ranges, shard_ranges) self.assertEqual(ShardRange.SHARDED, broker._own_shard_range().state) self.assertEqual(epoch_1, broker.db_epoch) self.assertEqual(SHARDED, broker.get_db_state()) # Finally, with all root replicas in a consistent state, the listing # will be be predictably correct self.assert_container_listing(obj_names) def test_manage_shard_ranges_repair_shard(self): # provoke overlaps in a shard container and repair them obj_names = self._make_object_names(24) initial_obj_names = obj_names[::2] # put 12 objects in container self.put_objects(initial_obj_names) client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) # run replicators first time to get sync points set self.replicators.once() # find 3 shard ranges on root nodes[0] and get the root sharded self.assert_subprocess_success([ 'swift-manage-shard-ranges', self.get_db_file(self.brain.part, self.brain.nodes[0]), 'find_and_replace', '4', '--enable']) self.replicators.once() # cleave first two shards self.sharders_once(additional_args='--partitions=%s' % self.brain.part) # cleave third shard self.sharders_once(additional_args='--partitions=%s' % self.brain.part) # ensure all shards learn their ACTIVE state from root self.sharders_once() for node in (0, 1, 2): with annotate_failure('node %d' % node): shard_ranges = self.assert_container_state( self.brain.nodes[node], 'sharded', 3) for sr in shard_ranges: self.assertEqual(ShardRange.ACTIVE, sr.state) self.assert_container_listing(initial_obj_names) # add objects to second shard range so it has 8 objects ; this range # has bounds (obj-0006,obj-0014] root_shard_ranges = self.get_container_shard_ranges() self.assertEqual(3, len(root_shard_ranges)) shard_1 = root_shard_ranges[1] self.assertEqual(obj_names[6], shard_1.lower) self.assertEqual(obj_names[14], shard_1.upper) more_obj_names = obj_names[7:15:2] self.put_objects(more_obj_names) expected_obj_names = sorted(initial_obj_names + more_obj_names) self.assert_container_listing(expected_obj_names) shard_1_part, shard_1_nodes = self.brain.ring.get_nodes( shard_1.account, shard_1.container) # find 3 sub-shards on one shard node; use --force-commits to ensure # the recently PUT objects are included when finding the shard range # pivot points self.assert_subprocess_success([ 'swift-manage-shard-ranges', '--force-commits', self.get_db_file(shard_1_part, shard_1_nodes[1], shard_1.account, shard_1.container), 'find_and_replace', '3', '--enable']) # ... 
and mistakenly find 4 shard ranges on a different shard node :( self.assert_subprocess_success([ 'swift-manage-shard-ranges', '--force-commits', self.get_db_file(shard_1_part, shard_1_nodes[2], shard_1.account, shard_1.container), 'find_and_replace', '2', '--enable']) # replicate the muddle of shard ranges between shard replicas, merged # result is: # '' - 6 shard ACTIVE # 6 - 8 sub-shard FOUND # 6 - 9 sub-shard FOUND # 8 - 10 sub-shard FOUND # 9 - 12 sub-shard FOUND # 10 - 12 sub-shard FOUND # 12 - 14 sub-shard FOUND # 12 - 14 sub-shard FOUND # 6 - 14 shard SHARDING # 14 - '' shard ACTIVE self.replicators.once() # try hard to shard the shard... self.sharders_once(additional_args='--partitions=%s' % shard_1_part) self.sharders_once(additional_args='--partitions=%s' % shard_1_part) self.sharders_once(additional_args='--partitions=%s' % shard_1_part) # sharding hasn't completed and there's overlaps in the shard and root: # the sub-shards will have been cleaved in the order listed above, but # sub-shards (10 -12) and one of (12 - 14) will be overlooked because # the cleave cursor will have moved past their namespace before they # were yielded by the shard range iterator, so we now have: # '' - 6 shard ACTIVE # 6 - 8 sub-shard ACTIVE # 6 - 9 sub-shard ACTIVE # 8 - 10 sub-shard ACTIVE # 10 - 12 sub-shard CREATED # 9 - 12 sub-shard ACTIVE # 12 - 14 sub-shard CREATED # 12 - 14 sub-shard ACTIVE # 14 - '' shard ACTIVE sub_shard_ranges = self.get_container_shard_ranges( shard_1.account, shard_1.container) self.assertEqual(7, len(sub_shard_ranges), sub_shard_ranges) root_shard_ranges = self.get_container_shard_ranges() self.assertEqual(9, len(root_shard_ranges), root_shard_ranges) self.assertEqual([ShardRange.ACTIVE] * 4 + [ShardRange.CREATED, ShardRange.ACTIVE] * 2 + [ShardRange.ACTIVE], [sr.state for sr in root_shard_ranges]) # fix the overlaps - a set of 3 ACTIVE sub-shards will be chosen and 4 # other sub-shards will be shrunk away; apply the fix at the root # container db_file = self.get_db_file(self.brain.part, self.brain.nodes[0]) self.assert_subprocess_success( ['swift-manage-shard-ranges', db_file, 'repair', '--yes']) self.replicators.once() self.sharders_once() self.sharders_once() # check root now has just 5 shard ranges root_shard_ranges = self.get_container_shard_ranges() self.assertEqual(5, len(root_shard_ranges), root_shard_ranges) self.assertEqual([ShardRange.ACTIVE] * 5, [sr.state for sr in root_shard_ranges]) # check there are 1 sharded shard and 4 shrunk sub-shard ranges in the # root (note, shard_1's shard ranges aren't updated once it has sharded # because the sub-shards report their state to the root; we cannot make # assertions about shrunk states in shard_1's shard range table) root_shard_ranges = self.get_container_shard_ranges( include_deleted=True) self.assertEqual(10, len(root_shard_ranges), root_shard_ranges) shrunk_shard_ranges = [sr for sr in root_shard_ranges if sr.state == ShardRange.SHRUNK] self.assertEqual(4, len(shrunk_shard_ranges), root_shard_ranges) self.assertEqual([True] * 4, [sr.deleted for sr in shrunk_shard_ranges]) sharded_shard_ranges = [sr for sr in root_shard_ranges if sr.state == ShardRange.SHARDED] self.assertEqual(1, len(sharded_shard_ranges), root_shard_ranges) self.assert_container_listing(expected_obj_names)
true
true
f702a83360c91a0c7d5e24011db3653f658bd555
394
py
Python
test/test_google_search.py
mtkumar123/CSC510_Project_LectureAid
678e39596ca583eda961413118781bafa02c6f1d
[ "MIT" ]
null
null
null
test/test_google_search.py
mtkumar123/CSC510_Project_LectureAid
678e39596ca583eda961413118781bafa02c6f1d
[ "MIT" ]
45
2021-09-13T00:26:54.000Z
2021-09-30T01:12:53.000Z
test/test_google_search.py
mtkumar123/CSC510_Project_LectureAid
678e39596ca583eda961413118781bafa02c6f1d
[ "MIT" ]
2
2021-09-29T00:40:56.000Z
2021-10-17T14:28:25.000Z
import unittest from code.google_search import get_people_also_ask_links class TestGoogleSearch(unittest.TestCase): def setUp(self) -> None: pass def test_get_people_also_ask_links(self): """Test the get_people_also_ask_links method""" test = "principal components" result = get_people_also_ask_links(test) self.assertEqual(list, type(result))
30.307692
56
0.72335
import unittest from code.google_search import get_people_also_ask_links class TestGoogleSearch(unittest.TestCase): def setUp(self) -> None: pass def test_get_people_also_ask_links(self): test = "principal components" result = get_people_also_ask_links(test) self.assertEqual(list, type(result))
true
true
f702aa423d4c47e5a80244f89f501ffc9d1a9a08
4,584
py
Python
cinder/tests/api/contrib/test_volume_image_metadata.py
CloudVPS/cinder
9097b9407b6ce16c7b5678682284a0ad0fcc652d
[ "Apache-2.0" ]
null
null
null
cinder/tests/api/contrib/test_volume_image_metadata.py
CloudVPS/cinder
9097b9407b6ce16c7b5678682284a0ad0fcc652d
[ "Apache-2.0" ]
null
null
null
cinder/tests/api/contrib/test_volume_image_metadata.py
CloudVPS/cinder
9097b9407b6ce16c7b5678682284a0ad0fcc652d
[ "Apache-2.0" ]
null
null
null
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import json import uuid from xml.dom import minidom import webob from cinder.api import common from cinder.api.openstack.wsgi import MetadataXMLDeserializer from cinder.api.openstack.wsgi import XMLDeserializer from cinder import db from cinder import test from cinder.tests.api import fakes from cinder import volume def fake_volume_get(*args, **kwargs): return { 'id': 'fake', 'host': 'host001', 'status': 'available', 'size': 5, 'availability_zone': 'somewhere', 'created_at': datetime.datetime.now(), 'attach_status': None, 'display_name': 'anothervolume', 'display_description': 'Just another volume!', 'volume_type_id': None, 'snapshot_id': None, 'project_id': 'fake', } def fake_volume_get_all(*args, **kwargs): return [fake_volume_get()] fake_image_metadata = { 'image_id': 'someid', 'image_name': 'fake', 'kernel_id': 'somekernel', 'ramdisk_id': 'someramdisk', } def fake_get_volume_image_metadata(*args, **kwargs): return fake_image_metadata def fake_get_volumes_image_metadata(*args, **kwargs): return {'fake': fake_image_metadata} class VolumeImageMetadataTest(test.TestCase): content_type = 'application/json' def setUp(self): super(VolumeImageMetadataTest, self).setUp() self.stubs.Set(volume.API, 'get', fake_volume_get) self.stubs.Set(volume.API, 'get_all', fake_volume_get_all) self.stubs.Set(volume.API, 'get_volume_image_metadata', fake_get_volume_image_metadata) self.stubs.Set(volume.API, 'get_volumes_image_metadata', fake_get_volumes_image_metadata) self.stubs.Set(db, 'volume_get', fake_volume_get) self.UUID = uuid.uuid4() def _make_request(self, url): req = webob.Request.blank(url) req.accept = self.content_type res = req.get_response(fakes.wsgi_app()) return res def _get_image_metadata(self, body): return json.loads(body)['volume']['volume_image_metadata'] def _get_image_metadata_list(self, body): return [ volume['volume_image_metadata'] for volume in json.loads(body)['volumes'] ] def test_get_volume(self): res = self._make_request('/v2/fake/volumes/%s' % self.UUID) self.assertEqual(res.status_int, 200) self.assertEqual(self._get_image_metadata(res.body), fake_image_metadata) def test_list_detail_volumes(self): res = self._make_request('/v2/fake/volumes/detail') self.assertEqual(res.status_int, 200) self.assertEqual(self._get_image_metadata_list(res.body)[0], fake_image_metadata) class ImageMetadataXMLDeserializer(common.MetadataXMLDeserializer): metadata_node_name = "volume_image_metadata" class VolumeImageMetadataXMLTest(VolumeImageMetadataTest): content_type = 'application/xml' def _get_image_metadata(self, body): deserializer = XMLDeserializer() volume = deserializer.find_first_child_named( minidom.parseString(body), 'volume') image_metadata = deserializer.find_first_child_named( volume, 'volume_image_metadata') return MetadataXMLDeserializer().extract_metadata(image_metadata) def _get_image_metadata_list(self, body): deserializer = XMLDeserializer() 
volumes = deserializer.find_first_child_named( minidom.parseString(body), 'volumes') volume_list = deserializer.find_children_named(volumes, 'volume') image_metadata_list = [ deserializer.find_first_child_named( volume, 'volume_image_metadata' ) for volume in volume_list] return map(MetadataXMLDeserializer().extract_metadata, image_metadata_list)
32.978417
77
0.681065
import datetime import json import uuid from xml.dom import minidom import webob from cinder.api import common from cinder.api.openstack.wsgi import MetadataXMLDeserializer from cinder.api.openstack.wsgi import XMLDeserializer from cinder import db from cinder import test from cinder.tests.api import fakes from cinder import volume def fake_volume_get(*args, **kwargs): return { 'id': 'fake', 'host': 'host001', 'status': 'available', 'size': 5, 'availability_zone': 'somewhere', 'created_at': datetime.datetime.now(), 'attach_status': None, 'display_name': 'anothervolume', 'display_description': 'Just another volume!', 'volume_type_id': None, 'snapshot_id': None, 'project_id': 'fake', } def fake_volume_get_all(*args, **kwargs): return [fake_volume_get()] fake_image_metadata = { 'image_id': 'someid', 'image_name': 'fake', 'kernel_id': 'somekernel', 'ramdisk_id': 'someramdisk', } def fake_get_volume_image_metadata(*args, **kwargs): return fake_image_metadata def fake_get_volumes_image_metadata(*args, **kwargs): return {'fake': fake_image_metadata} class VolumeImageMetadataTest(test.TestCase): content_type = 'application/json' def setUp(self): super(VolumeImageMetadataTest, self).setUp() self.stubs.Set(volume.API, 'get', fake_volume_get) self.stubs.Set(volume.API, 'get_all', fake_volume_get_all) self.stubs.Set(volume.API, 'get_volume_image_metadata', fake_get_volume_image_metadata) self.stubs.Set(volume.API, 'get_volumes_image_metadata', fake_get_volumes_image_metadata) self.stubs.Set(db, 'volume_get', fake_volume_get) self.UUID = uuid.uuid4() def _make_request(self, url): req = webob.Request.blank(url) req.accept = self.content_type res = req.get_response(fakes.wsgi_app()) return res def _get_image_metadata(self, body): return json.loads(body)['volume']['volume_image_metadata'] def _get_image_metadata_list(self, body): return [ volume['volume_image_metadata'] for volume in json.loads(body)['volumes'] ] def test_get_volume(self): res = self._make_request('/v2/fake/volumes/%s' % self.UUID) self.assertEqual(res.status_int, 200) self.assertEqual(self._get_image_metadata(res.body), fake_image_metadata) def test_list_detail_volumes(self): res = self._make_request('/v2/fake/volumes/detail') self.assertEqual(res.status_int, 200) self.assertEqual(self._get_image_metadata_list(res.body)[0], fake_image_metadata) class ImageMetadataXMLDeserializer(common.MetadataXMLDeserializer): metadata_node_name = "volume_image_metadata" class VolumeImageMetadataXMLTest(VolumeImageMetadataTest): content_type = 'application/xml' def _get_image_metadata(self, body): deserializer = XMLDeserializer() volume = deserializer.find_first_child_named( minidom.parseString(body), 'volume') image_metadata = deserializer.find_first_child_named( volume, 'volume_image_metadata') return MetadataXMLDeserializer().extract_metadata(image_metadata) def _get_image_metadata_list(self, body): deserializer = XMLDeserializer() volumes = deserializer.find_first_child_named( minidom.parseString(body), 'volumes') volume_list = deserializer.find_children_named(volumes, 'volume') image_metadata_list = [ deserializer.find_first_child_named( volume, 'volume_image_metadata' ) for volume in volume_list] return map(MetadataXMLDeserializer().extract_metadata, image_metadata_list)
true
true